1 /*
2  *	Linux INET6 implementation
3  *	Forwarding Information Database
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Changes:
14  *	Yuji SEKIYA @USAGI:	Support default route on router node;
15  *				remove ip6_null_entry from the top of
16  *				routing table.
17  *	Ville Nuorvala:		Fixed routing subtrees.
18  */
19 
20 #define pr_fmt(fmt) "IPv6: " fmt
21 
22 #include <linux/errno.h>
23 #include <linux/types.h>
24 #include <linux/net.h>
25 #include <linux/route.h>
26 #include <linux/netdevice.h>
27 #include <linux/in6.h>
28 #include <linux/init.h>
29 #include <linux/list.h>
30 #include <linux/slab.h>
31 
32 #include <net/ipv6.h>
33 #include <net/ndisc.h>
34 #include <net/addrconf.h>
35 
36 #include <net/ip6_fib.h>
37 #include <net/ip6_route.h>
38 
39 #define RT6_DEBUG 2
40 
41 #if RT6_DEBUG >= 3
42 #define RT6_TRACE(x...) pr_debug(x)
43 #else
44 #define RT6_TRACE(x...) do { ; } while (0)
45 #endif
46 
47 static struct kmem_cache *fib6_node_kmem __read_mostly;
48 
49 struct fib6_cleaner {
50 	struct fib6_walker w;
51 	struct net *net;
52 	int (*func)(struct rt6_info *, void *arg);
53 	int sernum;
54 	void *arg;
55 };
56 
57 static DEFINE_RWLOCK(fib6_walker_lock);
58 
59 #ifdef CONFIG_IPV6_SUBTREES
60 #define FWS_INIT FWS_S
61 #else
62 #define FWS_INIT FWS_L
63 #endif
64 
65 static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
66 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
67 static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
68 static int fib6_walk(struct fib6_walker *w);
69 static int fib6_walk_continue(struct fib6_walker *w);
70 
71 /*
72  *	A routing update causes an increase of the serial number on the
73  *	affected subtree. This allows for cached routes to be asynchronously
74  *	tested when modifications are made to the destination cache as a
75  *	result of redirects, path MTU changes, etc.
76  */
77 
78 static void fib6_gc_timer_cb(unsigned long arg);
79 
80 static LIST_HEAD(fib6_walkers);
81 #define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)
82 
83 static void fib6_walker_link(struct fib6_walker *w)
84 {
85 	write_lock_bh(&fib6_walker_lock);
86 	list_add(&w->lh, &fib6_walkers);
87 	write_unlock_bh(&fib6_walker_lock);
88 }
89 
90 static void fib6_walker_unlink(struct fib6_walker *w)
91 {
92 	write_lock_bh(&fib6_walker_lock);
93 	list_del(&w->lh);
94 	write_unlock_bh(&fib6_walker_lock);
95 }
96 
97 static int fib6_new_sernum(struct net *net)
98 {
99 	int new, old;
100 
101 	do {
102 		old = atomic_read(&net->ipv6.fib6_sernum);
103 		new = old < INT_MAX ? old + 1 : 1;
104 	} while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
105 				old, new) != old);
106 	return new;
107 }
108 
109 enum {
110 	FIB6_NO_SERNUM_CHANGE = 0,
111 };
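/*
 * Note (reading aid): fib6_new_sernum() wraps back to 1, never to 0,
 * because 0 is reserved as FIB6_NO_SERNUM_CHANGE and means "leave the
 * node's fn_sernum untouched" to the cleaner code further below.
 */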
112 
113 /*
114  *	Auxiliary address test functions for the radix tree.
115  *
116  *	These assume a 32-bit processor (although it will also work on
117  *	64-bit processors)
118  */
119 
120 /*
121  *	test bit
122  */
123 #if defined(__LITTLE_ENDIAN)
124 # define BITOP_BE32_SWIZZLE	(0x1F & ~7)
125 #else
126 # define BITOP_BE32_SWIZZLE	0
127 #endif
128 
129 static __be32 addr_bit_set(const void *token, int fn_bit)
130 {
131 	const __be32 *addr = token;
132 	/*
133 	 * Here,
134 	 *	1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
135 	 * is optimized version of
136 	 *	htonl(1 << ((~fn_bit)&0x1F))
137 	 * See include/asm-generic/bitops/le.h.
138 	 */
139 	return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
140 	       addr[fn_bit >> 5];
141 }
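/*
 * Worked example (reading aid): addr_bit_set(addr, 0) tests the most
 * significant bit of addr[0], i.e. the first bit of the IPv6 address;
 * addr_bit_set(addr, 32) tests the first bit of addr[1], and so on.
 * The swizzle only compensates for host byte order, so the result is
 * the same on little- and big-endian machines.
 */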
142 
143 static struct fib6_node *node_alloc(void)
144 {
145 	struct fib6_node *fn;
146 
147 	fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);
148 
149 	return fn;
150 }
151 
152 static void node_free(struct fib6_node *fn)
153 {
154 	kmem_cache_free(fib6_node_kmem, fn);
155 }
156 
157 static void rt6_release(struct rt6_info *rt)
158 {
159 	if (atomic_dec_and_test(&rt->rt6i_ref))
160 		dst_free(&rt->dst);
161 }
162 
163 static void fib6_free_table(struct fib6_table *table)
164 {
165 	inetpeer_invalidate_tree(&table->tb6_peers);
166 	kfree(table);
167 }
168 
169 static void fib6_link_table(struct net *net, struct fib6_table *tb)
170 {
171 	unsigned int h;
172 
173 	/*
174 	 * Initialize table lock at a single place to give lockdep a key,
175 	 * tables aren't visible prior to being linked to the list.
176 	 */
177 	rwlock_init(&tb->tb6_lock);
178 
179 	h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);
180 
181 	/*
182 	 * No protection necessary, this is the only list mutation
183 	 * operation, tables never disappear once they exist.
184 	 */
185 	hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
186 }
187 
188 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
189 
190 static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
191 {
192 	struct fib6_table *table;
193 
194 	table = kzalloc(sizeof(*table), GFP_ATOMIC);
195 	if (table) {
196 		table->tb6_id = id;
197 		table->tb6_root.leaf = net->ipv6.ip6_null_entry;
198 		table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
199 		inet_peer_base_init(&table->tb6_peers);
200 	}
201 
202 	return table;
203 }
204 
205 struct fib6_table *fib6_new_table(struct net *net, u32 id)
206 {
207 	struct fib6_table *tb;
208 
209 	if (id == 0)
210 		id = RT6_TABLE_MAIN;
211 	tb = fib6_get_table(net, id);
212 	if (tb)
213 		return tb;
214 
215 	tb = fib6_alloc_table(net, id);
216 	if (tb)
217 		fib6_link_table(net, tb);
218 
219 	return tb;
220 }
221 
222 struct fib6_table *fib6_get_table(struct net *net, u32 id)
223 {
224 	struct fib6_table *tb;
225 	struct hlist_head *head;
226 	unsigned int h;
227 
228 	if (id == 0)
229 		id = RT6_TABLE_MAIN;
230 	h = id & (FIB6_TABLE_HASHSZ - 1);
231 	rcu_read_lock();
232 	head = &net->ipv6.fib_table_hash[h];
233 	hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
234 		if (tb->tb6_id == id) {
235 			rcu_read_unlock();
236 			return tb;
237 		}
238 	}
239 	rcu_read_unlock();
240 
241 	return NULL;
242 }
243 
244 static void __net_init fib6_tables_init(struct net *net)
245 {
246 	fib6_link_table(net, net->ipv6.fib6_main_tbl);
247 	fib6_link_table(net, net->ipv6.fib6_local_tbl);
248 }
249 #else
250 
251 struct fib6_table *fib6_new_table(struct net *net, u32 id)
252 {
253 	return fib6_get_table(net, id);
254 }
255 
256 struct fib6_table *fib6_get_table(struct net *net, u32 id)
257 {
258 	  return net->ipv6.fib6_main_tbl;
259 }
260 
261 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
262 				   int flags, pol_lookup_t lookup)
263 {
264 	return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
265 }
266 
267 static void __net_init fib6_tables_init(struct net *net)
268 {
269 	fib6_link_table(net, net->ipv6.fib6_main_tbl);
270 }
271 
272 #endif
273 
274 static int fib6_dump_node(struct fib6_walker *w)
275 {
276 	int res;
277 	struct rt6_info *rt;
278 
279 	for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
280 		res = rt6_dump_route(rt, w->args);
281 		if (res < 0) {
282 			/* Frame is full, suspend walking */
283 			w->leaf = rt;
284 			return 1;
285 		}
286 		WARN_ON(res == 0);
287 	}
288 	w->leaf = NULL;
289 	return 0;
290 }
291 
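/*
 * Reading aid: the netlink dump code below stashes its state in
 * cb->args[] as follows:
 *	args[0] - current hash bucket in fib_table_hash
 *	args[1] - table index within that bucket
 *	args[2] - pointer to the allocated fib6_walker
 *	args[3] - the original cb->done callback
 *	args[4] - flag: walker is linked, i.e. a table dump is suspended
 *	args[5] - fn_sernum of the table root when the walk was suspended
 */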
292 static void fib6_dump_end(struct netlink_callback *cb)
293 {
294 	struct fib6_walker *w = (void *)cb->args[2];
295 
296 	if (w) {
297 		if (cb->args[4]) {
298 			cb->args[4] = 0;
299 			fib6_walker_unlink(w);
300 		}
301 		cb->args[2] = 0;
302 		kfree(w);
303 	}
304 	cb->done = (void *)cb->args[3];
305 	cb->args[1] = 3;
306 }
307 
308 static int fib6_dump_done(struct netlink_callback *cb)
309 {
310 	fib6_dump_end(cb);
311 	return cb->done ? cb->done(cb) : 0;
312 }
313 
314 static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
315 			   struct netlink_callback *cb)
316 {
317 	struct fib6_walker *w;
318 	int res;
319 
320 	w = (void *)cb->args[2];
321 	w->root = &table->tb6_root;
322 
323 	if (cb->args[4] == 0) {
324 		w->count = 0;
325 		w->skip = 0;
326 
327 		read_lock_bh(&table->tb6_lock);
328 		res = fib6_walk(w);
329 		read_unlock_bh(&table->tb6_lock);
330 		if (res > 0) {
331 			cb->args[4] = 1;
332 			cb->args[5] = w->root->fn_sernum;
333 		}
334 	} else {
335 		if (cb->args[5] != w->root->fn_sernum) {
336 			/* Begin at the root if the tree changed */
337 			cb->args[5] = w->root->fn_sernum;
338 			w->state = FWS_INIT;
339 			w->node = w->root;
340 			w->skip = w->count;
341 		} else
342 			w->skip = 0;
343 
344 		read_lock_bh(&table->tb6_lock);
345 		res = fib6_walk_continue(w);
346 		read_unlock_bh(&table->tb6_lock);
347 		if (res <= 0) {
348 			fib6_walker_unlink(w);
349 			cb->args[4] = 0;
350 		}
351 	}
352 
353 	return res;
354 }
355 
356 static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
357 {
358 	struct net *net = sock_net(skb->sk);
359 	unsigned int h, s_h;
360 	unsigned int e = 0, s_e;
361 	struct rt6_rtnl_dump_arg arg;
362 	struct fib6_walker *w;
363 	struct fib6_table *tb;
364 	struct hlist_head *head;
365 	int res = 0;
366 
367 	s_h = cb->args[0];
368 	s_e = cb->args[1];
369 
370 	w = (void *)cb->args[2];
371 	if (!w) {
372 		/* New dump:
373 		 *
374 		 * 1. hook callback destructor.
375 		 */
376 		cb->args[3] = (long)cb->done;
377 		cb->done = fib6_dump_done;
378 
379 		/*
380 		 * 2. allocate and initialize walker.
381 		 */
382 		w = kzalloc(sizeof(*w), GFP_ATOMIC);
383 		if (!w)
384 			return -ENOMEM;
385 		w->func = fib6_dump_node;
386 		cb->args[2] = (long)w;
387 	}
388 
389 	arg.skb = skb;
390 	arg.cb = cb;
391 	arg.net = net;
392 	w->args = &arg;
393 
394 	rcu_read_lock();
395 	for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
396 		e = 0;
397 		head = &net->ipv6.fib_table_hash[h];
398 		hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
399 			if (e < s_e)
400 				goto next;
401 			res = fib6_dump_table(tb, skb, cb);
402 			if (res != 0)
403 				goto out;
404 next:
405 			e++;
406 		}
407 	}
408 out:
409 	rcu_read_unlock();
410 	cb->args[1] = e;
411 	cb->args[0] = h;
412 
413 	res = res < 0 ? res : skb->len;
414 	if (res <= 0)
415 		fib6_dump_end(cb);
416 	return res;
417 }
418 
419 /*
420  *	Routing Table
421  *
422  *	return the appropriate node for a routing tree "add" operation
423  *	by either creating and inserting or by returning an existing
424  *	node.
425  */
426 
427 static struct fib6_node *fib6_add_1(struct fib6_node *root,
428 				     struct in6_addr *addr, int plen,
429 				     int offset, int allow_create,
430 				     int replace_required, int sernum)
431 {
432 	struct fib6_node *fn, *in, *ln;
433 	struct fib6_node *pn = NULL;
434 	struct rt6key *key;
435 	int	bit;
436 	__be32	dir = 0;
437 
438 	RT6_TRACE("fib6_add_1\n");
439 
440 	/* insert node in tree */
441 
442 	fn = root;
443 
444 	do {
445 		key = (struct rt6key *)((u8 *)fn->leaf + offset);
446 
447 		/*
448 		 *	Prefix match
449 		 */
450 		if (plen < fn->fn_bit ||
451 		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
452 			if (!allow_create) {
453 				if (replace_required) {
454 					pr_warn("Can't replace route, no match found\n");
455 					return ERR_PTR(-ENOENT);
456 				}
457 				pr_warn("NLM_F_CREATE should be set when creating new route\n");
458 			}
459 			goto insert_above;
460 		}
461 
462 		/*
463 		 *	Exact match ?
464 		 */
465 
466 		if (plen == fn->fn_bit) {
467 			/* clean up an intermediate node */
468 			if (!(fn->fn_flags & RTN_RTINFO)) {
469 				rt6_release(fn->leaf);
470 				fn->leaf = NULL;
471 			}
472 
473 			fn->fn_sernum = sernum;
474 
475 			return fn;
476 		}
477 
478 		/*
479 		 *	We have more bits to go
480 		 */
481 
482 		/* Try to walk down on tree. */
483 		fn->fn_sernum = sernum;
484 		dir = addr_bit_set(addr, fn->fn_bit);
485 		pn = fn;
486 		fn = dir ? fn->right : fn->left;
487 	} while (fn);
488 
489 	if (!allow_create) {
490 		/* We should not create a new node because
491 		 * NLM_F_REPLACE was specified without NLM_F_CREATE.
492 		 * I assume it is safe to require NLM_F_CREATE when
493 		 * REPLACE flag is used! Later we may want to remove the
494 		 * check for replace_required, because according
495 		 * to netlink specification, NLM_F_CREATE
496 		 * MUST be specified if new route is created.
497 		 * That would keep IPv6 consistent with IPv4
498 		 */
499 		if (replace_required) {
500 			pr_warn("Can't replace route, no match found\n");
501 			return ERR_PTR(-ENOENT);
502 		}
503 		pr_warn("NLM_F_CREATE should be set when creating new route\n");
504 	}
505 	/*
506 	 *	We walked to the bottom of tree.
507 	 *	Create new leaf node without children.
508 	 */
509 
510 	ln = node_alloc();
511 
512 	if (!ln)
513 		return ERR_PTR(-ENOMEM);
514 	ln->fn_bit = plen;
515 
516 	ln->parent = pn;
517 	ln->fn_sernum = sernum;
518 
519 	if (dir)
520 		pn->right = ln;
521 	else
522 		pn->left  = ln;
523 
524 	return ln;
525 
526 
527 insert_above:
528 	/*
529 	 * split since we don't have a common prefix anymore or
530 	 * we have a less significant route.
531 	 * We have to insert an intermediate node in the tree;
532 	 * this new node will point to the one we need to create
533 	 * and to the current one.
534 	 */
535 
536 	pn = fn->parent;
537 
538 	/* find 1st bit in difference between the 2 addrs.
539 
540 	   See comment in __ipv6_addr_diff: bit may be an invalid value,
541 	   but if it is >= plen, the value is ignored in any case.
542 	 */
543 
544 	bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));
545 
546 	/*
547 	 *		(intermediate)[in]
548 	 *	          /	   \
549 	 *	(new leaf node)[ln] (old node)[fn]
550 	 */
551 	if (plen > bit) {
552 		in = node_alloc();
553 		ln = node_alloc();
554 
555 		if (!in || !ln) {
556 			if (in)
557 				node_free(in);
558 			if (ln)
559 				node_free(ln);
560 			return ERR_PTR(-ENOMEM);
561 		}
562 
563 		/*
564 		 * new intermediate node.
565 		 * RTN_RTINFO will
566 		 * be off since an address that chooses one of
567 		 * the branches would not match less specific routes
568 		 * in the other branch
569 		 */
570 
571 		in->fn_bit = bit;
572 
573 		in->parent = pn;
574 		in->leaf = fn->leaf;
575 		atomic_inc(&in->leaf->rt6i_ref);
576 
577 		in->fn_sernum = sernum;
578 
579 		/* update parent pointer */
580 		if (dir)
581 			pn->right = in;
582 		else
583 			pn->left  = in;
584 
585 		ln->fn_bit = plen;
586 
587 		ln->parent = in;
588 		fn->parent = in;
589 
590 		ln->fn_sernum = sernum;
591 
592 		if (addr_bit_set(addr, bit)) {
593 			in->right = ln;
594 			in->left  = fn;
595 		} else {
596 			in->left  = ln;
597 			in->right = fn;
598 		}
599 	} else { /* plen <= bit */
600 
601 		/*
602 		 *		(new leaf node)[ln]
603 		 *	          /	   \
604 		 *	     (old node)[fn] NULL
605 		 */
606 
607 		ln = node_alloc();
608 
609 		if (!ln)
610 			return ERR_PTR(-ENOMEM);
611 
612 		ln->fn_bit = plen;
613 
614 		ln->parent = pn;
615 
616 		ln->fn_sernum = sernum;
617 
618 		if (dir)
619 			pn->right = ln;
620 		else
621 			pn->left  = ln;
622 
623 		if (addr_bit_set(&key->addr, plen))
624 			ln->right = fn;
625 		else
626 			ln->left  = fn;
627 
628 		fn->parent = ln;
629 	}
630 	return ln;
631 }
632 
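/*
 * Reading aid: a route qualifies for ECMP only when RTF_GATEWAY is set
 * and neither RTF_ADDRCONF nor RTF_DYNAMIC is, i.e. in practice only
 * static gateway routes are grouped together as siblings.
 */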
633 static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
634 {
635 	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
636 	       RTF_GATEWAY;
637 }
638 
639 static int fib6_commit_metrics(struct dst_entry *dst,
640 			       struct nlattr *mx, int mx_len)
641 {
642 	struct nlattr *nla;
643 	int remaining;
644 	u32 *mp;
645 
646 	if (dst->flags & DST_HOST) {
647 		mp = dst_metrics_write_ptr(dst);
648 	} else {
649 		mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
650 		if (!mp)
651 			return -ENOMEM;
652 		dst_init_metrics(dst, mp, 0);
653 	}
654 
655 	nla_for_each_attr(nla, mx, mx_len, remaining) {
656 		int type = nla_type(nla);
657 
658 		if (type) {
659 			if (type > RTAX_MAX)
660 				return -EINVAL;
661 
662 			mp[type - 1] = nla_get_u32(nla);
663 		}
664 	}
665 	return 0;
666 }
667 
668 static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
669 			  struct net *net)
670 {
671 	if (atomic_read(&rt->rt6i_ref) != 1) {
672 		/* This route is used as dummy address holder in some split
673 		 * nodes. It is not leaked, but it still holds other resources,
674 		 * which must be released in time. So, scan ascendant nodes
675 		 * and replace dummy references to this route with references
676 		 * to still alive ones.
677 		 */
678 		while (fn) {
679 			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
680 				fn->leaf = fib6_find_prefix(net, fn);
681 				atomic_inc(&fn->leaf->rt6i_ref);
682 				rt6_release(rt);
683 			}
684 			fn = fn->parent;
685 		}
686 		/* No more references are possible at this point. */
687 		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
688 	}
689 }
690 
691 /*
692  *	Insert routing information in a node.
693  */
694 
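/*
 * Reading aid: routes attached to a node hang off fn->leaf as a singly
 * linked list (dst.rt6_next) sorted by rt6i_metric.  The loop below
 * walks that list to find the insertion point, detect duplicates and
 * count ECMP siblings before the new route is linked in.
 */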
695 static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
696 			    struct nl_info *info, struct nlattr *mx, int mx_len)
697 {
698 	struct rt6_info *iter = NULL;
699 	struct rt6_info **ins;
700 	int replace = (info->nlh &&
701 		       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
702 	int add = (!info->nlh ||
703 		   (info->nlh->nlmsg_flags & NLM_F_CREATE));
704 	int found = 0;
705 	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
706 	int err;
707 
708 	ins = &fn->leaf;
709 
710 	for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
711 		/*
712 		 *	Search for duplicates
713 		 */
714 
715 		if (iter->rt6i_metric == rt->rt6i_metric) {
716 			/*
717 			 *	Same priority level
718 			 */
719 			if (info->nlh &&
720 			    (info->nlh->nlmsg_flags & NLM_F_EXCL))
721 				return -EEXIST;
722 			if (replace) {
723 				found++;
724 				break;
725 			}
726 
727 			if (iter->dst.dev == rt->dst.dev &&
728 			    iter->rt6i_idev == rt->rt6i_idev &&
729 			    ipv6_addr_equal(&iter->rt6i_gateway,
730 					    &rt->rt6i_gateway)) {
731 				if (rt->rt6i_nsiblings)
732 					rt->rt6i_nsiblings = 0;
733 				if (!(iter->rt6i_flags & RTF_EXPIRES))
734 					return -EEXIST;
735 				if (!(rt->rt6i_flags & RTF_EXPIRES))
736 					rt6_clean_expires(iter);
737 				else
738 					rt6_set_expires(iter, rt->dst.expires);
739 				return -EEXIST;
740 			}
741 			/* If we have the same destination and the same metric,
742 			 * but not the same gateway, then the route we try to
743 			 * add is a sibling of this route; increment our counter
744 			 * of siblings, and later we will add our route to the
745 			 * list.
746 			 * Only static routes (which don't have flag
747 			 * RTF_EXPIRES) are used for ECMPv6.
748 			 *
749 			 * To avoid a long list, we only add siblings if the
750 			 * route has a gateway.
751 			 */
752 			if (rt_can_ecmp &&
753 			    rt6_qualify_for_ecmp(iter))
754 				rt->rt6i_nsiblings++;
755 		}
756 
757 		if (iter->rt6i_metric > rt->rt6i_metric)
758 			break;
759 
760 		ins = &iter->dst.rt6_next;
761 	}
762 
763 	/* Reset round-robin state, if necessary */
764 	if (ins == &fn->leaf)
765 		fn->rr_ptr = NULL;
766 
767 	/* Link this route to others same route. */
768 	if (rt->rt6i_nsiblings) {
769 		unsigned int rt6i_nsiblings;
770 		struct rt6_info *sibling, *temp_sibling;
771 
772 		/* Find the first route that has the same metric */
773 		sibling = fn->leaf;
774 		while (sibling) {
775 			if (sibling->rt6i_metric == rt->rt6i_metric &&
776 			    rt6_qualify_for_ecmp(sibling)) {
777 				list_add_tail(&rt->rt6i_siblings,
778 					      &sibling->rt6i_siblings);
779 				break;
780 			}
781 			sibling = sibling->dst.rt6_next;
782 		}
783 		/* For each sibling in the list, increment the counter of
784 		 * siblings. BUG() if the counters do not match; the list of siblings
785 		 * is broken!
786 		 */
787 		rt6i_nsiblings = 0;
788 		list_for_each_entry_safe(sibling, temp_sibling,
789 					 &rt->rt6i_siblings, rt6i_siblings) {
790 			sibling->rt6i_nsiblings++;
791 			BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings);
792 			rt6i_nsiblings++;
793 		}
794 		BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
795 	}
796 
797 	/*
798 	 *	insert node
799 	 */
800 	if (!replace) {
801 		if (!add)
802 			pr_warn("NLM_F_CREATE should be set when creating new route\n");
803 
804 add:
805 		if (mx) {
806 			err = fib6_commit_metrics(&rt->dst, mx, mx_len);
807 			if (err)
808 				return err;
809 		}
810 		rt->dst.rt6_next = iter;
811 		*ins = rt;
812 		rt->rt6i_node = fn;
813 		atomic_inc(&rt->rt6i_ref);
814 		inet6_rt_notify(RTM_NEWROUTE, rt, info);
815 		info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
816 
817 		if (!(fn->fn_flags & RTN_RTINFO)) {
818 			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
819 			fn->fn_flags |= RTN_RTINFO;
820 		}
821 
822 	} else {
823 		if (!found) {
824 			if (add)
825 				goto add;
826 			pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
827 			return -ENOENT;
828 		}
829 		if (mx) {
830 			err = fib6_commit_metrics(&rt->dst, mx, mx_len);
831 			if (err)
832 				return err;
833 		}
834 		*ins = rt;
835 		rt->rt6i_node = fn;
836 		rt->dst.rt6_next = iter->dst.rt6_next;
837 		atomic_inc(&rt->rt6i_ref);
838 		inet6_rt_notify(RTM_NEWROUTE, rt, info);
839 		if (!(fn->fn_flags & RTN_RTINFO)) {
840 			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
841 			fn->fn_flags |= RTN_RTINFO;
842 		}
843 		fib6_purge_rt(iter, fn, info->nl_net);
844 		rt6_release(iter);
845 	}
846 
847 	return 0;
848 }
849 
850 static void fib6_start_gc(struct net *net, struct rt6_info *rt)
851 {
852 	if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
853 	    (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
854 		mod_timer(&net->ipv6.ip6_fib_timer,
855 			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
856 }
857 
858 void fib6_force_start_gc(struct net *net)
859 {
860 	if (!timer_pending(&net->ipv6.ip6_fib_timer))
861 		mod_timer(&net->ipv6.ip6_fib_timer,
862 			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
863 }
864 
865 /*
866  *	Add routing information to the routing tree.
867  *	<destination addr>/<source addr>
868  *	with source addr info in sub-trees
869  */
870 
871 int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
872 	     struct nlattr *mx, int mx_len)
873 {
874 	struct fib6_node *fn, *pn = NULL;
875 	int err = -ENOMEM;
876 	int allow_create = 1;
877 	int replace_required = 0;
878 	int sernum = fib6_new_sernum(info->nl_net);
879 
880 	if (info->nlh) {
881 		if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
882 			allow_create = 0;
883 		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
884 			replace_required = 1;
885 	}
886 	if (!allow_create && !replace_required)
887 		pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
888 
889 	fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
890 			offsetof(struct rt6_info, rt6i_dst), allow_create,
891 			replace_required, sernum);
892 	if (IS_ERR(fn)) {
893 		err = PTR_ERR(fn);
894 		fn = NULL;
895 		goto out;
896 	}
897 
898 	pn = fn;
899 
900 #ifdef CONFIG_IPV6_SUBTREES
901 	if (rt->rt6i_src.plen) {
902 		struct fib6_node *sn;
903 
904 		if (!fn->subtree) {
905 			struct fib6_node *sfn;
906 
907 			/*
908 			 * Create subtree.
909 			 *
910 			 *		fn[main tree]
911 			 *		|
912 			 *		sfn[subtree root]
913 			 *		   \
914 			 *		    sn[new leaf node]
915 			 */
916 
917 			/* Create subtree root node */
918 			sfn = node_alloc();
919 			if (!sfn)
920 				goto st_failure;
921 
922 			sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
923 			atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
924 			sfn->fn_flags = RTN_ROOT;
925 			sfn->fn_sernum = sernum;
926 
927 			/* Now add the first leaf node to new subtree */
928 
929 			sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
930 					rt->rt6i_src.plen,
931 					offsetof(struct rt6_info, rt6i_src),
932 					allow_create, replace_required, sernum);
933 
934 			if (IS_ERR(sn)) {
935 				/* If it failed, discard the just allocated
936 				   root, and then (in st_failure) the stale
937 				   node in the main tree.
938 				 */
939 				node_free(sfn);
940 				err = PTR_ERR(sn);
941 				goto st_failure;
942 			}
943 
944 			/* Now link new subtree to main tree */
945 			sfn->parent = fn;
946 			fn->subtree = sfn;
947 		} else {
948 			sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
949 					rt->rt6i_src.plen,
950 					offsetof(struct rt6_info, rt6i_src),
951 					allow_create, replace_required, sernum);
952 
953 			if (IS_ERR(sn)) {
954 				err = PTR_ERR(sn);
955 				goto st_failure;
956 			}
957 		}
958 
959 		if (!fn->leaf) {
960 			fn->leaf = rt;
961 			atomic_inc(&rt->rt6i_ref);
962 		}
963 		fn = sn;
964 	}
965 #endif
966 
967 	err = fib6_add_rt2node(fn, rt, info, mx, mx_len);
968 	if (!err) {
969 		fib6_start_gc(info->nl_net, rt);
970 		if (!(rt->rt6i_flags & RTF_CACHE))
971 			fib6_prune_clones(info->nl_net, pn);
972 	}
973 
974 out:
975 	if (err) {
976 #ifdef CONFIG_IPV6_SUBTREES
977 		/*
978 		 * If fib6_add_1 has cleared the old leaf pointer in the
979 		 * super-tree leaf node we have to find a new one for it.
980 		 */
981 		if (pn != fn && pn->leaf == rt) {
982 			pn->leaf = NULL;
983 			atomic_dec(&rt->rt6i_ref);
984 		}
985 		if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
986 			pn->leaf = fib6_find_prefix(info->nl_net, pn);
987 #if RT6_DEBUG >= 2
988 			if (!pn->leaf) {
989 				WARN_ON(pn->leaf == NULL);
990 				pn->leaf = info->nl_net->ipv6.ip6_null_entry;
991 			}
992 #endif
993 			atomic_inc(&pn->leaf->rt6i_ref);
994 		}
995 #endif
996 		dst_free(&rt->dst);
997 	}
998 	return err;
999 
1000 #ifdef CONFIG_IPV6_SUBTREES
1001 	/* Subtree creation failed, probably main tree node
1002 	   is an orphan. If it is, shoot it.
1003 	 */
1004 st_failure:
1005 	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
1006 		fib6_repair_tree(info->nl_net, fn);
1007 	dst_free(&rt->dst);
1008 	return err;
1009 #endif
1010 }
1011 
1012 /*
1013  *	Routing tree lookup
1014  *
1015  */
1016 
1017 struct lookup_args {
1018 	int			offset;		/* key offset on rt6_info	*/
1019 	const struct in6_addr	*addr;		/* search key			*/
1020 };
1021 
1022 static struct fib6_node *fib6_lookup_1(struct fib6_node *root,
1023 				       struct lookup_args *args)
1024 {
1025 	struct fib6_node *fn;
1026 	__be32 dir;
1027 
1028 	if (unlikely(args->offset == 0))
1029 		return NULL;
1030 
1031 	/*
1032 	 *	Descend on a tree
1033 	 */
1034 
1035 	fn = root;
1036 
1037 	for (;;) {
1038 		struct fib6_node *next;
1039 
1040 		dir = addr_bit_set(args->addr, fn->fn_bit);
1041 
1042 		next = dir ? fn->right : fn->left;
1043 
1044 		if (next) {
1045 			fn = next;
1046 			continue;
1047 		}
1048 		break;
1049 	}
1050 
1051 	while (fn) {
1052 		if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
1053 			struct rt6key *key;
1054 
1055 			key = (struct rt6key *) ((u8 *) fn->leaf +
1056 						 args->offset);
1057 
1058 			if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
1059 #ifdef CONFIG_IPV6_SUBTREES
1060 				if (fn->subtree) {
1061 					struct fib6_node *sfn;
1062 					sfn = fib6_lookup_1(fn->subtree,
1063 							    args + 1);
1064 					if (!sfn)
1065 						goto backtrack;
1066 					fn = sfn;
1067 				}
1068 #endif
1069 				if (fn->fn_flags & RTN_RTINFO)
1070 					return fn;
1071 			}
1072 		}
1073 #ifdef CONFIG_IPV6_SUBTREES
1074 backtrack:
1075 #endif
1076 		if (fn->fn_flags & RTN_ROOT)
1077 			break;
1078 
1079 		fn = fn->parent;
1080 	}
1081 
1082 	return NULL;
1083 }
1084 
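/*
 * Reading aid: fib6_lookup() never returns NULL; if no node carrying
 * routing info matches, it falls back to the tree root.
 */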
1085 struct fib6_node *fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
1086 			      const struct in6_addr *saddr)
1087 {
1088 	struct fib6_node *fn;
1089 	struct lookup_args args[] = {
1090 		{
1091 			.offset = offsetof(struct rt6_info, rt6i_dst),
1092 			.addr = daddr,
1093 		},
1094 #ifdef CONFIG_IPV6_SUBTREES
1095 		{
1096 			.offset = offsetof(struct rt6_info, rt6i_src),
1097 			.addr = saddr,
1098 		},
1099 #endif
1100 		{
1101 			.offset = 0,	/* sentinel */
1102 		}
1103 	};
1104 
1105 	fn = fib6_lookup_1(root, daddr ? args : args + 1);
1106 	if (!fn || fn->fn_flags & RTN_TL_ROOT)
1107 		fn = root;
1108 
1109 	return fn;
1110 }
1111 
1112 /*
1113  *	Get node with specified destination prefix (and source prefix,
1114  *	if subtrees are used)
1115  */
1116 
1117 
1118 static struct fib6_node *fib6_locate_1(struct fib6_node *root,
1119 				       const struct in6_addr *addr,
1120 				       int plen, int offset)
1121 {
1122 	struct fib6_node *fn;
1123 
1124 	for (fn = root; fn ; ) {
1125 		struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset);
1126 
1127 		/*
1128 		 *	Prefix match
1129 		 */
1130 		if (plen < fn->fn_bit ||
1131 		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
1132 			return NULL;
1133 
1134 		if (plen == fn->fn_bit)
1135 			return fn;
1136 
1137 		/*
1138 		 *	We have more bits to go
1139 		 */
1140 		if (addr_bit_set(addr, fn->fn_bit))
1141 			fn = fn->right;
1142 		else
1143 			fn = fn->left;
1144 	}
1145 	return NULL;
1146 }
1147 
1148 struct fib6_node *fib6_locate(struct fib6_node *root,
1149 			      const struct in6_addr *daddr, int dst_len,
1150 			      const struct in6_addr *saddr, int src_len)
1151 {
1152 	struct fib6_node *fn;
1153 
1154 	fn = fib6_locate_1(root, daddr, dst_len,
1155 			   offsetof(struct rt6_info, rt6i_dst));
1156 
1157 #ifdef CONFIG_IPV6_SUBTREES
1158 	if (src_len) {
1159 		WARN_ON(saddr == NULL);
1160 		if (fn && fn->subtree)
1161 			fn = fib6_locate_1(fn->subtree, saddr, src_len,
1162 					   offsetof(struct rt6_info, rt6i_src));
1163 	}
1164 #endif
1165 
1166 	if (fn && fn->fn_flags & RTN_RTINFO)
1167 		return fn;
1168 
1169 	return NULL;
1170 }
1171 
1172 
1173 /*
1174  *	Deletion
1175  *
1176  */
1177 
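/*
 * Reading aid: pick a replacement leaf for an intermediate node.  A root
 * node falls back to ip6_null_entry; otherwise borrow the leaf of a child
 * (left first, then right), descending into the node's subtree if it has
 * no children.
 */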
1178 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
1179 {
1180 	if (fn->fn_flags & RTN_ROOT)
1181 		return net->ipv6.ip6_null_entry;
1182 
1183 	while (fn) {
1184 		if (fn->left)
1185 			return fn->left->leaf;
1186 		if (fn->right)
1187 			return fn->right->leaf;
1188 
1189 		fn = FIB6_SUBTREE(fn);
1190 	}
1191 	return NULL;
1192 }
1193 
1194 /*
1195  *	Called to trim the tree of intermediate nodes when possible. "fn"
1196  *	is the node we want to try and remove.
1197  */
1198 
1199 static struct fib6_node *fib6_repair_tree(struct net *net,
1200 					   struct fib6_node *fn)
1201 {
1202 	int children;
1203 	int nstate;
1204 	struct fib6_node *child, *pn;
1205 	struct fib6_walker *w;
1206 	int iter = 0;
1207 
1208 	for (;;) {
1209 		RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
1210 		iter++;
1211 
1212 		WARN_ON(fn->fn_flags & RTN_RTINFO);
1213 		WARN_ON(fn->fn_flags & RTN_TL_ROOT);
1214 		WARN_ON(fn->leaf != NULL);
1215 
1216 		children = 0;
1217 		child = NULL;
1218 		if (fn->right)
1219 			child = fn->right, children |= 1;
1220 		if (fn->left)
1221 			child = fn->left, children |= 2;
1222 
1223 		if (children == 3 || FIB6_SUBTREE(fn)
1224 #ifdef CONFIG_IPV6_SUBTREES
1225 		    /* Subtree root (i.e. fn) may have one child */
1226 		    || (children && fn->fn_flags & RTN_ROOT)
1227 #endif
1228 		    ) {
1229 			fn->leaf = fib6_find_prefix(net, fn);
1230 #if RT6_DEBUG >= 2
1231 			if (!fn->leaf) {
1232 				WARN_ON(!fn->leaf);
1233 				fn->leaf = net->ipv6.ip6_null_entry;
1234 			}
1235 #endif
1236 			atomic_inc(&fn->leaf->rt6i_ref);
1237 			return fn->parent;
1238 		}
1239 
1240 		pn = fn->parent;
1241 #ifdef CONFIG_IPV6_SUBTREES
1242 		if (FIB6_SUBTREE(pn) == fn) {
1243 			WARN_ON(!(fn->fn_flags & RTN_ROOT));
1244 			FIB6_SUBTREE(pn) = NULL;
1245 			nstate = FWS_L;
1246 		} else {
1247 			WARN_ON(fn->fn_flags & RTN_ROOT);
1248 #endif
1249 			if (pn->right == fn)
1250 				pn->right = child;
1251 			else if (pn->left == fn)
1252 				pn->left = child;
1253 #if RT6_DEBUG >= 2
1254 			else
1255 				WARN_ON(1);
1256 #endif
1257 			if (child)
1258 				child->parent = pn;
1259 			nstate = FWS_R;
1260 #ifdef CONFIG_IPV6_SUBTREES
1261 		}
1262 #endif
1263 
1264 		read_lock(&fib6_walker_lock);
1265 		FOR_WALKERS(w) {
1266 			if (!child) {
1267 				if (w->root == fn) {
1268 					w->root = w->node = NULL;
1269 					RT6_TRACE("W %p adjusted by delroot 1\n", w);
1270 				} else if (w->node == fn) {
1271 					RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
1272 					w->node = pn;
1273 					w->state = nstate;
1274 				}
1275 			} else {
1276 				if (w->root == fn) {
1277 					w->root = child;
1278 					RT6_TRACE("W %p adjusted by delroot 2\n", w);
1279 				}
1280 				if (w->node == fn) {
1281 					w->node = child;
1282 					if (children&2) {
1283 						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
1284 						w->state = w->state >= FWS_R ? FWS_U : FWS_INIT;
1285 					} else {
1286 						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
1287 						w->state = w->state >= FWS_C ? FWS_U : FWS_INIT;
1288 					}
1289 				}
1290 			}
1291 		}
1292 		read_unlock(&fib6_walker_lock);
1293 
1294 		node_free(fn);
1295 		if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn))
1296 			return pn;
1297 
1298 		rt6_release(pn->leaf);
1299 		pn->leaf = NULL;
1300 		fn = pn;
1301 	}
1302 }
1303 
1304 static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1305 			   struct nl_info *info)
1306 {
1307 	struct fib6_walker *w;
1308 	struct rt6_info *rt = *rtp;
1309 	struct net *net = info->nl_net;
1310 
1311 	RT6_TRACE("fib6_del_route\n");
1312 
1313 	/* Unlink it */
1314 	*rtp = rt->dst.rt6_next;
1315 	rt->rt6i_node = NULL;
1316 	net->ipv6.rt6_stats->fib_rt_entries--;
1317 	net->ipv6.rt6_stats->fib_discarded_routes++;
1318 
1319 	/* Reset round-robin state, if necessary */
1320 	if (fn->rr_ptr == rt)
1321 		fn->rr_ptr = NULL;
1322 
1323 	/* Remove this entry from other siblings */
1324 	if (rt->rt6i_nsiblings) {
1325 		struct rt6_info *sibling, *next_sibling;
1326 
1327 		list_for_each_entry_safe(sibling, next_sibling,
1328 					 &rt->rt6i_siblings, rt6i_siblings)
1329 			sibling->rt6i_nsiblings--;
1330 		rt->rt6i_nsiblings = 0;
1331 		list_del_init(&rt->rt6i_siblings);
1332 	}
1333 
1334 	/* Adjust walkers */
1335 	read_lock(&fib6_walker_lock);
1336 	FOR_WALKERS(w) {
1337 		if (w->state == FWS_C && w->leaf == rt) {
1338 			RT6_TRACE("walker %p adjusted by delroute\n", w);
1339 			w->leaf = rt->dst.rt6_next;
1340 			if (!w->leaf)
1341 				w->state = FWS_U;
1342 		}
1343 	}
1344 	read_unlock(&fib6_walker_lock);
1345 
1346 	rt->dst.rt6_next = NULL;
1347 
1348 	/* If it was last route, expunge its radix tree node */
1349 	if (!fn->leaf) {
1350 		fn->fn_flags &= ~RTN_RTINFO;
1351 		net->ipv6.rt6_stats->fib_route_nodes--;
1352 		fn = fib6_repair_tree(net, fn);
1353 	}
1354 
1355 	fib6_purge_rt(rt, fn, net);
1356 
1357 	inet6_rt_notify(RTM_DELROUTE, rt, info);
1358 	rt6_release(rt);
1359 }
1360 
1361 int fib6_del(struct rt6_info *rt, struct nl_info *info)
1362 {
1363 	struct net *net = info->nl_net;
1364 	struct fib6_node *fn = rt->rt6i_node;
1365 	struct rt6_info **rtp;
1366 
1367 #if RT6_DEBUG >= 2
1368 	if (rt->dst.obsolete > 0) {
1369 		WARN_ON(fn != NULL);
1370 		return -ENOENT;
1371 	}
1372 #endif
1373 	if (!fn || rt == net->ipv6.ip6_null_entry)
1374 		return -ENOENT;
1375 
1376 	WARN_ON(!(fn->fn_flags & RTN_RTINFO));
1377 
1378 	if (!(rt->rt6i_flags & RTF_CACHE)) {
1379 		struct fib6_node *pn = fn;
1380 #ifdef CONFIG_IPV6_SUBTREES
1381 		/* clones of this route might be in another subtree */
1382 		if (rt->rt6i_src.plen) {
1383 			while (!(pn->fn_flags & RTN_ROOT))
1384 				pn = pn->parent;
1385 			pn = pn->parent;
1386 		}
1387 #endif
1388 		fib6_prune_clones(info->nl_net, pn);
1389 	}
1390 
1391 	/*
1392 	 *	Walk the leaf entries looking for ourself
1393 	 */
1394 
1395 	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) {
1396 		if (*rtp == rt) {
1397 			fib6_del_route(fn, rtp, info);
1398 			return 0;
1399 		}
1400 	}
1401 	return -ENOENT;
1402 }
1403 
1404 /*
1405  *	Tree traversal function.
1406  *
1407  *	Certainly, it is not interrupt safe.
1408  *	However, it is internally reentrant wrt itself and fib6_add/fib6_del.
1409  *	This means that we can modify the tree during walking
1410  *	and use this function for garbage collection, clone pruning,
1411  *	cleaning the tree when a device goes down, etc.
1412  *
1413  *	It guarantees that every node will be traversed,
1414  *	and that it will be traversed only once.
1415  *
1416  *	Callback function w->func may return:
1417  *	0 -> continue walking.
1418  *	positive value -> walking is suspended (used by tree dumps,
1419  *	and possibly by gc, if it is ever split into several slices)
1420  *	negative value -> terminate walking.
1421  *
1422  *	The function itself returns:
1423  *	0   -> walk is complete.
1424  *	>0  -> walk is incomplete (i.e. suspended)
1425  *	<0  -> walk is terminated by an error.
1426  */
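/*
 * Reading aid: walker states used by the switch below:
 *	FWS_S - visit the node's source subtree first (CONFIG_IPV6_SUBTREES only)
 *	FWS_L - descend into the left child
 *	FWS_R - descend into the right child
 *	FWS_C - run w->func on the node's leaf chain
 *	FWS_U - climb back up to the parent
 */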
1427 
1428 static int fib6_walk_continue(struct fib6_walker *w)
1429 {
1430 	struct fib6_node *fn, *pn;
1431 
1432 	for (;;) {
1433 		fn = w->node;
1434 		if (!fn)
1435 			return 0;
1436 
1437 		if (w->prune && fn != w->root &&
1438 		    fn->fn_flags & RTN_RTINFO && w->state < FWS_C) {
1439 			w->state = FWS_C;
1440 			w->leaf = fn->leaf;
1441 		}
1442 		switch (w->state) {
1443 #ifdef CONFIG_IPV6_SUBTREES
1444 		case FWS_S:
1445 			if (FIB6_SUBTREE(fn)) {
1446 				w->node = FIB6_SUBTREE(fn);
1447 				continue;
1448 			}
1449 			w->state = FWS_L;
1450 #endif
1451 		case FWS_L:
1452 			if (fn->left) {
1453 				w->node = fn->left;
1454 				w->state = FWS_INIT;
1455 				continue;
1456 			}
1457 			w->state = FWS_R;
1458 		case FWS_R:
1459 			if (fn->right) {
1460 				w->node = fn->right;
1461 				w->state = FWS_INIT;
1462 				continue;
1463 			}
1464 			w->state = FWS_C;
1465 			w->leaf = fn->leaf;
1466 		case FWS_C:
1467 			if (w->leaf && fn->fn_flags & RTN_RTINFO) {
1468 				int err;
1469 
1470 				if (w->skip) {
1471 					w->skip--;
1472 					goto skip;
1473 				}
1474 
1475 				err = w->func(w);
1476 				if (err)
1477 					return err;
1478 
1479 				w->count++;
1480 				continue;
1481 			}
1482 skip:
1483 			w->state = FWS_U;
1484 		case FWS_U:
1485 			if (fn == w->root)
1486 				return 0;
1487 			pn = fn->parent;
1488 			w->node = pn;
1489 #ifdef CONFIG_IPV6_SUBTREES
1490 			if (FIB6_SUBTREE(pn) == fn) {
1491 				WARN_ON(!(fn->fn_flags & RTN_ROOT));
1492 				w->state = FWS_L;
1493 				continue;
1494 			}
1495 #endif
1496 			if (pn->left == fn) {
1497 				w->state = FWS_R;
1498 				continue;
1499 			}
1500 			if (pn->right == fn) {
1501 				w->state = FWS_C;
1502 				w->leaf = w->node->leaf;
1503 				continue;
1504 			}
1505 #if RT6_DEBUG >= 2
1506 			WARN_ON(1);
1507 #endif
1508 		}
1509 	}
1510 }
1511 
1512 static int fib6_walk(struct fib6_walker *w)
1513 {
1514 	int res;
1515 
1516 	w->state = FWS_INIT;
1517 	w->node = w->root;
1518 
1519 	fib6_walker_link(w);
1520 	res = fib6_walk_continue(w);
1521 	if (res <= 0)
1522 		fib6_walker_unlink(w);
1523 	return res;
1524 }
1525 
1526 static int fib6_clean_node(struct fib6_walker *w)
1527 {
1528 	int res;
1529 	struct rt6_info *rt;
1530 	struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w);
1531 	struct nl_info info = {
1532 		.nl_net = c->net,
1533 	};
1534 
1535 	if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
1536 	    w->node->fn_sernum != c->sernum)
1537 		w->node->fn_sernum = c->sernum;
1538 
1539 	if (!c->func) {
1540 		WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
1541 		w->leaf = NULL;
1542 		return 0;
1543 	}
1544 
1545 	for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
1546 		res = c->func(rt, c->arg);
1547 		if (res < 0) {
1548 			w->leaf = rt;
1549 			res = fib6_del(rt, &info);
1550 			if (res) {
1551 #if RT6_DEBUG >= 2
1552 				pr_debug("%s: del failed: rt=%p@%p err=%d\n",
1553 					 __func__, rt, rt->rt6i_node, res);
1554 #endif
1555 				continue;
1556 			}
1557 			return 0;
1558 		}
1559 		WARN_ON(res != 0);
1560 	}
1561 	w->leaf = rt;
1562 	return 0;
1563 }
1564 
1565 /*
1566  *	Convenient frontend to tree walker.
1567  *
1568  *	func is called on each route.
1569  *		It may return -1 -> delete this route.
1570  *		              0  -> continue walking
1571  *
1572  *	prune==1 -> only immediate children of node (certainly,
1573  *	ignoring pure split nodes) will be scanned.
1574  */
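/*
 * A minimal cleaner callback looks like this (sketch, for illustration
 * only):
 *
 *	static int drop_cached(struct rt6_info *rt, void *arg)
 *	{
 *		return (rt->rt6i_flags & RTF_CACHE) ? -1 : 0;
 *	}
 *
 * fib6_prune_clone() below is exactly this pattern.
 */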
1575 
1576 static void fib6_clean_tree(struct net *net, struct fib6_node *root,
1577 			    int (*func)(struct rt6_info *, void *arg),
1578 			    bool prune, int sernum, void *arg)
1579 {
1580 	struct fib6_cleaner c;
1581 
1582 	c.w.root = root;
1583 	c.w.func = fib6_clean_node;
1584 	c.w.prune = prune;
1585 	c.w.count = 0;
1586 	c.w.skip = 0;
1587 	c.func = func;
1588 	c.sernum = sernum;
1589 	c.arg = arg;
1590 	c.net = net;
1591 
1592 	fib6_walk(&c.w);
1593 }
1594 
1595 static void __fib6_clean_all(struct net *net,
1596 			     int (*func)(struct rt6_info *, void *),
1597 			     int sernum, void *arg)
1598 {
1599 	struct fib6_table *table;
1600 	struct hlist_head *head;
1601 	unsigned int h;
1602 
1603 	rcu_read_lock();
1604 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
1605 		head = &net->ipv6.fib_table_hash[h];
1606 		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
1607 			write_lock_bh(&table->tb6_lock);
1608 			fib6_clean_tree(net, &table->tb6_root,
1609 					func, false, sernum, arg);
1610 			write_unlock_bh(&table->tb6_lock);
1611 		}
1612 	}
1613 	rcu_read_unlock();
1614 }
1615 
1616 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *),
1617 		    void *arg)
1618 {
1619 	__fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg);
1620 }
1621 
1622 static int fib6_prune_clone(struct rt6_info *rt, void *arg)
1623 {
1624 	if (rt->rt6i_flags & RTF_CACHE) {
1625 		RT6_TRACE("pruning clone %p\n", rt);
1626 		return -1;
1627 	}
1628 
1629 	return 0;
1630 }
1631 
1632 static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
1633 {
1634 	fib6_clean_tree(net, fn, fib6_prune_clone, true,
1635 			FIB6_NO_SERNUM_CHANGE, NULL);
1636 }
1637 
1638 static void fib6_flush_trees(struct net *net)
1639 {
1640 	int new_sernum = fib6_new_sernum(net);
1641 
1642 	__fib6_clean_all(net, NULL, new_sernum, NULL);
1643 }
1644 
1645 /*
1646  *	Garbage collection
1647  */
1648 
1649 static struct fib6_gc_args
1650 {
1651 	int			timeout;
1652 	int			more;
1653 } gc_args;
1654 
1655 static int fib6_age(struct rt6_info *rt, void *arg)
1656 {
1657 	unsigned long now = jiffies;
1658 
1659 	/*
1660 	 *	check addrconf expiration here.
1661 	 *	Routes are expired even if they are in use.
1662 	 *
1663 	 *	Also age clones. Note that clones are aged out
1664 	 *	only if they are not in use now.
1665 	 */
1666 
1667 	if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) {
1668 		if (time_after(now, rt->dst.expires)) {
1669 			RT6_TRACE("expiring %p\n", rt);
1670 			return -1;
1671 		}
1672 		gc_args.more++;
1673 	} else if (rt->rt6i_flags & RTF_CACHE) {
1674 		if (atomic_read(&rt->dst.__refcnt) == 0 &&
1675 		    time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
1676 			RT6_TRACE("aging clone %p\n", rt);
1677 			return -1;
1678 		} else if (rt->rt6i_flags & RTF_GATEWAY) {
1679 			struct neighbour *neigh;
1680 			__u8 neigh_flags = 0;
1681 
1682 			neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
1683 			if (neigh) {
1684 				neigh_flags = neigh->flags;
1685 				neigh_release(neigh);
1686 			}
1687 			if (!(neigh_flags & NTF_ROUTER)) {
1688 				RT6_TRACE("purging route %p via non-router but gateway\n",
1689 					  rt);
1690 				return -1;
1691 			}
1692 		}
1693 		gc_args.more++;
1694 	}
1695 
1696 	return 0;
1697 }
1698 
1699 static DEFINE_SPINLOCK(fib6_gc_lock);
1700 
1701 void fib6_run_gc(unsigned long expires, struct net *net, bool force)
1702 {
1703 	unsigned long now;
1704 
1705 	if (force) {
1706 		spin_lock_bh(&fib6_gc_lock);
1707 	} else if (!spin_trylock_bh(&fib6_gc_lock)) {
1708 		mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
1709 		return;
1710 	}
1711 	gc_args.timeout = expires ? (int)expires :
1712 			  net->ipv6.sysctl.ip6_rt_gc_interval;
1713 
1714 	gc_args.more = icmp6_dst_gc();
1715 
1716 	fib6_clean_all(net, fib6_age, NULL);
1717 	now = jiffies;
1718 	net->ipv6.ip6_rt_last_gc = now;
1719 
1720 	if (gc_args.more)
1721 		mod_timer(&net->ipv6.ip6_fib_timer,
1722 			  round_jiffies(now
1723 					+ net->ipv6.sysctl.ip6_rt_gc_interval));
1724 	else
1725 		del_timer(&net->ipv6.ip6_fib_timer);
1726 	spin_unlock_bh(&fib6_gc_lock);
1727 }
1728 
1729 static void fib6_gc_timer_cb(unsigned long arg)
1730 {
1731 	fib6_run_gc(0, (struct net *)arg, true);
1732 }
1733 
1734 static int __net_init fib6_net_init(struct net *net)
1735 {
1736 	size_t size = sizeof(struct hlist_head) * FIB6_TABLE_HASHSZ;
1737 
1738 	setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
1739 
1740 	net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
1741 	if (!net->ipv6.rt6_stats)
1742 		goto out_timer;
1743 
1744 	/* Avoid false sharing: use at least a full cache line */
1745 	size = max_t(size_t, size, L1_CACHE_BYTES);
1746 
1747 	net->ipv6.fib_table_hash = kzalloc(size, GFP_KERNEL);
1748 	if (!net->ipv6.fib_table_hash)
1749 		goto out_rt6_stats;
1750 
1751 	net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl),
1752 					  GFP_KERNEL);
1753 	if (!net->ipv6.fib6_main_tbl)
1754 		goto out_fib_table_hash;
1755 
1756 	net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN;
1757 	net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1758 	net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
1759 		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1760 	inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
1761 
1762 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1763 	net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
1764 					   GFP_KERNEL);
1765 	if (!net->ipv6.fib6_local_tbl)
1766 		goto out_fib6_main_tbl;
1767 	net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
1768 	net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1769 	net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
1770 		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1771 	inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
1772 #endif
1773 	fib6_tables_init(net);
1774 
1775 	return 0;
1776 
1777 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1778 out_fib6_main_tbl:
1779 	kfree(net->ipv6.fib6_main_tbl);
1780 #endif
1781 out_fib_table_hash:
1782 	kfree(net->ipv6.fib_table_hash);
1783 out_rt6_stats:
1784 	kfree(net->ipv6.rt6_stats);
1785 out_timer:
1786 	return -ENOMEM;
1787 }
1788 
1789 static void fib6_net_exit(struct net *net)
1790 {
1791 	unsigned int i;
1792 
1793 	rt6_ifdown(net, NULL);
1794 	del_timer_sync(&net->ipv6.ip6_fib_timer);
1795 
1796 	for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
1797 		struct hlist_head *head = &net->ipv6.fib_table_hash[i];
1798 		struct hlist_node *tmp;
1799 		struct fib6_table *tb;
1800 
1801 		hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
1802 			hlist_del(&tb->tb6_hlist);
1803 			fib6_free_table(tb);
1804 		}
1805 	}
1806 
1807 	kfree(net->ipv6.fib_table_hash);
1808 	kfree(net->ipv6.rt6_stats);
1809 }
1810 
1811 static struct pernet_operations fib6_net_ops = {
1812 	.init = fib6_net_init,
1813 	.exit = fib6_net_exit,
1814 };
1815 
1816 int __init fib6_init(void)
1817 {
1818 	int ret = -ENOMEM;
1819 
1820 	fib6_node_kmem = kmem_cache_create("fib6_nodes",
1821 					   sizeof(struct fib6_node),
1822 					   0, SLAB_HWCACHE_ALIGN,
1823 					   NULL);
1824 	if (!fib6_node_kmem)
1825 		goto out;
1826 
1827 	ret = register_pernet_subsys(&fib6_net_ops);
1828 	if (ret)
1829 		goto out_kmem_cache_create;
1830 
1831 	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
1832 			      NULL);
1833 	if (ret)
1834 		goto out_unregister_subsys;
1835 
1836 	__fib6_flush_trees = fib6_flush_trees;
1837 out:
1838 	return ret;
1839 
1840 out_unregister_subsys:
1841 	unregister_pernet_subsys(&fib6_net_ops);
1842 out_kmem_cache_create:
1843 	kmem_cache_destroy(fib6_node_kmem);
1844 	goto out;
1845 }
1846 
1847 void fib6_gc_cleanup(void)
1848 {
1849 	unregister_pernet_subsys(&fib6_net_ops);
1850 	kmem_cache_destroy(fib6_node_kmem);
1851 }
1852 
1853 #ifdef CONFIG_PROC_FS
1854 
1855 struct ipv6_route_iter {
1856 	struct seq_net_private p;
1857 	struct fib6_walker w;
1858 	loff_t skip;
1859 	struct fib6_table *tbl;
1860 	int sernum;
1861 };
1862 
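/*
 * Reading aid: each /proc/net/ipv6_route line printed below consists of
 * dst prefix/len, src prefix/len (zeros without CONFIG_IPV6_SUBTREES),
 * gateway, metric, refcnt, use count, flags and the device name.
 */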
1863 static int ipv6_route_seq_show(struct seq_file *seq, void *v)
1864 {
1865 	struct rt6_info *rt = v;
1866 	struct ipv6_route_iter *iter = seq->private;
1867 
1868 	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
1869 
1870 #ifdef CONFIG_IPV6_SUBTREES
1871 	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
1872 #else
1873 	seq_puts(seq, "00000000000000000000000000000000 00 ");
1874 #endif
1875 	if (rt->rt6i_flags & RTF_GATEWAY)
1876 		seq_printf(seq, "%pi6", &rt->rt6i_gateway);
1877 	else
1878 		seq_puts(seq, "00000000000000000000000000000000");
1879 
1880 	seq_printf(seq, " %08x %08x %08x %08x %8s\n",
1881 		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
1882 		   rt->dst.__use, rt->rt6i_flags,
1883 		   rt->dst.dev ? rt->dst.dev->name : "");
1884 	iter->w.leaf = NULL;
1885 	return 0;
1886 }
1887 
1888 static int ipv6_route_yield(struct fib6_walker *w)
1889 {
1890 	struct ipv6_route_iter *iter = w->args;
1891 
1892 	if (!iter->skip)
1893 		return 1;
1894 
1895 	do {
1896 		iter->w.leaf = iter->w.leaf->dst.rt6_next;
1897 		iter->skip--;
1898 		if (!iter->skip && iter->w.leaf)
1899 			return 1;
1900 	} while (iter->w.leaf);
1901 
1902 	return 0;
1903 }
1904 
1905 static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter)
1906 {
1907 	memset(&iter->w, 0, sizeof(iter->w));
1908 	iter->w.func = ipv6_route_yield;
1909 	iter->w.root = &iter->tbl->tb6_root;
1910 	iter->w.state = FWS_INIT;
1911 	iter->w.node = iter->w.root;
1912 	iter->w.args = iter;
1913 	iter->sernum = iter->w.root->fn_sernum;
1914 	INIT_LIST_HEAD(&iter->w.lh);
1915 	fib6_walker_link(&iter->w);
1916 }
1917 
1918 static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
1919 						    struct net *net)
1920 {
1921 	unsigned int h;
1922 	struct hlist_node *node;
1923 
1924 	if (tbl) {
1925 		h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
1926 		node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
1927 	} else {
1928 		h = 0;
1929 		node = NULL;
1930 	}
1931 
1932 	while (!node && h < FIB6_TABLE_HASHSZ) {
1933 		node = rcu_dereference_bh(
1934 			hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
1935 	}
1936 	return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
1937 }
1938 
1939 static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
1940 {
1941 	if (iter->sernum != iter->w.root->fn_sernum) {
1942 		iter->sernum = iter->w.root->fn_sernum;
1943 		iter->w.state = FWS_INIT;
1944 		iter->w.node = iter->w.root;
1945 		WARN_ON(iter->w.skip);
1946 		iter->w.skip = iter->w.count;
1947 	}
1948 }
1949 
1950 static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1951 {
1952 	int r;
1953 	struct rt6_info *n;
1954 	struct net *net = seq_file_net(seq);
1955 	struct ipv6_route_iter *iter = seq->private;
1956 
1957 	if (!v)
1958 		goto iter_table;
1959 
1960 	n = ((struct rt6_info *)v)->dst.rt6_next;
1961 	if (n) {
1962 		++*pos;
1963 		return n;
1964 	}
1965 
1966 iter_table:
1967 	ipv6_route_check_sernum(iter);
1968 	read_lock(&iter->tbl->tb6_lock);
1969 	r = fib6_walk_continue(&iter->w);
1970 	read_unlock(&iter->tbl->tb6_lock);
1971 	if (r > 0) {
1972 		if (v)
1973 			++*pos;
1974 		return iter->w.leaf;
1975 	} else if (r < 0) {
1976 		fib6_walker_unlink(&iter->w);
1977 		return NULL;
1978 	}
1979 	fib6_walker_unlink(&iter->w);
1980 
1981 	iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
1982 	if (!iter->tbl)
1983 		return NULL;
1984 
1985 	ipv6_route_seq_setup_walk(iter);
1986 	goto iter_table;
1987 }
1988 
1989 static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
1990 	__acquires(RCU_BH)
1991 {
1992 	struct net *net = seq_file_net(seq);
1993 	struct ipv6_route_iter *iter = seq->private;
1994 
1995 	rcu_read_lock_bh();
1996 	iter->tbl = ipv6_route_seq_next_table(NULL, net);
1997 	iter->skip = *pos;
1998 
1999 	if (iter->tbl) {
2000 		ipv6_route_seq_setup_walk(iter);
2001 		return ipv6_route_seq_next(seq, NULL, pos);
2002 	} else {
2003 		return NULL;
2004 	}
2005 }
2006 
2007 static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
2008 {
2009 	struct fib6_walker *w = &iter->w;
2010 	return w->node && !(w->state == FWS_U && w->node == w->root);
2011 }
2012 
2013 static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
2014 	__releases(RCU_BH)
2015 {
2016 	struct ipv6_route_iter *iter = seq->private;
2017 
2018 	if (ipv6_route_iter_active(iter))
2019 		fib6_walker_unlink(&iter->w);
2020 
2021 	rcu_read_unlock_bh();
2022 }
2023 
2024 static const struct seq_operations ipv6_route_seq_ops = {
2025 	.start	= ipv6_route_seq_start,
2026 	.next	= ipv6_route_seq_next,
2027 	.stop	= ipv6_route_seq_stop,
2028 	.show	= ipv6_route_seq_show
2029 };
2030 
2031 int ipv6_route_open(struct inode *inode, struct file *file)
2032 {
2033 	return seq_open_net(inode, file, &ipv6_route_seq_ops,
2034 			    sizeof(struct ipv6_route_iter));
2035 }
2036 
2037 #endif /* CONFIG_PROC_FS */
2038