1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(unsigned long arg);
55 static void __neigh_notify(struct neighbour *n, int type, int flags);
56 static void neigh_update_notify(struct neighbour *neigh);
57 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
58 				    struct net_device *dev);
59 
60 #ifdef CONFIG_PROC_FS
61 static const struct file_operations neigh_stat_seq_fops;
62 #endif
63 
64 /*
65    Neighbour hash table buckets are protected by the rwlock tbl->lock.
66 
67    - All scans/updates of the hash buckets MUST be done under this lock.
68    - NOTHING clever should be done under this lock: no callbacks
69      into protocol backends, no attempts to send anything to the network.
70      Doing so will deadlock if the backend/driver wants to use the
71      neighbour cache.
72    - If an entry requires some non-trivial action, increase
73      its reference count and release the table lock (see the sketch below).
74 
75    Neighbour entries are protected:
76    - by their reference count.
77    - by the rwlock neigh->lock
78 
79    The reference count prevents destruction.
80 
81    neigh->lock mainly serializes the ll address data and its validity state.
82    However, the same lock is also used to protect other entry fields:
83     - timer
84     - resolution queue
85 
86    Again, nothing clever shall be done under neigh->lock;
87    the most complicated procedure we allow is dev->hard_header.
88    dev->hard_header is assumed to be simple and not to make
89    callbacks into the neighbour tables.
90  */
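
/* Illustrative sketch (added; not part of the original file): the usual
 * pattern for doing non-trivial work on an entry found while scanning a
 * bucket under tbl->lock is to pin it with a reference, drop the table
 * lock, and only then do the slow work:
 *
 *	write_lock_bh(&tbl->lock);
 *	n = <entry found in a hash bucket>;
 *	neigh_hold(n);			(pin the entry)
 *	write_unlock_bh(&tbl->lock);	(no callbacks/sends under tbl->lock)
 *	... slow work; may take neigh->lock or call into the driver ...
 *	neigh_release(n);		(drop the reference when done)
 */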
91 
92 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
93 {
94 	kfree_skb(skb);
95 	return -ENETDOWN;
96 }
97 
98 static void neigh_cleanup_and_release(struct neighbour *neigh)
99 {
100 	if (neigh->parms->neigh_cleanup)
101 		neigh->parms->neigh_cleanup(neigh);
102 
103 	__neigh_notify(neigh, RTM_DELNEIGH, 0);
104 	neigh_release(neigh);
105 }
106 
107 /*
108  * Returns a value uniformly distributed in the interval (1/2)*base ... (3/2)*base.
109  * This matches the default IPv6 behaviour and is not overridable,
110  * because it is a genuinely reasonable choice.
111  */
112 
113 unsigned long neigh_rand_reach_time(unsigned long base)
114 {
115 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
116 }
117 EXPORT_SYMBOL(neigh_rand_reach_time);
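
/* Worked example (added; not from the original source): with the default
 * base_reachable_time of 30 seconds, neigh_rand_reach_time(30 * HZ) above
 * returns a value uniformly distributed over [15s, 45s):
 *
 *	prandom_u32() % base	is uniform over [0, base)         == [0s, 30s)
 *	+ (base >> 1)		shifts that to [base/2, 3*base/2) == [15s, 45s)
 *
 * Randomizing ReachableTime this way (as RFC 4861 recommends) keeps hosts
 * on a link from re-probing their neighbours in lock step.
 */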
118 
119 
120 static int neigh_forced_gc(struct neigh_table *tbl)
121 {
122 	int shrunk = 0;
123 	int i;
124 	struct neigh_hash_table *nht;
125 
126 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
127 
128 	write_lock_bh(&tbl->lock);
129 	nht = rcu_dereference_protected(tbl->nht,
130 					lockdep_is_held(&tbl->lock));
131 	for (i = 0; i < (1 << nht->hash_shift); i++) {
132 		struct neighbour *n;
133 		struct neighbour __rcu **np;
134 
135 		np = &nht->hash_buckets[i];
136 		while ((n = rcu_dereference_protected(*np,
137 					lockdep_is_held(&tbl->lock))) != NULL) {
138 			/* Neighbour record may be discarded if:
139 			 * - nobody refers to it.
140 			 * - it is not permanent
141 			 */
142 			write_lock(&n->lock);
143 			if (atomic_read(&n->refcnt) == 1 &&
144 			    !(n->nud_state & NUD_PERMANENT)) {
145 				rcu_assign_pointer(*np,
146 					rcu_dereference_protected(n->next,
147 						  lockdep_is_held(&tbl->lock)));
148 				n->dead = 1;
149 				shrunk	= 1;
150 				write_unlock(&n->lock);
151 				neigh_cleanup_and_release(n);
152 				continue;
153 			}
154 			write_unlock(&n->lock);
155 			np = &n->next;
156 		}
157 	}
158 
159 	tbl->last_flush = jiffies;
160 
161 	write_unlock_bh(&tbl->lock);
162 
163 	return shrunk;
164 }
165 
166 static void neigh_add_timer(struct neighbour *n, unsigned long when)
167 {
168 	neigh_hold(n);
169 	if (unlikely(mod_timer(&n->timer, when))) {
170 		printk("NEIGH: BUG, double timer add, state is %x\n",
171 		       n->nud_state);
172 		dump_stack();
173 	}
174 }
175 
176 static int neigh_del_timer(struct neighbour *n)
177 {
178 	if ((n->nud_state & NUD_IN_TIMER) &&
179 	    del_timer(&n->timer)) {
180 		neigh_release(n);
181 		return 1;
182 	}
183 	return 0;
184 }
185 
186 static void pneigh_queue_purge(struct sk_buff_head *list)
187 {
188 	struct sk_buff *skb;
189 
190 	while ((skb = skb_dequeue(list)) != NULL) {
191 		dev_put(skb->dev);
192 		kfree_skb(skb);
193 	}
194 }
195 
196 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
197 {
198 	int i;
199 	struct neigh_hash_table *nht;
200 
201 	nht = rcu_dereference_protected(tbl->nht,
202 					lockdep_is_held(&tbl->lock));
203 
204 	for (i = 0; i < (1 << nht->hash_shift); i++) {
205 		struct neighbour *n;
206 		struct neighbour __rcu **np = &nht->hash_buckets[i];
207 
208 		while ((n = rcu_dereference_protected(*np,
209 					lockdep_is_held(&tbl->lock))) != NULL) {
210 			if (dev && n->dev != dev) {
211 				np = &n->next;
212 				continue;
213 			}
214 			rcu_assign_pointer(*np,
215 				   rcu_dereference_protected(n->next,
216 						lockdep_is_held(&tbl->lock)));
217 			write_lock(&n->lock);
218 			neigh_del_timer(n);
219 			n->dead = 1;
220 
221 			if (atomic_read(&n->refcnt) != 1) {
222 			/* The most unpleasant situation:
223 			   we must destroy the neighbour entry,
224 			   but someone still uses it.
225 
226 			   The destroy will be delayed until
227 			   the last user releases it, but
228 			   we must kill timers etc. and move
229 			   it to a safe state.
230 			 */
231 				__skb_queue_purge(&n->arp_queue);
232 				n->arp_queue_len_bytes = 0;
233 				n->output = neigh_blackhole;
234 				if (n->nud_state & NUD_VALID)
235 					n->nud_state = NUD_NOARP;
236 				else
237 					n->nud_state = NUD_NONE;
238 				neigh_dbg(2, "neigh %p is stray\n", n);
239 			}
240 			write_unlock(&n->lock);
241 			neigh_cleanup_and_release(n);
242 		}
243 	}
244 }
245 
246 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
247 {
248 	write_lock_bh(&tbl->lock);
249 	neigh_flush_dev(tbl, dev);
250 	write_unlock_bh(&tbl->lock);
251 }
252 EXPORT_SYMBOL(neigh_changeaddr);
253 
254 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
255 {
256 	write_lock_bh(&tbl->lock);
257 	neigh_flush_dev(tbl, dev);
258 	pneigh_ifdown_and_unlock(tbl, dev);
259 
260 	del_timer_sync(&tbl->proxy_timer);
261 	pneigh_queue_purge(&tbl->proxy_queue);
262 	return 0;
263 }
264 EXPORT_SYMBOL(neigh_ifdown);
265 
266 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
267 {
268 	struct neighbour *n = NULL;
269 	unsigned long now = jiffies;
270 	int entries;
271 
272 	entries = atomic_inc_return(&tbl->entries) - 1;
273 	if (entries >= tbl->gc_thresh3 ||
274 	    (entries >= tbl->gc_thresh2 &&
275 	     time_after(now, tbl->last_flush + 5 * HZ))) {
276 		if (!neigh_forced_gc(tbl) &&
277 		    entries >= tbl->gc_thresh3) {
278 			net_info_ratelimited("%s: neighbor table overflow!\n",
279 					     tbl->id);
280 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
281 			goto out_entries;
282 		}
283 	}
284 
285 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
286 	if (!n)
287 		goto out_entries;
288 
289 	__skb_queue_head_init(&n->arp_queue);
290 	rwlock_init(&n->lock);
291 	seqlock_init(&n->ha_lock);
292 	n->updated	  = n->used = now;
293 	n->nud_state	  = NUD_NONE;
294 	n->output	  = neigh_blackhole;
295 	seqlock_init(&n->hh.hh_lock);
296 	n->parms	  = neigh_parms_clone(&tbl->parms);
297 	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
298 
299 	NEIGH_CACHE_STAT_INC(tbl, allocs);
300 	n->tbl		  = tbl;
301 	atomic_set(&n->refcnt, 1);
302 	n->dead		  = 1;
303 out:
304 	return n;
305 
306 out_entries:
307 	atomic_dec(&tbl->entries);
308 	goto out;
309 }
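
/* Added note summarizing the threshold logic in neigh_alloc() above:
 * gc_thresh3 is a hard cap on the number of entries, while gc_thresh2 only
 * triggers a synchronous neigh_forced_gc() when the last flush was more
 * than 5 seconds ago.  The allocation fails with "neighbor table overflow!"
 * only if forced GC frees nothing and we are still at or above gc_thresh3.
 */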
310 
311 static void neigh_get_hash_rnd(u32 *x)
312 {
313 	get_random_bytes(x, sizeof(*x));
314 	*x |= 1;
315 }
316 
317 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
318 {
319 	size_t size = (1 << shift) * sizeof(struct neighbour *);
320 	struct neigh_hash_table *ret;
321 	struct neighbour __rcu **buckets;
322 	int i;
323 
324 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
325 	if (!ret)
326 		return NULL;
327 	if (size <= PAGE_SIZE)
328 		buckets = kzalloc(size, GFP_ATOMIC);
329 	else
330 		buckets = (struct neighbour __rcu **)
331 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
332 					   get_order(size));
333 	if (!buckets) {
334 		kfree(ret);
335 		return NULL;
336 	}
337 	ret->hash_buckets = buckets;
338 	ret->hash_shift = shift;
339 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
340 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
341 	return ret;
342 }
343 
344 static void neigh_hash_free_rcu(struct rcu_head *head)
345 {
346 	struct neigh_hash_table *nht = container_of(head,
347 						    struct neigh_hash_table,
348 						    rcu);
349 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
350 	struct neighbour __rcu **buckets = nht->hash_buckets;
351 
352 	if (size <= PAGE_SIZE)
353 		kfree(buckets);
354 	else
355 		free_pages((unsigned long)buckets, get_order(size));
356 	kfree(nht);
357 }
358 
359 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
360 						unsigned long new_shift)
361 {
362 	unsigned int i, hash;
363 	struct neigh_hash_table *new_nht, *old_nht;
364 
365 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
366 
367 	old_nht = rcu_dereference_protected(tbl->nht,
368 					    lockdep_is_held(&tbl->lock));
369 	new_nht = neigh_hash_alloc(new_shift);
370 	if (!new_nht)
371 		return old_nht;
372 
373 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
374 		struct neighbour *n, *next;
375 
376 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
377 						   lockdep_is_held(&tbl->lock));
378 		     n != NULL;
379 		     n = next) {
380 			hash = tbl->hash(n->primary_key, n->dev,
381 					 new_nht->hash_rnd);
382 
383 			hash >>= (32 - new_nht->hash_shift);
384 			next = rcu_dereference_protected(n->next,
385 						lockdep_is_held(&tbl->lock));
386 
387 			rcu_assign_pointer(n->next,
388 					   rcu_dereference_protected(
389 						new_nht->hash_buckets[hash],
390 						lockdep_is_held(&tbl->lock)));
391 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
392 		}
393 	}
394 
395 	rcu_assign_pointer(tbl->nht, new_nht);
396 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
397 	return new_nht;
398 }
399 
400 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
401 			       struct net_device *dev)
402 {
403 	struct neighbour *n;
404 
405 	NEIGH_CACHE_STAT_INC(tbl, lookups);
406 
407 	rcu_read_lock_bh();
408 	n = __neigh_lookup_noref(tbl, pkey, dev);
409 	if (n) {
410 		if (!atomic_inc_not_zero(&n->refcnt))
411 			n = NULL;
412 		NEIGH_CACHE_STAT_INC(tbl, hits);
413 	}
414 
415 	rcu_read_unlock_bh();
416 	return n;
417 }
418 EXPORT_SYMBOL(neigh_lookup);
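
/* Usage sketch (added; illustrative only): a caller resolving an IPv4 next
 * hop against the ARP table might do something like:
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &next_hop, dev);
 *	if (n) {
 *		... read n->ha under the n->ha_lock seqlock ...
 *		neigh_release(n);
 *	}
 *
 * neigh_lookup() returns the entry with its refcount raised (or NULL), so
 * every successful lookup must be paired with neigh_release().
 */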
419 
420 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
421 				     const void *pkey)
422 {
423 	struct neighbour *n;
424 	int key_len = tbl->key_len;
425 	u32 hash_val;
426 	struct neigh_hash_table *nht;
427 
428 	NEIGH_CACHE_STAT_INC(tbl, lookups);
429 
430 	rcu_read_lock_bh();
431 	nht = rcu_dereference_bh(tbl->nht);
432 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
433 
434 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
435 	     n != NULL;
436 	     n = rcu_dereference_bh(n->next)) {
437 		if (!memcmp(n->primary_key, pkey, key_len) &&
438 		    net_eq(dev_net(n->dev), net)) {
439 			if (!atomic_inc_not_zero(&n->refcnt))
440 				n = NULL;
441 			NEIGH_CACHE_STAT_INC(tbl, hits);
442 			break;
443 		}
444 	}
445 
446 	rcu_read_unlock_bh();
447 	return n;
448 }
449 EXPORT_SYMBOL(neigh_lookup_nodev);
450 
451 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
452 				 struct net_device *dev, bool want_ref)
453 {
454 	u32 hash_val;
455 	int key_len = tbl->key_len;
456 	int error;
457 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
458 	struct neigh_hash_table *nht;
459 
460 	if (!n) {
461 		rc = ERR_PTR(-ENOBUFS);
462 		goto out;
463 	}
464 
465 	memcpy(n->primary_key, pkey, key_len);
466 	n->dev = dev;
467 	dev_hold(dev);
468 
469 	/* Protocol specific setup. */
470 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
471 		rc = ERR_PTR(error);
472 		goto out_neigh_release;
473 	}
474 
475 	if (dev->netdev_ops->ndo_neigh_construct) {
476 		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
477 		if (error < 0) {
478 			rc = ERR_PTR(error);
479 			goto out_neigh_release;
480 		}
481 	}
482 
483 	/* Device specific setup. */
484 	if (n->parms->neigh_setup &&
485 	    (error = n->parms->neigh_setup(n)) < 0) {
486 		rc = ERR_PTR(error);
487 		goto out_neigh_release;
488 	}
489 
490 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
491 
492 	write_lock_bh(&tbl->lock);
493 	nht = rcu_dereference_protected(tbl->nht,
494 					lockdep_is_held(&tbl->lock));
495 
496 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
497 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
498 
499 	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
500 
501 	if (n->parms->dead) {
502 		rc = ERR_PTR(-EINVAL);
503 		goto out_tbl_unlock;
504 	}
505 
506 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
507 					    lockdep_is_held(&tbl->lock));
508 	     n1 != NULL;
509 	     n1 = rcu_dereference_protected(n1->next,
510 			lockdep_is_held(&tbl->lock))) {
511 		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
512 			if (want_ref)
513 				neigh_hold(n1);
514 			rc = n1;
515 			goto out_tbl_unlock;
516 		}
517 	}
518 
519 	n->dead = 0;
520 	if (want_ref)
521 		neigh_hold(n);
522 	rcu_assign_pointer(n->next,
523 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
524 						     lockdep_is_held(&tbl->lock)));
525 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
526 	write_unlock_bh(&tbl->lock);
527 	neigh_dbg(2, "neigh %p is created\n", n);
528 	rc = n;
529 out:
530 	return rc;
531 out_tbl_unlock:
532 	write_unlock_bh(&tbl->lock);
533 out_neigh_release:
534 	neigh_release(n);
535 	goto out;
536 }
537 EXPORT_SYMBOL(__neigh_create);
538 
539 static u32 pneigh_hash(const void *pkey, int key_len)
540 {
541 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
542 	hash_val ^= (hash_val >> 16);
543 	hash_val ^= hash_val >> 8;
544 	hash_val ^= hash_val >> 4;
545 	hash_val &= PNEIGH_HASHMASK;
546 	return hash_val;
547 }
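
/* Added note on pneigh_hash() above: it reads the last four bytes of the
 * key and xor-folds them down to a 4-bit bucket index (PNEIGH_HASHMASK is
 * 0xF, i.e. 16 proxy hash buckets).  For IPv4 (key_len == 4) the whole
 * address participates; for IPv6 (key_len == 16) only the trailing 32 bits
 * of the address are folded.
 */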
548 
549 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
550 					      struct net *net,
551 					      const void *pkey,
552 					      int key_len,
553 					      struct net_device *dev)
554 {
555 	while (n) {
556 		if (!memcmp(n->key, pkey, key_len) &&
557 		    net_eq(pneigh_net(n), net) &&
558 		    (n->dev == dev || !n->dev))
559 			return n;
560 		n = n->next;
561 	}
562 	return NULL;
563 }
564 
565 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
566 		struct net *net, const void *pkey, struct net_device *dev)
567 {
568 	int key_len = tbl->key_len;
569 	u32 hash_val = pneigh_hash(pkey, key_len);
570 
571 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
572 				 net, pkey, key_len, dev);
573 }
574 EXPORT_SYMBOL_GPL(__pneigh_lookup);
575 
576 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
577 				    struct net *net, const void *pkey,
578 				    struct net_device *dev, int creat)
579 {
580 	struct pneigh_entry *n;
581 	int key_len = tbl->key_len;
582 	u32 hash_val = pneigh_hash(pkey, key_len);
583 
584 	read_lock_bh(&tbl->lock);
585 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
586 			      net, pkey, key_len, dev);
587 	read_unlock_bh(&tbl->lock);
588 
589 	if (n || !creat)
590 		goto out;
591 
592 	ASSERT_RTNL();
593 
594 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
595 	if (!n)
596 		goto out;
597 
598 	write_pnet(&n->net, net);
599 	memcpy(n->key, pkey, key_len);
600 	n->dev = dev;
601 	if (dev)
602 		dev_hold(dev);
603 
604 	if (tbl->pconstructor && tbl->pconstructor(n)) {
605 		if (dev)
606 			dev_put(dev);
607 		kfree(n);
608 		n = NULL;
609 		goto out;
610 	}
611 
612 	write_lock_bh(&tbl->lock);
613 	n->next = tbl->phash_buckets[hash_val];
614 	tbl->phash_buckets[hash_val] = n;
615 	write_unlock_bh(&tbl->lock);
616 out:
617 	return n;
618 }
619 EXPORT_SYMBOL(pneigh_lookup);
620 
621 
622 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
623 		  struct net_device *dev)
624 {
625 	struct pneigh_entry *n, **np;
626 	int key_len = tbl->key_len;
627 	u32 hash_val = pneigh_hash(pkey, key_len);
628 
629 	write_lock_bh(&tbl->lock);
630 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
631 	     np = &n->next) {
632 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
633 		    net_eq(pneigh_net(n), net)) {
634 			*np = n->next;
635 			write_unlock_bh(&tbl->lock);
636 			if (tbl->pdestructor)
637 				tbl->pdestructor(n);
638 			if (n->dev)
639 				dev_put(n->dev);
640 			kfree(n);
641 			return 0;
642 		}
643 	}
644 	write_unlock_bh(&tbl->lock);
645 	return -ENOENT;
646 }
647 
648 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
649 				    struct net_device *dev)
650 {
651 	struct pneigh_entry *n, **np, *freelist = NULL;
652 	u32 h;
653 
654 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
655 		np = &tbl->phash_buckets[h];
656 		while ((n = *np) != NULL) {
657 			if (!dev || n->dev == dev) {
658 				*np = n->next;
659 				n->next = freelist;
660 				freelist = n;
661 				continue;
662 			}
663 			np = &n->next;
664 		}
665 	}
666 	write_unlock_bh(&tbl->lock);
667 	while ((n = freelist)) {
668 		freelist = n->next;
669 		n->next = NULL;
670 		if (tbl->pdestructor)
671 			tbl->pdestructor(n);
672 		if (n->dev)
673 			dev_put(n->dev);
674 		kfree(n);
675 	}
676 	return -ENOENT;
677 }
678 
679 static void neigh_parms_destroy(struct neigh_parms *parms);
680 
681 static inline void neigh_parms_put(struct neigh_parms *parms)
682 {
683 	if (atomic_dec_and_test(&parms->refcnt))
684 		neigh_parms_destroy(parms);
685 }
686 
687 /*
688  *	The neighbour must already have been removed from the table.
689  *
690  */
691 void neigh_destroy(struct neighbour *neigh)
692 {
693 	struct net_device *dev = neigh->dev;
694 
695 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
696 
697 	if (!neigh->dead) {
698 		pr_warn("Destroying alive neighbour %p\n", neigh);
699 		dump_stack();
700 		return;
701 	}
702 
703 	if (neigh_del_timer(neigh))
704 		pr_warn("Impossible event\n");
705 
706 	write_lock_bh(&neigh->lock);
707 	__skb_queue_purge(&neigh->arp_queue);
708 	write_unlock_bh(&neigh->lock);
709 	neigh->arp_queue_len_bytes = 0;
710 
711 	if (dev->netdev_ops->ndo_neigh_destroy)
712 		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
713 
714 	dev_put(dev);
715 	neigh_parms_put(neigh->parms);
716 
717 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
718 
719 	atomic_dec(&neigh->tbl->entries);
720 	kfree_rcu(neigh, rcu);
721 }
722 EXPORT_SYMBOL(neigh_destroy);
723 
724 /* Neighbour state is suspicious;
725    disable fast path.
726 
727    Called with write_locked neigh.
728  */
729 static void neigh_suspect(struct neighbour *neigh)
730 {
731 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
732 
733 	neigh->output = neigh->ops->output;
734 }
735 
736 /* Neighbour state is OK;
737    enable fast path.
738 
739    Called with write_locked neigh.
740  */
741 static void neigh_connect(struct neighbour *neigh)
742 {
743 	neigh_dbg(2, "neigh %p is connected\n", neigh);
744 
745 	neigh->output = neigh->ops->connected_output;
746 }
747 
748 static void neigh_periodic_work(struct work_struct *work)
749 {
750 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
751 	struct neighbour *n;
752 	struct neighbour __rcu **np;
753 	unsigned int i;
754 	struct neigh_hash_table *nht;
755 
756 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
757 
758 	write_lock_bh(&tbl->lock);
759 	nht = rcu_dereference_protected(tbl->nht,
760 					lockdep_is_held(&tbl->lock));
761 
762 	/*
763 	 *	periodically recompute ReachableTime from random function
764 	 */
765 
766 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
767 		struct neigh_parms *p;
768 		tbl->last_rand = jiffies;
769 		list_for_each_entry(p, &tbl->parms_list, list)
770 			p->reachable_time =
771 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
772 	}
773 
774 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
775 		goto out;
776 
777 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
778 		np = &nht->hash_buckets[i];
779 
780 		while ((n = rcu_dereference_protected(*np,
781 				lockdep_is_held(&tbl->lock))) != NULL) {
782 			unsigned int state;
783 
784 			write_lock(&n->lock);
785 
786 			state = n->nud_state;
787 			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
788 				write_unlock(&n->lock);
789 				goto next_elt;
790 			}
791 
792 			if (time_before(n->used, n->confirmed))
793 				n->used = n->confirmed;
794 
795 			if (atomic_read(&n->refcnt) == 1 &&
796 			    (state == NUD_FAILED ||
797 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
798 				*np = n->next;
799 				n->dead = 1;
800 				write_unlock(&n->lock);
801 				neigh_cleanup_and_release(n);
802 				continue;
803 			}
804 			write_unlock(&n->lock);
805 
806 next_elt:
807 			np = &n->next;
808 		}
809 		/*
810 		 * It's fine to release lock here, even if hash table
811 		 * grows while we are preempted.
812 		 */
813 		write_unlock_bh(&tbl->lock);
814 		cond_resched();
815 		write_lock_bh(&tbl->lock);
816 		nht = rcu_dereference_protected(tbl->nht,
817 						lockdep_is_held(&tbl->lock));
818 	}
819 out:
820 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
821 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
822 	 * BASE_REACHABLE_TIME.
823 	 */
824 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
825 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
826 	write_unlock_bh(&tbl->lock);
827 }
828 
829 static __inline__ int neigh_max_probes(struct neighbour *n)
830 {
831 	struct neigh_parms *p = n->parms;
832 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
833 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
834 	        NEIGH_VAR(p, MCAST_PROBES));
835 }
836 
837 static void neigh_invalidate(struct neighbour *neigh)
838 	__releases(neigh->lock)
839 	__acquires(neigh->lock)
840 {
841 	struct sk_buff *skb;
842 
843 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
844 	neigh_dbg(2, "neigh %p is failed\n", neigh);
845 	neigh->updated = jiffies;
846 
847 	/* This is a fragile spot. report_unreachable is a complicated
848 	   routine; in particular, it can hit this same neighbour entry!
849 
850 	   So we try to be careful here and avoid an endless loop. --ANK
851 	 */
852 	while (neigh->nud_state == NUD_FAILED &&
853 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
854 		write_unlock(&neigh->lock);
855 		neigh->ops->error_report(neigh, skb);
856 		write_lock(&neigh->lock);
857 	}
858 	__skb_queue_purge(&neigh->arp_queue);
859 	neigh->arp_queue_len_bytes = 0;
860 }
861 
862 static void neigh_probe(struct neighbour *neigh)
863 	__releases(neigh->lock)
864 {
865 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
866 	/* keep skb alive even if arp_queue overflows */
867 	if (skb)
868 		skb = skb_clone(skb, GFP_ATOMIC);
869 	write_unlock(&neigh->lock);
870 	if (neigh->ops->solicit)
871 		neigh->ops->solicit(neigh, skb);
872 	atomic_inc(&neigh->probes);
873 	kfree_skb(skb);
874 }
875 
876 /* Called when a timer expires for a neighbour entry. */
877 
878 static void neigh_timer_handler(unsigned long arg)
879 {
880 	unsigned long now, next;
881 	struct neighbour *neigh = (struct neighbour *)arg;
882 	unsigned int state;
883 	int notify = 0;
884 
885 	write_lock(&neigh->lock);
886 
887 	state = neigh->nud_state;
888 	now = jiffies;
889 	next = now + HZ;
890 
891 	if (!(state & NUD_IN_TIMER))
892 		goto out;
893 
894 	if (state & NUD_REACHABLE) {
895 		if (time_before_eq(now,
896 				   neigh->confirmed + neigh->parms->reachable_time)) {
897 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
898 			next = neigh->confirmed + neigh->parms->reachable_time;
899 		} else if (time_before_eq(now,
900 					  neigh->used +
901 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
902 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
903 			neigh->nud_state = NUD_DELAY;
904 			neigh->updated = jiffies;
905 			neigh_suspect(neigh);
906 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
907 		} else {
908 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
909 			neigh->nud_state = NUD_STALE;
910 			neigh->updated = jiffies;
911 			neigh_suspect(neigh);
912 			notify = 1;
913 		}
914 	} else if (state & NUD_DELAY) {
915 		if (time_before_eq(now,
916 				   neigh->confirmed +
917 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
918 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
919 			neigh->nud_state = NUD_REACHABLE;
920 			neigh->updated = jiffies;
921 			neigh_connect(neigh);
922 			notify = 1;
923 			next = neigh->confirmed + neigh->parms->reachable_time;
924 		} else {
925 			neigh_dbg(2, "neigh %p is probed\n", neigh);
926 			neigh->nud_state = NUD_PROBE;
927 			neigh->updated = jiffies;
928 			atomic_set(&neigh->probes, 0);
929 			notify = 1;
930 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
931 		}
932 	} else {
933 		/* NUD_PROBE|NUD_INCOMPLETE */
934 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
935 	}
936 
937 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
938 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
939 		neigh->nud_state = NUD_FAILED;
940 		notify = 1;
941 		neigh_invalidate(neigh);
942 		goto out;
943 	}
944 
945 	if (neigh->nud_state & NUD_IN_TIMER) {
946 		if (time_before(next, jiffies + HZ/2))
947 			next = jiffies + HZ/2;
948 		if (!mod_timer(&neigh->timer, next))
949 			neigh_hold(neigh);
950 	}
951 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
952 		neigh_probe(neigh);
953 	} else {
954 out:
955 		write_unlock(&neigh->lock);
956 	}
957 
958 	if (notify)
959 		neigh_update_notify(neigh);
960 
961 	neigh_release(neigh);
962 }
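
/* Added summary of the state transitions driven by neigh_timer_handler()
 * above:
 *
 *	NUD_REACHABLE -> NUD_DELAY	confirmation expired, but recently used
 *	NUD_REACHABLE -> NUD_STALE	neither confirmed nor used recently
 *	NUD_DELAY     -> NUD_REACHABLE	confirmation arrived during the delay
 *	NUD_DELAY     -> NUD_PROBE	no confirmation; start active probing
 *	NUD_PROBE / NUD_INCOMPLETE -> NUD_FAILED	probe budget exhausted
 *
 * While an entry is in NUD_INCOMPLETE or NUD_PROBE, each timer expiry also
 * sends another solicitation via neigh_probe().
 */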
963 
964 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
965 {
966 	int rc;
967 	bool immediate_probe = false;
968 
969 	write_lock_bh(&neigh->lock);
970 
971 	rc = 0;
972 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
973 		goto out_unlock_bh;
974 	if (neigh->dead)
975 		goto out_dead;
976 
977 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
978 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
979 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
980 			unsigned long next, now = jiffies;
981 
982 			atomic_set(&neigh->probes,
983 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
984 			neigh->nud_state     = NUD_INCOMPLETE;
985 			neigh->updated = now;
986 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
987 					 HZ/2);
988 			neigh_add_timer(neigh, next);
989 			immediate_probe = true;
990 		} else {
991 			neigh->nud_state = NUD_FAILED;
992 			neigh->updated = jiffies;
993 			write_unlock_bh(&neigh->lock);
994 
995 			kfree_skb(skb);
996 			return 1;
997 		}
998 	} else if (neigh->nud_state & NUD_STALE) {
999 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1000 		neigh->nud_state = NUD_DELAY;
1001 		neigh->updated = jiffies;
1002 		neigh_add_timer(neigh, jiffies +
1003 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1004 	}
1005 
1006 	if (neigh->nud_state == NUD_INCOMPLETE) {
1007 		if (skb) {
1008 			while (neigh->arp_queue_len_bytes + skb->truesize >
1009 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1010 				struct sk_buff *buff;
1011 
1012 				buff = __skb_dequeue(&neigh->arp_queue);
1013 				if (!buff)
1014 					break;
1015 				neigh->arp_queue_len_bytes -= buff->truesize;
1016 				kfree_skb(buff);
1017 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1018 			}
1019 			skb_dst_force(skb);
1020 			__skb_queue_tail(&neigh->arp_queue, skb);
1021 			neigh->arp_queue_len_bytes += skb->truesize;
1022 		}
1023 		rc = 1;
1024 	}
1025 out_unlock_bh:
1026 	if (immediate_probe)
1027 		neigh_probe(neigh);
1028 	else
1029 		write_unlock(&neigh->lock);
1030 	local_bh_enable();
1031 	return rc;
1032 
1033 out_dead:
1034 	if (neigh->nud_state & NUD_STALE)
1035 		goto out_unlock_bh;
1036 	write_unlock_bh(&neigh->lock);
1037 	kfree_skb(skb);
1038 	return 1;
1039 }
1040 EXPORT_SYMBOL(__neigh_event_send);
1041 
1042 static void neigh_update_hhs(struct neighbour *neigh)
1043 {
1044 	struct hh_cache *hh;
1045 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1046 		= NULL;
1047 
1048 	if (neigh->dev->header_ops)
1049 		update = neigh->dev->header_ops->cache_update;
1050 
1051 	if (update) {
1052 		hh = &neigh->hh;
1053 		if (hh->hh_len) {
1054 			write_seqlock_bh(&hh->hh_lock);
1055 			update(hh, neigh->dev, neigh->ha);
1056 			write_sequnlock_bh(&hh->hh_lock);
1057 		}
1058 	}
1059 }
1060 
1061 
1062 
1063 /* Generic update routine.
1064    -- lladdr is the new lladdr, or NULL if it is not supplied.
1065    -- new    is the new state.
1066    -- flags
1067 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
1068 				if it is different.
1069 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1070 				lladdr instead of overriding it,
1071 				if it is different.
1072 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1073 
1074 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
1075 				NTF_ROUTER flag.
1076 	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known as
1077 				a router.
1078 
1079    Caller MUST hold a reference count on the entry.
1080  */
1081 
1082 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1083 		 u32 flags)
1084 {
1085 	u8 old;
1086 	int err;
1087 	int notify = 0;
1088 	struct net_device *dev;
1089 	int update_isrouter = 0;
1090 
1091 	write_lock_bh(&neigh->lock);
1092 
1093 	dev    = neigh->dev;
1094 	old    = neigh->nud_state;
1095 	err    = -EPERM;
1096 
1097 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1098 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1099 		goto out;
1100 	if (neigh->dead)
1101 		goto out;
1102 
1103 	if (!(new & NUD_VALID)) {
1104 		neigh_del_timer(neigh);
1105 		if (old & NUD_CONNECTED)
1106 			neigh_suspect(neigh);
1107 		neigh->nud_state = new;
1108 		err = 0;
1109 		notify = old & NUD_VALID;
1110 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1111 		    (new & NUD_FAILED)) {
1112 			neigh_invalidate(neigh);
1113 			notify = 1;
1114 		}
1115 		goto out;
1116 	}
1117 
1118 	/* Compare new lladdr with cached one */
1119 	if (!dev->addr_len) {
1120 		/* First case: device needs no address. */
1121 		lladdr = neigh->ha;
1122 	} else if (lladdr) {
1123 		/* The second case: if something is already cached
1124 		   and a new address is proposed:
1125 		   - compare new & old
1126 		   - if they are different, check override flag
1127 		 */
1128 		if ((old & NUD_VALID) &&
1129 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1130 			lladdr = neigh->ha;
1131 	} else {
1132 		/* No address is supplied; if we know something,
1133 		   use it, otherwise discard the request.
1134 		 */
1135 		err = -EINVAL;
1136 		if (!(old & NUD_VALID))
1137 			goto out;
1138 		lladdr = neigh->ha;
1139 	}
1140 
1141 	/* If entry was valid and address is not changed,
1142 	   do not change entry state, if new one is STALE.
1143 	 */
1144 	err = 0;
1145 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1146 	if (old & NUD_VALID) {
1147 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1148 			update_isrouter = 0;
1149 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1150 			    (old & NUD_CONNECTED)) {
1151 				lladdr = neigh->ha;
1152 				new = NUD_STALE;
1153 			} else
1154 				goto out;
1155 		} else {
1156 			if (lladdr == neigh->ha && new == NUD_STALE &&
1157 			    !(flags & NEIGH_UPDATE_F_ADMIN))
1158 				new = old;
1159 		}
1160 	}
1161 
1162 	/* Update timestamps only once we know we will make a change to the
1163 	 * neighbour entry. Otherwise we risk moving the locktime window with
1164 	 * noop updates and ignoring relevant ARP updates.
1165 	 */
1166 	if (new != old || lladdr != neigh->ha) {
1167 		if (new & NUD_CONNECTED)
1168 			neigh->confirmed = jiffies;
1169 		neigh->updated = jiffies;
1170 	}
1171 
1172 	if (new != old) {
1173 		neigh_del_timer(neigh);
1174 		if (new & NUD_PROBE)
1175 			atomic_set(&neigh->probes, 0);
1176 		if (new & NUD_IN_TIMER)
1177 			neigh_add_timer(neigh, (jiffies +
1178 						((new & NUD_REACHABLE) ?
1179 						 neigh->parms->reachable_time :
1180 						 0)));
1181 		neigh->nud_state = new;
1182 		notify = 1;
1183 	}
1184 
1185 	if (lladdr != neigh->ha) {
1186 		write_seqlock(&neigh->ha_lock);
1187 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1188 		write_sequnlock(&neigh->ha_lock);
1189 		neigh_update_hhs(neigh);
1190 		if (!(new & NUD_CONNECTED))
1191 			neigh->confirmed = jiffies -
1192 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1193 		notify = 1;
1194 	}
1195 	if (new == old)
1196 		goto out;
1197 	if (new & NUD_CONNECTED)
1198 		neigh_connect(neigh);
1199 	else
1200 		neigh_suspect(neigh);
1201 	if (!(old & NUD_VALID)) {
1202 		struct sk_buff *skb;
1203 
1204 		/* Again: avoid dead loop if something went wrong */
1205 
1206 		while (neigh->nud_state & NUD_VALID &&
1207 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1208 			struct dst_entry *dst = skb_dst(skb);
1209 			struct neighbour *n2, *n1 = neigh;
1210 			write_unlock_bh(&neigh->lock);
1211 
1212 			rcu_read_lock();
1213 
1214 			/* Why not just use 'neigh' as-is?  The problem is that
1215 			 * things such as shaper, eql, and sch_teql can end up
1216 			 * using alternative, different, neigh objects to output
1217 			 * the packet in the output path.  So what we need to do
1218 			 * here is re-lookup the top-level neigh in the path so
1219 			 * we can reinject the packet there.
1220 			 */
1221 			n2 = NULL;
1222 			if (dst) {
1223 				n2 = dst_neigh_lookup_skb(dst, skb);
1224 				if (n2)
1225 					n1 = n2;
1226 			}
1227 			n1->output(n1, skb);
1228 			if (n2)
1229 				neigh_release(n2);
1230 			rcu_read_unlock();
1231 
1232 			write_lock_bh(&neigh->lock);
1233 		}
1234 		__skb_queue_purge(&neigh->arp_queue);
1235 		neigh->arp_queue_len_bytes = 0;
1236 	}
1237 out:
1238 	if (update_isrouter) {
1239 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1240 			(neigh->flags | NTF_ROUTER) :
1241 			(neigh->flags & ~NTF_ROUTER);
1242 	}
1243 	write_unlock_bh(&neigh->lock);
1244 
1245 	if (notify)
1246 		neigh_update_notify(neigh);
1247 
1248 	return err;
1249 }
1250 EXPORT_SYMBOL(neigh_update);
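
/* Usage sketch (added; illustrative only): a protocol that has just learned
 * a neighbour's link-layer address, e.g. from an ARP reply, typically pushes
 * it into the cache with something like:
 *
 *	neigh_update(n, lladdr, NUD_REACHABLE, NEIGH_UPDATE_F_OVERRIDE);
 *
 * while a userspace-driven change arrives through neigh_add() below with
 * NEIGH_UPDATE_F_ADMIN set, which allows overriding NUD_PERMANENT entries.
 */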
1251 
1252 /* Update the neigh to listen temporarily for probe responses, even if it is
1253  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1254  */
1255 void __neigh_set_probe_once(struct neighbour *neigh)
1256 {
1257 	if (neigh->dead)
1258 		return;
1259 	neigh->updated = jiffies;
1260 	if (!(neigh->nud_state & NUD_FAILED))
1261 		return;
1262 	neigh->nud_state = NUD_INCOMPLETE;
1263 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1264 	neigh_add_timer(neigh,
1265 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1266 }
1267 EXPORT_SYMBOL(__neigh_set_probe_once);
1268 
1269 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1270 				 u8 *lladdr, void *saddr,
1271 				 struct net_device *dev)
1272 {
1273 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1274 						 lladdr || !dev->addr_len);
1275 	if (neigh)
1276 		neigh_update(neigh, lladdr, NUD_STALE,
1277 			     NEIGH_UPDATE_F_OVERRIDE);
1278 	return neigh;
1279 }
1280 EXPORT_SYMBOL(neigh_event_ns);
1281 
1282 /* called with read_lock_bh(&n->lock); */
1283 static void neigh_hh_init(struct neighbour *n)
1284 {
1285 	struct net_device *dev = n->dev;
1286 	__be16 prot = n->tbl->protocol;
1287 	struct hh_cache	*hh = &n->hh;
1288 
1289 	write_lock_bh(&n->lock);
1290 
1291 	/* Only one thread can come in here and initialize the
1292 	 * hh_cache entry.
1293 	 */
1294 	if (!hh->hh_len)
1295 		dev->header_ops->cache(n, hh, prot);
1296 
1297 	write_unlock_bh(&n->lock);
1298 }
1299 
1300 /* Slow and careful. */
1301 
1302 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1303 {
1304 	int rc = 0;
1305 
1306 	if (!neigh_event_send(neigh, skb)) {
1307 		int err;
1308 		struct net_device *dev = neigh->dev;
1309 		unsigned int seq;
1310 
1311 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1312 			neigh_hh_init(neigh);
1313 
1314 		do {
1315 			__skb_pull(skb, skb_network_offset(skb));
1316 			seq = read_seqbegin(&neigh->ha_lock);
1317 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1318 					      neigh->ha, NULL, skb->len);
1319 		} while (read_seqretry(&neigh->ha_lock, seq));
1320 
1321 		if (err >= 0)
1322 			rc = dev_queue_xmit(skb);
1323 		else
1324 			goto out_kfree_skb;
1325 	}
1326 out:
1327 	return rc;
1328 out_kfree_skb:
1329 	rc = -EINVAL;
1330 	kfree_skb(skb);
1331 	goto out;
1332 }
1333 EXPORT_SYMBOL(neigh_resolve_output);
1334 
1335 /* As fast as possible without hh cache */
1336 
1337 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1338 {
1339 	struct net_device *dev = neigh->dev;
1340 	unsigned int seq;
1341 	int err;
1342 
1343 	do {
1344 		__skb_pull(skb, skb_network_offset(skb));
1345 		seq = read_seqbegin(&neigh->ha_lock);
1346 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1347 				      neigh->ha, NULL, skb->len);
1348 	} while (read_seqretry(&neigh->ha_lock, seq));
1349 
1350 	if (err >= 0)
1351 		err = dev_queue_xmit(skb);
1352 	else {
1353 		err = -EINVAL;
1354 		kfree_skb(skb);
1355 	}
1356 	return err;
1357 }
1358 EXPORT_SYMBOL(neigh_connected_output);
1359 
1360 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1361 {
1362 	return dev_queue_xmit(skb);
1363 }
1364 EXPORT_SYMBOL(neigh_direct_output);
1365 
1366 static void neigh_proxy_process(unsigned long arg)
1367 {
1368 	struct neigh_table *tbl = (struct neigh_table *)arg;
1369 	long sched_next = 0;
1370 	unsigned long now = jiffies;
1371 	struct sk_buff *skb, *n;
1372 
1373 	spin_lock(&tbl->proxy_queue.lock);
1374 
1375 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1376 		long tdif = NEIGH_CB(skb)->sched_next - now;
1377 
1378 		if (tdif <= 0) {
1379 			struct net_device *dev = skb->dev;
1380 
1381 			__skb_unlink(skb, &tbl->proxy_queue);
1382 			if (tbl->proxy_redo && netif_running(dev)) {
1383 				rcu_read_lock();
1384 				tbl->proxy_redo(skb);
1385 				rcu_read_unlock();
1386 			} else {
1387 				kfree_skb(skb);
1388 			}
1389 
1390 			dev_put(dev);
1391 		} else if (!sched_next || tdif < sched_next)
1392 			sched_next = tdif;
1393 	}
1394 	del_timer(&tbl->proxy_timer);
1395 	if (sched_next)
1396 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1397 	spin_unlock(&tbl->proxy_queue.lock);
1398 }
1399 
1400 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1401 		    struct sk_buff *skb)
1402 {
1403 	unsigned long now = jiffies;
1404 
1405 	unsigned long sched_next = now + (prandom_u32() %
1406 					  NEIGH_VAR(p, PROXY_DELAY));
1407 
1408 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1409 		kfree_skb(skb);
1410 		return;
1411 	}
1412 
1413 	NEIGH_CB(skb)->sched_next = sched_next;
1414 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1415 
1416 	spin_lock(&tbl->proxy_queue.lock);
1417 	if (del_timer(&tbl->proxy_timer)) {
1418 		if (time_before(tbl->proxy_timer.expires, sched_next))
1419 			sched_next = tbl->proxy_timer.expires;
1420 	}
1421 	skb_dst_drop(skb);
1422 	dev_hold(skb->dev);
1423 	__skb_queue_tail(&tbl->proxy_queue, skb);
1424 	mod_timer(&tbl->proxy_timer, sched_next);
1425 	spin_unlock(&tbl->proxy_queue.lock);
1426 }
1427 EXPORT_SYMBOL(pneigh_enqueue);
1428 
1429 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1430 						      struct net *net, int ifindex)
1431 {
1432 	struct neigh_parms *p;
1433 
1434 	list_for_each_entry(p, &tbl->parms_list, list) {
1435 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1436 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1437 			return p;
1438 	}
1439 
1440 	return NULL;
1441 }
1442 
1443 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1444 				      struct neigh_table *tbl)
1445 {
1446 	struct neigh_parms *p;
1447 	struct net *net = dev_net(dev);
1448 	const struct net_device_ops *ops = dev->netdev_ops;
1449 
1450 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1451 	if (p) {
1452 		p->tbl		  = tbl;
1453 		atomic_set(&p->refcnt, 1);
1454 		p->reachable_time =
1455 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1456 		dev_hold(dev);
1457 		p->dev = dev;
1458 		write_pnet(&p->net, net);
1459 		p->sysctl_table = NULL;
1460 
1461 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1462 			dev_put(dev);
1463 			kfree(p);
1464 			return NULL;
1465 		}
1466 
1467 		write_lock_bh(&tbl->lock);
1468 		list_add(&p->list, &tbl->parms.list);
1469 		write_unlock_bh(&tbl->lock);
1470 
1471 		neigh_parms_data_state_cleanall(p);
1472 	}
1473 	return p;
1474 }
1475 EXPORT_SYMBOL(neigh_parms_alloc);
1476 
1477 static void neigh_rcu_free_parms(struct rcu_head *head)
1478 {
1479 	struct neigh_parms *parms =
1480 		container_of(head, struct neigh_parms, rcu_head);
1481 
1482 	neigh_parms_put(parms);
1483 }
1484 
1485 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1486 {
1487 	if (!parms || parms == &tbl->parms)
1488 		return;
1489 	write_lock_bh(&tbl->lock);
1490 	list_del(&parms->list);
1491 	parms->dead = 1;
1492 	write_unlock_bh(&tbl->lock);
1493 	if (parms->dev)
1494 		dev_put(parms->dev);
1495 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1496 }
1497 EXPORT_SYMBOL(neigh_parms_release);
1498 
1499 static void neigh_parms_destroy(struct neigh_parms *parms)
1500 {
1501 	kfree(parms);
1502 }
1503 
1504 static struct lock_class_key neigh_table_proxy_queue_class;
1505 
1506 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1507 
1508 void neigh_table_init(int index, struct neigh_table *tbl)
1509 {
1510 	unsigned long now = jiffies;
1511 	unsigned long phsize;
1512 
1513 	INIT_LIST_HEAD(&tbl->parms_list);
1514 	list_add(&tbl->parms.list, &tbl->parms_list);
1515 	write_pnet(&tbl->parms.net, &init_net);
1516 	atomic_set(&tbl->parms.refcnt, 1);
1517 	tbl->parms.reachable_time =
1518 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1519 
1520 	tbl->stats = alloc_percpu(struct neigh_statistics);
1521 	if (!tbl->stats)
1522 		panic("cannot create neighbour cache statistics");
1523 
1524 #ifdef CONFIG_PROC_FS
1525 	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1526 			      &neigh_stat_seq_fops, tbl))
1527 		panic("cannot create neighbour proc dir entry");
1528 #endif
1529 
1530 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1531 
1532 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1533 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1534 
1535 	if (!tbl->nht || !tbl->phash_buckets)
1536 		panic("cannot allocate neighbour cache hashes");
1537 
1538 	if (!tbl->entry_size)
1539 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1540 					tbl->key_len, NEIGH_PRIV_ALIGN);
1541 	else
1542 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1543 
1544 	rwlock_init(&tbl->lock);
1545 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1546 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1547 			tbl->parms.reachable_time);
1548 	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1549 	skb_queue_head_init_class(&tbl->proxy_queue,
1550 			&neigh_table_proxy_queue_class);
1551 
1552 	tbl->last_flush = now;
1553 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1554 
1555 	neigh_tables[index] = tbl;
1556 }
1557 EXPORT_SYMBOL(neigh_table_init);
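
/* Usage sketch (added; illustrative only): each address family registers its
 * table once at init time; IPv4 ARP, for example, does roughly
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * where arp_tbl supplies the family, key_len, hash/constructor callbacks and
 * default neigh_parms.  neigh_table_clear() is the matching teardown.
 */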
1558 
1559 int neigh_table_clear(int index, struct neigh_table *tbl)
1560 {
1561 	neigh_tables[index] = NULL;
1562 	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1563 	cancel_delayed_work_sync(&tbl->gc_work);
1564 	del_timer_sync(&tbl->proxy_timer);
1565 	pneigh_queue_purge(&tbl->proxy_queue);
1566 	neigh_ifdown(tbl, NULL);
1567 	if (atomic_read(&tbl->entries))
1568 		pr_crit("neighbour leakage\n");
1569 
1570 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1571 		 neigh_hash_free_rcu);
1572 	tbl->nht = NULL;
1573 
1574 	kfree(tbl->phash_buckets);
1575 	tbl->phash_buckets = NULL;
1576 
1577 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1578 
1579 	free_percpu(tbl->stats);
1580 	tbl->stats = NULL;
1581 
1582 	return 0;
1583 }
1584 EXPORT_SYMBOL(neigh_table_clear);
1585 
1586 static struct neigh_table *neigh_find_table(int family)
1587 {
1588 	struct neigh_table *tbl = NULL;
1589 
1590 	switch (family) {
1591 	case AF_INET:
1592 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1593 		break;
1594 	case AF_INET6:
1595 		tbl = neigh_tables[NEIGH_ND_TABLE];
1596 		break;
1597 	case AF_DECnet:
1598 		tbl = neigh_tables[NEIGH_DN_TABLE];
1599 		break;
1600 	}
1601 
1602 	return tbl;
1603 }
1604 
1605 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1606 {
1607 	struct net *net = sock_net(skb->sk);
1608 	struct ndmsg *ndm;
1609 	struct nlattr *dst_attr;
1610 	struct neigh_table *tbl;
1611 	struct neighbour *neigh;
1612 	struct net_device *dev = NULL;
1613 	int err = -EINVAL;
1614 
1615 	ASSERT_RTNL();
1616 	if (nlmsg_len(nlh) < sizeof(*ndm))
1617 		goto out;
1618 
1619 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1620 	if (dst_attr == NULL)
1621 		goto out;
1622 
1623 	ndm = nlmsg_data(nlh);
1624 	if (ndm->ndm_ifindex) {
1625 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1626 		if (dev == NULL) {
1627 			err = -ENODEV;
1628 			goto out;
1629 		}
1630 	}
1631 
1632 	tbl = neigh_find_table(ndm->ndm_family);
1633 	if (tbl == NULL)
1634 		return -EAFNOSUPPORT;
1635 
1636 	if (nla_len(dst_attr) < tbl->key_len)
1637 		goto out;
1638 
1639 	if (ndm->ndm_flags & NTF_PROXY) {
1640 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1641 		goto out;
1642 	}
1643 
1644 	if (dev == NULL)
1645 		goto out;
1646 
1647 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1648 	if (neigh == NULL) {
1649 		err = -ENOENT;
1650 		goto out;
1651 	}
1652 
1653 	err = neigh_update(neigh, NULL, NUD_FAILED,
1654 			   NEIGH_UPDATE_F_OVERRIDE |
1655 			   NEIGH_UPDATE_F_ADMIN);
1656 	neigh_release(neigh);
1657 
1658 out:
1659 	return err;
1660 }
1661 
1662 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1663 {
1664 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1665 	struct net *net = sock_net(skb->sk);
1666 	struct ndmsg *ndm;
1667 	struct nlattr *tb[NDA_MAX+1];
1668 	struct neigh_table *tbl;
1669 	struct net_device *dev = NULL;
1670 	struct neighbour *neigh;
1671 	void *dst, *lladdr;
1672 	int err;
1673 
1674 	ASSERT_RTNL();
1675 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1676 	if (err < 0)
1677 		goto out;
1678 
1679 	err = -EINVAL;
1680 	if (tb[NDA_DST] == NULL)
1681 		goto out;
1682 
1683 	ndm = nlmsg_data(nlh);
1684 	if (ndm->ndm_ifindex) {
1685 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1686 		if (dev == NULL) {
1687 			err = -ENODEV;
1688 			goto out;
1689 		}
1690 
1691 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1692 			goto out;
1693 	}
1694 
1695 	tbl = neigh_find_table(ndm->ndm_family);
1696 	if (tbl == NULL)
1697 		return -EAFNOSUPPORT;
1698 
1699 	if (nla_len(tb[NDA_DST]) < tbl->key_len)
1700 		goto out;
1701 	dst = nla_data(tb[NDA_DST]);
1702 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1703 
1704 	if (ndm->ndm_flags & NTF_PROXY) {
1705 		struct pneigh_entry *pn;
1706 
1707 		err = -ENOBUFS;
1708 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1709 		if (pn) {
1710 			pn->flags = ndm->ndm_flags;
1711 			err = 0;
1712 		}
1713 		goto out;
1714 	}
1715 
1716 	if (dev == NULL)
1717 		goto out;
1718 
1719 	neigh = neigh_lookup(tbl, dst, dev);
1720 	if (neigh == NULL) {
1721 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1722 			err = -ENOENT;
1723 			goto out;
1724 		}
1725 
1726 		neigh = __neigh_lookup_errno(tbl, dst, dev);
1727 		if (IS_ERR(neigh)) {
1728 			err = PTR_ERR(neigh);
1729 			goto out;
1730 		}
1731 	} else {
1732 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1733 			err = -EEXIST;
1734 			neigh_release(neigh);
1735 			goto out;
1736 		}
1737 
1738 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1739 			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1740 	}
1741 
1742 	if (ndm->ndm_flags & NTF_USE) {
1743 		neigh_event_send(neigh, NULL);
1744 		err = 0;
1745 	} else
1746 		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1747 	neigh_release(neigh);
1748 
1749 out:
1750 	return err;
1751 }
1752 
1753 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1754 {
1755 	struct nlattr *nest;
1756 
1757 	nest = nla_nest_start(skb, NDTA_PARMS);
1758 	if (nest == NULL)
1759 		return -ENOBUFS;
1760 
1761 	if ((parms->dev &&
1762 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1763 	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1764 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1765 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1766 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1767 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1768 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1769 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1770 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1771 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1772 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1773 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1774 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1775 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1776 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1777 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1778 			  NDTPA_PAD) ||
1779 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1780 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1781 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1782 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1783 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1784 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1785 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1786 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1787 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1788 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1789 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1790 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1791 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1792 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1793 		goto nla_put_failure;
1794 	return nla_nest_end(skb, nest);
1795 
1796 nla_put_failure:
1797 	nla_nest_cancel(skb, nest);
1798 	return -EMSGSIZE;
1799 }
1800 
1801 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1802 			      u32 pid, u32 seq, int type, int flags)
1803 {
1804 	struct nlmsghdr *nlh;
1805 	struct ndtmsg *ndtmsg;
1806 
1807 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1808 	if (nlh == NULL)
1809 		return -EMSGSIZE;
1810 
1811 	ndtmsg = nlmsg_data(nlh);
1812 
1813 	read_lock_bh(&tbl->lock);
1814 	ndtmsg->ndtm_family = tbl->family;
1815 	ndtmsg->ndtm_pad1   = 0;
1816 	ndtmsg->ndtm_pad2   = 0;
1817 
1818 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1819 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1820 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1821 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1822 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1823 		goto nla_put_failure;
1824 	{
1825 		unsigned long now = jiffies;
1826 		unsigned int flush_delta = now - tbl->last_flush;
1827 		unsigned int rand_delta = now - tbl->last_rand;
1828 		struct neigh_hash_table *nht;
1829 		struct ndt_config ndc = {
1830 			.ndtc_key_len		= tbl->key_len,
1831 			.ndtc_entry_size	= tbl->entry_size,
1832 			.ndtc_entries		= atomic_read(&tbl->entries),
1833 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1834 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1835 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1836 		};
1837 
1838 		rcu_read_lock_bh();
1839 		nht = rcu_dereference_bh(tbl->nht);
1840 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1841 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1842 		rcu_read_unlock_bh();
1843 
1844 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1845 			goto nla_put_failure;
1846 	}
1847 
1848 	{
1849 		int cpu;
1850 		struct ndt_stats ndst;
1851 
1852 		memset(&ndst, 0, sizeof(ndst));
1853 
1854 		for_each_possible_cpu(cpu) {
1855 			struct neigh_statistics	*st;
1856 
1857 			st = per_cpu_ptr(tbl->stats, cpu);
1858 			ndst.ndts_allocs		+= st->allocs;
1859 			ndst.ndts_destroys		+= st->destroys;
1860 			ndst.ndts_hash_grows		+= st->hash_grows;
1861 			ndst.ndts_res_failed		+= st->res_failed;
1862 			ndst.ndts_lookups		+= st->lookups;
1863 			ndst.ndts_hits			+= st->hits;
1864 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1865 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1866 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1867 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1868 			ndst.ndts_table_fulls		+= st->table_fulls;
1869 		}
1870 
1871 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1872 				  NDTA_PAD))
1873 			goto nla_put_failure;
1874 	}
1875 
1876 	BUG_ON(tbl->parms.dev);
1877 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1878 		goto nla_put_failure;
1879 
1880 	read_unlock_bh(&tbl->lock);
1881 	nlmsg_end(skb, nlh);
1882 	return 0;
1883 
1884 nla_put_failure:
1885 	read_unlock_bh(&tbl->lock);
1886 	nlmsg_cancel(skb, nlh);
1887 	return -EMSGSIZE;
1888 }
1889 
1890 static int neightbl_fill_param_info(struct sk_buff *skb,
1891 				    struct neigh_table *tbl,
1892 				    struct neigh_parms *parms,
1893 				    u32 pid, u32 seq, int type,
1894 				    unsigned int flags)
1895 {
1896 	struct ndtmsg *ndtmsg;
1897 	struct nlmsghdr *nlh;
1898 
1899 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1900 	if (nlh == NULL)
1901 		return -EMSGSIZE;
1902 
1903 	ndtmsg = nlmsg_data(nlh);
1904 
1905 	read_lock_bh(&tbl->lock);
1906 	ndtmsg->ndtm_family = tbl->family;
1907 	ndtmsg->ndtm_pad1   = 0;
1908 	ndtmsg->ndtm_pad2   = 0;
1909 
1910 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1911 	    neightbl_fill_parms(skb, parms) < 0)
1912 		goto errout;
1913 
1914 	read_unlock_bh(&tbl->lock);
1915 	nlmsg_end(skb, nlh);
1916 	return 0;
1917 errout:
1918 	read_unlock_bh(&tbl->lock);
1919 	nlmsg_cancel(skb, nlh);
1920 	return -EMSGSIZE;
1921 }
1922 
1923 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1924 	[NDTA_NAME]		= { .type = NLA_STRING },
1925 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1926 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1927 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1928 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1929 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1930 };
1931 
1932 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1933 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1934 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1935 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1936 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1937 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1938 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1939 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1940 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1941 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1942 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1943 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1944 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1945 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1946 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1947 };
1948 
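/* RTM_SETNEIGHTBL handler: find the table by NDTA_NAME, then update the
 * selected neigh_parms attributes and (only in the initial netns) the gc
 * thresholds and interval, all under tbl->lock.
 */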
1949 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1950 {
1951 	struct net *net = sock_net(skb->sk);
1952 	struct neigh_table *tbl;
1953 	struct ndtmsg *ndtmsg;
1954 	struct nlattr *tb[NDTA_MAX+1];
1955 	bool found = false;
1956 	int err, tidx;
1957 
1958 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1959 			  nl_neightbl_policy);
1960 	if (err < 0)
1961 		goto errout;
1962 
1963 	if (tb[NDTA_NAME] == NULL) {
1964 		err = -EINVAL;
1965 		goto errout;
1966 	}
1967 
1968 	ndtmsg = nlmsg_data(nlh);
1969 
1970 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1971 		tbl = neigh_tables[tidx];
1972 		if (!tbl)
1973 			continue;
1974 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1975 			continue;
1976 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1977 			found = true;
1978 			break;
1979 		}
1980 	}
1981 
1982 	if (!found)
1983 		return -ENOENT;
1984 
1985 	/*
1986 	 * We acquire tbl->lock to be nice to the periodic timers and
1987 	 * make sure they always see a consistent set of values.
1988 	 */
1989 	write_lock_bh(&tbl->lock);
1990 
1991 	if (tb[NDTA_PARMS]) {
1992 		struct nlattr *tbp[NDTPA_MAX+1];
1993 		struct neigh_parms *p;
1994 		int i, ifindex = 0;
1995 
1996 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1997 				       nl_ntbl_parm_policy);
1998 		if (err < 0)
1999 			goto errout_tbl_lock;
2000 
2001 		if (tbp[NDTPA_IFINDEX])
2002 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2003 
2004 		p = lookup_neigh_parms(tbl, net, ifindex);
2005 		if (p == NULL) {
2006 			err = -ENOENT;
2007 			goto errout_tbl_lock;
2008 		}
2009 
2010 		for (i = 1; i <= NDTPA_MAX; i++) {
2011 			if (tbp[i] == NULL)
2012 				continue;
2013 
2014 			switch (i) {
2015 			case NDTPA_QUEUE_LEN:
2016 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2017 					      nla_get_u32(tbp[i]) *
2018 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2019 				break;
2020 			case NDTPA_QUEUE_LENBYTES:
2021 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2022 					      nla_get_u32(tbp[i]));
2023 				break;
2024 			case NDTPA_PROXY_QLEN:
2025 				NEIGH_VAR_SET(p, PROXY_QLEN,
2026 					      nla_get_u32(tbp[i]));
2027 				break;
2028 			case NDTPA_APP_PROBES:
2029 				NEIGH_VAR_SET(p, APP_PROBES,
2030 					      nla_get_u32(tbp[i]));
2031 				break;
2032 			case NDTPA_UCAST_PROBES:
2033 				NEIGH_VAR_SET(p, UCAST_PROBES,
2034 					      nla_get_u32(tbp[i]));
2035 				break;
2036 			case NDTPA_MCAST_PROBES:
2037 				NEIGH_VAR_SET(p, MCAST_PROBES,
2038 					      nla_get_u32(tbp[i]));
2039 				break;
2040 			case NDTPA_MCAST_REPROBES:
2041 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2042 					      nla_get_u32(tbp[i]));
2043 				break;
2044 			case NDTPA_BASE_REACHABLE_TIME:
2045 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2046 					      nla_get_msecs(tbp[i]));
2047 				/* update reachable_time as well, otherwise, the change will
2048 				 * only be effective after the next time neigh_periodic_work
2049 				 * decides to recompute it (can be multiple minutes)
2050 				 */
2051 				p->reachable_time =
2052 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2053 				break;
2054 			case NDTPA_GC_STALETIME:
2055 				NEIGH_VAR_SET(p, GC_STALETIME,
2056 					      nla_get_msecs(tbp[i]));
2057 				break;
2058 			case NDTPA_DELAY_PROBE_TIME:
2059 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2060 					      nla_get_msecs(tbp[i]));
2061 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2062 				break;
2063 			case NDTPA_RETRANS_TIME:
2064 				NEIGH_VAR_SET(p, RETRANS_TIME,
2065 					      nla_get_msecs(tbp[i]));
2066 				break;
2067 			case NDTPA_ANYCAST_DELAY:
2068 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2069 					      nla_get_msecs(tbp[i]));
2070 				break;
2071 			case NDTPA_PROXY_DELAY:
2072 				NEIGH_VAR_SET(p, PROXY_DELAY,
2073 					      nla_get_msecs(tbp[i]));
2074 				break;
2075 			case NDTPA_LOCKTIME:
2076 				NEIGH_VAR_SET(p, LOCKTIME,
2077 					      nla_get_msecs(tbp[i]));
2078 				break;
2079 			}
2080 		}
2081 	}
2082 
2083 	err = -ENOENT;
2084 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2085 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2086 	    !net_eq(net, &init_net))
2087 		goto errout_tbl_lock;
2088 
2089 	if (tb[NDTA_THRESH1])
2090 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2091 
2092 	if (tb[NDTA_THRESH2])
2093 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2094 
2095 	if (tb[NDTA_THRESH3])
2096 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2097 
2098 	if (tb[NDTA_GC_INTERVAL])
2099 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2100 
2101 	err = 0;
2102 
2103 errout_tbl_lock:
2104 	write_unlock_bh(&tbl->lock);
2105 errout:
2106 	return err;
2107 }
2108 
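/* Dump every neighbour table followed by its per-device parms entries;
 * cb->args[0]/[1] record the table and parms position to resume from.
 */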
2109 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2110 {
2111 	struct net *net = sock_net(skb->sk);
2112 	int family, tidx, nidx = 0;
2113 	int tbl_skip = cb->args[0];
2114 	int neigh_skip = cb->args[1];
2115 	struct neigh_table *tbl;
2116 
2117 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2118 
2119 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2120 		struct neigh_parms *p;
2121 
2122 		tbl = neigh_tables[tidx];
2123 		if (!tbl)
2124 			continue;
2125 
2126 		if (tidx < tbl_skip || (family && tbl->family != family))
2127 			continue;
2128 
2129 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2130 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2131 				       NLM_F_MULTI) < 0)
2132 			break;
2133 
2134 		nidx = 0;
2135 		p = list_next_entry(&tbl->parms, list);
2136 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2137 			if (!net_eq(neigh_parms_net(p), net))
2138 				continue;
2139 
2140 			if (nidx < neigh_skip)
2141 				goto next;
2142 
2143 			if (neightbl_fill_param_info(skb, tbl, p,
2144 						     NETLINK_CB(cb->skb).portid,
2145 						     cb->nlh->nlmsg_seq,
2146 						     RTM_NEWNEIGHTBL,
2147 						     NLM_F_MULTI) < 0)
2148 				goto out;
2149 		next:
2150 			nidx++;
2151 		}
2152 
2153 		neigh_skip = 0;
2154 	}
2155 out:
2156 	cb->args[0] = tidx;
2157 	cb->args[1] = nidx;
2158 
2159 	return skb->len;
2160 }
2161 
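/* Fill a neighbour message (@type is RTM_NEWNEIGH or RTM_DELNEIGH):
 * key, the current link-layer address when the entry is NUD_VALID,
 * cache timestamps and the probe counter.
 */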
2162 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2163 			   u32 pid, u32 seq, int type, unsigned int flags)
2164 {
2165 	unsigned long now = jiffies;
2166 	struct nda_cacheinfo ci;
2167 	struct nlmsghdr *nlh;
2168 	struct ndmsg *ndm;
2169 
2170 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2171 	if (nlh == NULL)
2172 		return -EMSGSIZE;
2173 
2174 	ndm = nlmsg_data(nlh);
2175 	ndm->ndm_family	 = neigh->ops->family;
2176 	ndm->ndm_pad1    = 0;
2177 	ndm->ndm_pad2    = 0;
2178 	ndm->ndm_flags	 = neigh->flags;
2179 	ndm->ndm_type	 = neigh->type;
2180 	ndm->ndm_ifindex = neigh->dev->ifindex;
2181 
2182 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2183 		goto nla_put_failure;
2184 
2185 	read_lock_bh(&neigh->lock);
2186 	ndm->ndm_state	 = neigh->nud_state;
2187 	if (neigh->nud_state & NUD_VALID) {
2188 		char haddr[MAX_ADDR_LEN];
2189 
2190 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2191 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2192 			read_unlock_bh(&neigh->lock);
2193 			goto nla_put_failure;
2194 		}
2195 	}
2196 
2197 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2198 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2199 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2200 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2201 	read_unlock_bh(&neigh->lock);
2202 
2203 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2204 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2205 		goto nla_put_failure;
2206 
2207 	nlmsg_end(skb, nlh);
2208 	return 0;
2209 
2210 nla_put_failure:
2211 	nlmsg_cancel(skb, nlh);
2212 	return -EMSGSIZE;
2213 }
2214 
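/* Like neigh_fill_info() but for proxy entries: no lladdr or cache info,
 * state is NUD_NONE and NTF_PROXY is set in the flags.
 */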
2215 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2216 			    u32 pid, u32 seq, int type, unsigned int flags,
2217 			    struct neigh_table *tbl)
2218 {
2219 	struct nlmsghdr *nlh;
2220 	struct ndmsg *ndm;
2221 
2222 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2223 	if (nlh == NULL)
2224 		return -EMSGSIZE;
2225 
2226 	ndm = nlmsg_data(nlh);
2227 	ndm->ndm_family	 = tbl->family;
2228 	ndm->ndm_pad1    = 0;
2229 	ndm->ndm_pad2    = 0;
2230 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2231 	ndm->ndm_type	 = RTN_UNICAST;
2232 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2233 	ndm->ndm_state	 = NUD_NONE;
2234 
2235 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2236 		goto nla_put_failure;
2237 
2238 	nlmsg_end(skb, nlh);
2239 	return 0;
2240 
2241 nla_put_failure:
2242 	nlmsg_cancel(skb, nlh);
2243 	return -EMSGSIZE;
2244 }
2245 
2246 static void neigh_update_notify(struct neighbour *neigh)
2247 {
2248 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2249 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2250 }
2251 
2252 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2253 {
2254 	struct net_device *master;
2255 
2256 	if (!master_idx)
2257 		return false;
2258 
2259 	master = netdev_master_upper_dev_get(dev);
2260 	if (!master || master->ifindex != master_idx)
2261 		return true;
2262 
2263 	return false;
2264 }
2265 
2266 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2267 {
2268 	if (filter_idx && dev->ifindex != filter_idx)
2269 		return true;
2270 
2271 	return false;
2272 }
2273 
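/* Walk one table's hash buckets under RCU and emit RTM_NEWNEIGH entries,
 * honouring optional NDA_IFINDEX/NDA_MASTER dump filters; cb->args[1]/[2]
 * hold the bucket and index to resume from.
 */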
2274 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2275 			    struct netlink_callback *cb)
2276 {
2277 	struct net *net = sock_net(skb->sk);
2278 	const struct nlmsghdr *nlh = cb->nlh;
2279 	struct nlattr *tb[NDA_MAX + 1];
2280 	struct neighbour *n;
2281 	int rc, h, s_h = cb->args[1];
2282 	int idx, s_idx = idx = cb->args[2];
2283 	struct neigh_hash_table *nht;
2284 	int filter_master_idx = 0, filter_idx = 0;
2285 	unsigned int flags = NLM_F_MULTI;
2286 	int err;
2287 
2288 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2289 	if (!err) {
2290 		if (tb[NDA_IFINDEX]) {
2291 			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2292 				return -EINVAL;
2293 			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2294 		}
2295 		if (tb[NDA_MASTER]) {
2296 			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2297 				return -EINVAL;
2298 			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2299 		}
2300 		if (filter_idx || filter_master_idx)
2301 			flags |= NLM_F_DUMP_FILTERED;
2302 	}
2303 
2304 	rcu_read_lock_bh();
2305 	nht = rcu_dereference_bh(tbl->nht);
2306 
2307 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2308 		if (h > s_h)
2309 			s_idx = 0;
2310 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2311 		     n != NULL;
2312 		     n = rcu_dereference_bh(n->next)) {
2313 			if (!net_eq(dev_net(n->dev), net))
2314 				continue;
2315 			if (neigh_ifindex_filtered(n->dev, filter_idx))
2316 				continue;
2317 			if (neigh_master_filtered(n->dev, filter_master_idx))
2318 				continue;
2319 			if (idx < s_idx)
2320 				goto next;
2321 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2322 					    cb->nlh->nlmsg_seq,
2323 					    RTM_NEWNEIGH,
2324 					    flags) < 0) {
2325 				rc = -1;
2326 				goto out;
2327 			}
2328 next:
2329 			idx++;
2330 		}
2331 	}
2332 	rc = skb->len;
2333 out:
2334 	rcu_read_unlock_bh();
2335 	cb->args[1] = h;
2336 	cb->args[2] = idx;
2337 	return rc;
2338 }
2339 
2340 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2341 			     struct netlink_callback *cb)
2342 {
2343 	struct pneigh_entry *n;
2344 	struct net *net = sock_net(skb->sk);
2345 	int rc, h, s_h = cb->args[3];
2346 	int idx, s_idx = idx = cb->args[4];
2347 
2348 	read_lock_bh(&tbl->lock);
2349 
2350 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2351 		if (h > s_h)
2352 			s_idx = 0;
2353 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2354 			if (pneigh_net(n) != net)
2355 				continue;
2356 			if (idx < s_idx)
2357 				goto next;
2358 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2359 					    cb->nlh->nlmsg_seq,
2360 					    RTM_NEWNEIGH,
2361 					    NLM_F_MULTI, tbl) < 0) {
2362 				read_unlock_bh(&tbl->lock);
2363 				rc = -1;
2364 				goto out;
2365 			}
2366 		next:
2367 			idx++;
2368 		}
2369 	}
2370 
2371 	read_unlock_bh(&tbl->lock);
2372 	rc = skb->len;
2373 out:
2374 	cb->args[3] = h;
2375 	cb->args[4] = idx;
2376 	return rc;
2377 
2378 }
2379 
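/* RTM_GETNEIGH dump entry point: a full ndmsg with NTF_PROXY set selects
 * the proxy tables, otherwise the regular neighbour tables are dumped.
 */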
2380 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2381 {
2382 	struct neigh_table *tbl;
2383 	int t, family, s_t;
2384 	int proxy = 0;
2385 	int err;
2386 
2387 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2388 
2389 	/* check whether a full ndmsg structure is present; the family
2390 	 * member sits at the same offset in both ndmsg and rtgenmsg
2391 	 */
2392 	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2393 	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2394 		proxy = 1;
2395 
2396 	s_t = cb->args[0];
2397 
2398 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2399 		tbl = neigh_tables[t];
2400 
2401 		if (!tbl)
2402 			continue;
2403 		if (t < s_t || (family && tbl->family != family))
2404 			continue;
2405 		if (t > s_t)
2406 			memset(&cb->args[1], 0, sizeof(cb->args) -
2407 						sizeof(cb->args[0]));
2408 		if (proxy)
2409 			err = pneigh_dump_table(tbl, skb, cb);
2410 		else
2411 			err = neigh_dump_table(tbl, skb, cb);
2412 		if (err < 0)
2413 			break;
2414 	}
2415 
2416 	cb->args[0] = t;
2417 	return skb->len;
2418 }
2419 
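/* Invoke @cb for every neighbour in @tbl.  Runs under rcu_read_lock_bh()
 * plus the table read lock, so callbacks must not sleep and must not
 * modify the table.
 */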
2420 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2421 {
2422 	int chain;
2423 	struct neigh_hash_table *nht;
2424 
2425 	rcu_read_lock_bh();
2426 	nht = rcu_dereference_bh(tbl->nht);
2427 
2428 	read_lock(&tbl->lock); /* avoid resizes */
2429 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2430 		struct neighbour *n;
2431 
2432 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2433 		     n != NULL;
2434 		     n = rcu_dereference_bh(n->next))
2435 			cb(n, cookie);
2436 	}
2437 	read_unlock(&tbl->lock);
2438 	rcu_read_unlock_bh();
2439 }
2440 EXPORT_SYMBOL(neigh_for_each);
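/* Hypothetical usage sketch (not from this file): counting the entries of
 * the IPv4 ARP table with a trivial callback.
 *
 *	static void count_one(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *	...
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_one, &count);
 */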
2441 
2442 /* The tbl->lock must be held as a writer and BH disabled. */
2443 void __neigh_for_each_release(struct neigh_table *tbl,
2444 			      int (*cb)(struct neighbour *))
2445 {
2446 	int chain;
2447 	struct neigh_hash_table *nht;
2448 
2449 	nht = rcu_dereference_protected(tbl->nht,
2450 					lockdep_is_held(&tbl->lock));
2451 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2452 		struct neighbour *n;
2453 		struct neighbour __rcu **np;
2454 
2455 		np = &nht->hash_buckets[chain];
2456 		while ((n = rcu_dereference_protected(*np,
2457 					lockdep_is_held(&tbl->lock))) != NULL) {
2458 			int release;
2459 
2460 			write_lock(&n->lock);
2461 			release = cb(n);
2462 			if (release) {
2463 				rcu_assign_pointer(*np,
2464 					rcu_dereference_protected(n->next,
2465 						lockdep_is_held(&tbl->lock)));
2466 				n->dead = 1;
2467 			} else
2468 				np = &n->next;
2469 			write_unlock(&n->lock);
2470 			if (release)
2471 				neigh_cleanup_and_release(n);
2472 		}
2473 	}
2474 }
2475 EXPORT_SYMBOL(__neigh_for_each_release);
2476 
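/* Resolve @addr through the neighbour table selected by @index and
 * transmit @skb via the entry's output function; NEIGH_LINK_TABLE instead
 * builds a link-layer header directly and queues the packet.
 */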
2477 int neigh_xmit(int index, struct net_device *dev,
2478 	       const void *addr, struct sk_buff *skb)
2479 {
2480 	int err = -EAFNOSUPPORT;
2481 	if (likely(index < NEIGH_NR_TABLES)) {
2482 		struct neigh_table *tbl;
2483 		struct neighbour *neigh;
2484 
2485 		tbl = neigh_tables[index];
2486 		if (!tbl)
2487 			goto out;
2488 		rcu_read_lock_bh();
2489 		neigh = __neigh_lookup_noref(tbl, addr, dev);
2490 		if (!neigh)
2491 			neigh = __neigh_create(tbl, addr, dev, false);
2492 		err = PTR_ERR(neigh);
2493 		if (IS_ERR(neigh)) {
2494 			rcu_read_unlock_bh();
2495 			goto out_kfree_skb;
2496 		}
2497 		err = neigh->output(neigh, skb);
2498 		rcu_read_unlock_bh();
2499 	}
2500 	else if (index == NEIGH_LINK_TABLE) {
2501 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2502 				      addr, NULL, skb->len);
2503 		if (err < 0)
2504 			goto out_kfree_skb;
2505 		err = dev_queue_xmit(skb);
2506 	}
2507 out:
2508 	return err;
2509 out_kfree_skb:
2510 	kfree_skb(skb);
2511 	goto out;
2512 }
2513 EXPORT_SYMBOL(neigh_xmit);
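/* Hypothetical caller sketch (not from this file): a protocol with a
 * prepared skb and an IPv4 next-hop address daddr (__be32) could send via
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &daddr, skb);
 *
 * where NEIGH_ARP_TABLE selects the ARP table for address resolution.
 */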
2514 
2515 #ifdef CONFIG_PROC_FS
2516 
2517 static struct neighbour *neigh_get_first(struct seq_file *seq)
2518 {
2519 	struct neigh_seq_state *state = seq->private;
2520 	struct net *net = seq_file_net(seq);
2521 	struct neigh_hash_table *nht = state->nht;
2522 	struct neighbour *n = NULL;
2523 	int bucket = state->bucket;
2524 
2525 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2526 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2527 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2528 
2529 		while (n) {
2530 			if (!net_eq(dev_net(n->dev), net))
2531 				goto next;
2532 			if (state->neigh_sub_iter) {
2533 				loff_t fakep = 0;
2534 				void *v;
2535 
2536 				v = state->neigh_sub_iter(state, n, &fakep);
2537 				if (!v)
2538 					goto next;
2539 			}
2540 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2541 				break;
2542 			if (n->nud_state & ~NUD_NOARP)
2543 				break;
2544 next:
2545 			n = rcu_dereference_bh(n->next);
2546 		}
2547 
2548 		if (n)
2549 			break;
2550 	}
2551 	state->bucket = bucket;
2552 
2553 	return n;
2554 }
2555 
2556 static struct neighbour *neigh_get_next(struct seq_file *seq,
2557 					struct neighbour *n,
2558 					loff_t *pos)
2559 {
2560 	struct neigh_seq_state *state = seq->private;
2561 	struct net *net = seq_file_net(seq);
2562 	struct neigh_hash_table *nht = state->nht;
2563 
2564 	if (state->neigh_sub_iter) {
2565 		void *v = state->neigh_sub_iter(state, n, pos);
2566 		if (v)
2567 			return n;
2568 	}
2569 	n = rcu_dereference_bh(n->next);
2570 
2571 	while (1) {
2572 		while (n) {
2573 			if (!net_eq(dev_net(n->dev), net))
2574 				goto next;
2575 			if (state->neigh_sub_iter) {
2576 				void *v = state->neigh_sub_iter(state, n, pos);
2577 				if (v)
2578 					return n;
2579 				goto next;
2580 			}
2581 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2582 				break;
2583 
2584 			if (n->nud_state & ~NUD_NOARP)
2585 				break;
2586 next:
2587 			n = rcu_dereference_bh(n->next);
2588 		}
2589 
2590 		if (n)
2591 			break;
2592 
2593 		if (++state->bucket >= (1 << nht->hash_shift))
2594 			break;
2595 
2596 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2597 	}
2598 
2599 	if (n && pos)
2600 		--(*pos);
2601 	return n;
2602 }
2603 
2604 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2605 {
2606 	struct neighbour *n = neigh_get_first(seq);
2607 
2608 	if (n) {
2609 		--(*pos);
2610 		while (*pos) {
2611 			n = neigh_get_next(seq, n, pos);
2612 			if (!n)
2613 				break;
2614 		}
2615 	}
2616 	return *pos ? NULL : n;
2617 }
2618 
2619 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2620 {
2621 	struct neigh_seq_state *state = seq->private;
2622 	struct net *net = seq_file_net(seq);
2623 	struct neigh_table *tbl = state->tbl;
2624 	struct pneigh_entry *pn = NULL;
2625 	int bucket = state->bucket;
2626 
2627 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2628 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2629 		pn = tbl->phash_buckets[bucket];
2630 		while (pn && !net_eq(pneigh_net(pn), net))
2631 			pn = pn->next;
2632 		if (pn)
2633 			break;
2634 	}
2635 	state->bucket = bucket;
2636 
2637 	return pn;
2638 }
2639 
2640 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2641 					    struct pneigh_entry *pn,
2642 					    loff_t *pos)
2643 {
2644 	struct neigh_seq_state *state = seq->private;
2645 	struct net *net = seq_file_net(seq);
2646 	struct neigh_table *tbl = state->tbl;
2647 
2648 	do {
2649 		pn = pn->next;
2650 	} while (pn && !net_eq(pneigh_net(pn), net));
2651 
2652 	while (!pn) {
2653 		if (++state->bucket > PNEIGH_HASHMASK)
2654 			break;
2655 		pn = tbl->phash_buckets[state->bucket];
2656 		while (pn && !net_eq(pneigh_net(pn), net))
2657 			pn = pn->next;
2658 		if (pn)
2659 			break;
2660 	}
2661 
2662 	if (pn && pos)
2663 		--(*pos);
2664 
2665 	return pn;
2666 }
2667 
2668 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2669 {
2670 	struct pneigh_entry *pn = pneigh_get_first(seq);
2671 
2672 	if (pn) {
2673 		--(*pos);
2674 		while (*pos) {
2675 			pn = pneigh_get_next(seq, pn, pos);
2676 			if (!pn)
2677 				break;
2678 		}
2679 	}
2680 	return *pos ? NULL : pn;
2681 }
2682 
2683 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2684 {
2685 	struct neigh_seq_state *state = seq->private;
2686 	void *rc;
2687 	loff_t idxpos = *pos;
2688 
2689 	rc = neigh_get_idx(seq, &idxpos);
2690 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2691 		rc = pneigh_get_idx(seq, &idxpos);
2692 
2693 	return rc;
2694 }
2695 
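/* seq_file helpers shared by /proc users such as the ARP code: iterate
 * the main hash table first and then, unless NEIGH_SEQ_NEIGH_ONLY is set,
 * the proxy entries.  The whole walk runs under rcu_read_lock_bh(), taken
 * here and dropped in neigh_seq_stop().
 */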
2696 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2697 	__acquires(rcu_bh)
2698 {
2699 	struct neigh_seq_state *state = seq->private;
2700 
2701 	state->tbl = tbl;
2702 	state->bucket = 0;
2703 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2704 
2705 	rcu_read_lock_bh();
2706 	state->nht = rcu_dereference_bh(tbl->nht);
2707 
2708 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2709 }
2710 EXPORT_SYMBOL(neigh_seq_start);
2711 
2712 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2713 {
2714 	struct neigh_seq_state *state;
2715 	void *rc;
2716 
2717 	if (v == SEQ_START_TOKEN) {
2718 		rc = neigh_get_first(seq);
2719 		goto out;
2720 	}
2721 
2722 	state = seq->private;
2723 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2724 		rc = neigh_get_next(seq, v, NULL);
2725 		if (rc)
2726 			goto out;
2727 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2728 			rc = pneigh_get_first(seq);
2729 	} else {
2730 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2731 		rc = pneigh_get_next(seq, v, NULL);
2732 	}
2733 out:
2734 	++(*pos);
2735 	return rc;
2736 }
2737 EXPORT_SYMBOL(neigh_seq_next);
2738 
2739 void neigh_seq_stop(struct seq_file *seq, void *v)
2740 	__releases(rcu_bh)
2741 {
2742 	rcu_read_unlock_bh();
2743 }
2744 EXPORT_SYMBOL(neigh_seq_stop);
2745 
2746 /* statistics via seq_file */
2747 
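/* Per-CPU statistics iterator for the per-table file under
 * /proc/net/stat/: position 0 is the header token, positions 1..n map to
 * possible CPUs.
 */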
2748 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2749 {
2750 	struct neigh_table *tbl = seq->private;
2751 	int cpu;
2752 
2753 	if (*pos == 0)
2754 		return SEQ_START_TOKEN;
2755 
2756 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2757 		if (!cpu_possible(cpu))
2758 			continue;
2759 		*pos = cpu+1;
2760 		return per_cpu_ptr(tbl->stats, cpu);
2761 	}
2762 	return NULL;
2763 }
2764 
2765 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2766 {
2767 	struct neigh_table *tbl = seq->private;
2768 	int cpu;
2769 
2770 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2771 		if (!cpu_possible(cpu))
2772 			continue;
2773 		*pos = cpu+1;
2774 		return per_cpu_ptr(tbl->stats, cpu);
2775 	}
2776 	return NULL;
2777 }
2778 
2779 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2780 {
2781 
2782 }
2783 
2784 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2785 {
2786 	struct neigh_table *tbl = seq->private;
2787 	struct neigh_statistics *st = v;
2788 
2789 	if (v == SEQ_START_TOKEN) {
2790 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2791 		return 0;
2792 	}
2793 
2794 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2795 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2796 		   atomic_read(&tbl->entries),
2797 
2798 		   st->allocs,
2799 		   st->destroys,
2800 		   st->hash_grows,
2801 
2802 		   st->lookups,
2803 		   st->hits,
2804 
2805 		   st->res_failed,
2806 
2807 		   st->rcv_probes_mcast,
2808 		   st->rcv_probes_ucast,
2809 
2810 		   st->periodic_gc_runs,
2811 		   st->forced_gc_runs,
2812 		   st->unres_discards,
2813 		   st->table_fulls
2814 		   );
2815 
2816 	return 0;
2817 }
2818 
2819 static const struct seq_operations neigh_stat_seq_ops = {
2820 	.start	= neigh_stat_seq_start,
2821 	.next	= neigh_stat_seq_next,
2822 	.stop	= neigh_stat_seq_stop,
2823 	.show	= neigh_stat_seq_show,
2824 };
2825 
2826 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2827 {
2828 	int ret = seq_open(file, &neigh_stat_seq_ops);
2829 
2830 	if (!ret) {
2831 		struct seq_file *sf = file->private_data;
2832 		sf->private = PDE_DATA(inode);
2833 	}
2834 	return ret;
2835 }
2836 
2837 static const struct file_operations neigh_stat_seq_fops = {
2838 	.owner	 = THIS_MODULE,
2839 	.open 	 = neigh_stat_seq_open,
2840 	.read	 = seq_read,
2841 	.llseek	 = seq_lseek,
2842 	.release = seq_release,
2843 };
2844 
2845 #endif /* CONFIG_PROC_FS */
2846 
2847 static inline size_t neigh_nlmsg_size(void)
2848 {
2849 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2850 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2851 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2852 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2853 	       + nla_total_size(4); /* NDA_PROBES */
2854 }
2855 
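/* Broadcast a netlink notification about @n to RTNLGRP_NEIGH listeners;
 * the skb is allocated with GFP_ATOMIC because callers may hold locks or
 * run in softirq context.
 */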
2856 static void __neigh_notify(struct neighbour *n, int type, int flags)
2857 {
2858 	struct net *net = dev_net(n->dev);
2859 	struct sk_buff *skb;
2860 	int err = -ENOBUFS;
2861 
2862 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2863 	if (skb == NULL)
2864 		goto errout;
2865 
2866 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2867 	if (err < 0) {
2868 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2869 		WARN_ON(err == -EMSGSIZE);
2870 		kfree_skb(skb);
2871 		goto errout;
2872 	}
2873 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2874 	return;
2875 errout:
2876 	if (err < 0)
2877 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2878 }
2879 
2880 void neigh_app_ns(struct neighbour *n)
2881 {
2882 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2883 }
2884 EXPORT_SYMBOL(neigh_app_ns);
2885 
2886 #ifdef CONFIG_SYSCTL
2887 static int zero;
2888 static int int_max = INT_MAX;
2889 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2890 
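/* The legacy "unres_qlen" sysctl is expressed in packets while the kernel
 * stores bytes; convert using SKB_TRUESIZE(ETH_FRAME_LEN) per packet in
 * both directions.
 */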
2891 static int proc_unres_qlen(struct ctl_table *ctl, int write,
2892 			   void __user *buffer, size_t *lenp, loff_t *ppos)
2893 {
2894 	int size, ret;
2895 	struct ctl_table tmp = *ctl;
2896 
2897 	tmp.extra1 = &zero;
2898 	tmp.extra2 = &unres_qlen_max;
2899 	tmp.data = &size;
2900 
2901 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2902 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2903 
2904 	if (write && !ret)
2905 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2906 	return ret;
2907 }
2908 
2909 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2910 						   int family)
2911 {
2912 	switch (family) {
2913 	case AF_INET:
2914 		return __in_dev_arp_parms_get_rcu(dev);
2915 	case AF_INET6:
2916 		return __in6_dev_nd_parms_get_rcu(dev);
2917 	}
2918 	return NULL;
2919 }
2920 
2921 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2922 				  int index)
2923 {
2924 	struct net_device *dev;
2925 	int family = neigh_parms_family(p);
2926 
2927 	rcu_read_lock();
2928 	for_each_netdev_rcu(net, dev) {
2929 		struct neigh_parms *dst_p =
2930 				neigh_get_dev_parms_rcu(dev, family);
2931 
2932 		if (dst_p && !test_bit(index, dst_p->data_state))
2933 			dst_p->data[index] = p->data[index];
2934 	}
2935 	rcu_read_unlock();
2936 }
2937 
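/* After a sysctl write: mark the field as explicitly set in data_state,
 * fire the netevent for delay_probe_time changes and, for the "default"
 * entry (no device), propagate the new value to all devices that still
 * use the default.
 */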
2938 static void neigh_proc_update(struct ctl_table *ctl, int write)
2939 {
2940 	struct net_device *dev = ctl->extra1;
2941 	struct neigh_parms *p = ctl->extra2;
2942 	struct net *net = neigh_parms_net(p);
2943 	int index = (int *) ctl->data - p->data;
2944 
2945 	if (!write)
2946 		return;
2947 
2948 	set_bit(index, p->data_state);
2949 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2950 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2951 	if (!dev) /* NULL dev means this is default value */
2952 		neigh_copy_dflt_parms(net, p, index);
2953 }
2954 
2955 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2956 					   void __user *buffer,
2957 					   size_t *lenp, loff_t *ppos)
2958 {
2959 	struct ctl_table tmp = *ctl;
2960 	int ret;
2961 
2962 	tmp.extra1 = &zero;
2963 	tmp.extra2 = &int_max;
2964 
2965 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2966 	neigh_proc_update(ctl, write);
2967 	return ret;
2968 }
2969 
2970 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2971 			void __user *buffer, size_t *lenp, loff_t *ppos)
2972 {
2973 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2974 
2975 	neigh_proc_update(ctl, write);
2976 	return ret;
2977 }
2978 EXPORT_SYMBOL(neigh_proc_dointvec);
2979 
2980 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2981 				void __user *buffer,
2982 				size_t *lenp, loff_t *ppos)
2983 {
2984 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2985 
2986 	neigh_proc_update(ctl, write);
2987 	return ret;
2988 }
2989 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2990 
2991 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2992 					      void __user *buffer,
2993 					      size_t *lenp, loff_t *ppos)
2994 {
2995 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2996 
2997 	neigh_proc_update(ctl, write);
2998 	return ret;
2999 }
3000 
3001 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3002 				   void __user *buffer,
3003 				   size_t *lenp, loff_t *ppos)
3004 {
3005 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3006 
3007 	neigh_proc_update(ctl, write);
3008 	return ret;
3009 }
3010 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3011 
3012 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3013 					  void __user *buffer,
3014 					  size_t *lenp, loff_t *ppos)
3015 {
3016 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3017 
3018 	neigh_proc_update(ctl, write);
3019 	return ret;
3020 }
3021 
3022 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3023 					  void __user *buffer,
3024 					  size_t *lenp, loff_t *ppos)
3025 {
3026 	struct neigh_parms *p = ctl->extra2;
3027 	int ret;
3028 
3029 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3030 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3031 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3032 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3033 	else
3034 		ret = -1;
3035 
3036 	if (write && ret == 0) {
3037 		/* update reachable_time as well, otherwise, the change will
3038 		 * only be effective after the next time neigh_periodic_work
3039 		 * decides to recompute it
3040 		 */
3041 		p->reachable_time =
3042 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3043 	}
3044 	return ret;
3045 }
3046 
3047 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3048 	(&((struct neigh_parms *) 0)->data[index])
3049 
3050 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3051 	[NEIGH_VAR_ ## attr] = { \
3052 		.procname	= name, \
3053 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3054 		.maxlen		= sizeof(int), \
3055 		.mode		= mval, \
3056 		.proc_handler	= proc, \
3057 	}
3058 
3059 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3060 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3061 
3062 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3063 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3064 
3065 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3066 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3067 
3068 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3069 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3070 
3071 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3072 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3073 
3074 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3075 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3076 
3077 static struct neigh_sysctl_table {
3078 	struct ctl_table_header *sysctl_header;
3079 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3080 } neigh_sysctl_template __read_mostly = {
3081 	.neigh_vars = {
3082 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3083 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3084 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3085 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3086 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3087 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3088 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3089 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3090 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3091 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3092 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3093 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3094 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3095 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3096 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3097 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3098 		[NEIGH_VAR_GC_INTERVAL] = {
3099 			.procname	= "gc_interval",
3100 			.maxlen		= sizeof(int),
3101 			.mode		= 0644,
3102 			.proc_handler	= proc_dointvec_jiffies,
3103 		},
3104 		[NEIGH_VAR_GC_THRESH1] = {
3105 			.procname	= "gc_thresh1",
3106 			.maxlen		= sizeof(int),
3107 			.mode		= 0644,
3108 			.extra1 	= &zero,
3109 			.extra2		= &int_max,
3110 			.proc_handler	= proc_dointvec_minmax,
3111 		},
3112 		[NEIGH_VAR_GC_THRESH2] = {
3113 			.procname	= "gc_thresh2",
3114 			.maxlen		= sizeof(int),
3115 			.mode		= 0644,
3116 			.extra1 	= &zero,
3117 			.extra2		= &int_max,
3118 			.proc_handler	= proc_dointvec_minmax,
3119 		},
3120 		[NEIGH_VAR_GC_THRESH3] = {
3121 			.procname	= "gc_thresh3",
3122 			.maxlen		= sizeof(int),
3123 			.mode		= 0644,
3124 			.extra1 	= &zero,
3125 			.extra2		= &int_max,
3126 			.proc_handler	= proc_dointvec_minmax,
3127 		},
3128 		{},
3129 	},
3130 };
3131 
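/* Clone neigh_sysctl_template for one parms instance, point the entries'
 * .data at that instance (or at the table-wide gc fields for the default
 * entry) and register the table under net/<ipv4|ipv6>/neigh/<dev|default>.
 */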
3132 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3133 			  proc_handler *handler)
3134 {
3135 	int i;
3136 	struct neigh_sysctl_table *t;
3137 	const char *dev_name_source;
3138 	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3139 	char *p_name;
3140 
3141 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3142 	if (!t)
3143 		goto err;
3144 
3145 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3146 		t->neigh_vars[i].data += (long) p;
3147 		t->neigh_vars[i].extra1 = dev;
3148 		t->neigh_vars[i].extra2 = p;
3149 	}
3150 
3151 	if (dev) {
3152 		dev_name_source = dev->name;
3153 		/* Terminate the table early */
3154 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3155 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3156 	} else {
3157 		struct neigh_table *tbl = p->tbl;
3158 		dev_name_source = "default";
3159 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3160 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3161 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3162 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3163 	}
3164 
3165 	if (handler) {
3166 		/* RetransTime */
3167 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3168 		/* ReachableTime */
3169 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3170 		/* RetransTime (in milliseconds)*/
3171 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3172 		/* ReachableTime (in milliseconds) */
3173 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3174 	} else {
3175 		/* These handlers update p->reachable_time after
3176 		 * base_reachable_time(_ms) is set, so the new interval takes
3177 		 * effect at the next neighbour update instead of waiting for
3178 		 * neigh_periodic_work to recompute it (which can take minutes).
3179 		 * Any handler that replaces them should do the same.
3180 		 */
3181 		/* ReachableTime */
3182 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3183 			neigh_proc_base_reachable_time;
3184 		/* ReachableTime (in milliseconds) */
3185 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3186 			neigh_proc_base_reachable_time;
3187 	}
3188 
3189 	/* Don't export sysctls to unprivileged users */
3190 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3191 		t->neigh_vars[0].procname = NULL;
3192 
3193 	switch (neigh_parms_family(p)) {
3194 	case AF_INET:
3195 	      p_name = "ipv4";
3196 	      break;
3197 	case AF_INET6:
3198 	      p_name = "ipv6";
3199 	      break;
3200 	default:
3201 	      BUG();
3202 	}
3203 
3204 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3205 		p_name, dev_name_source);
3206 	t->sysctl_header =
3207 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3208 	if (!t->sysctl_header)
3209 		goto free;
3210 
3211 	p->sysctl_table = t;
3212 	return 0;
3213 
3214 free:
3215 	kfree(t);
3216 err:
3217 	return -ENOBUFS;
3218 }
3219 EXPORT_SYMBOL(neigh_sysctl_register);
3220 
3221 void neigh_sysctl_unregister(struct neigh_parms *p)
3222 {
3223 	if (p->sysctl_table) {
3224 		struct neigh_sysctl_table *t = p->sysctl_table;
3225 		p->sysctl_table = NULL;
3226 		unregister_net_sysctl_table(t->sysctl_header);
3227 		kfree(t);
3228 	}
3229 }
3230 EXPORT_SYMBOL(neigh_sysctl_unregister);
3231 
3232 #endif	/* CONFIG_SYSCTL */
3233 
3234 static int __init neigh_init(void)
3235 {
3236 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3237 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3238 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3239 
3240 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3241 		      NULL);
3242 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3243 
3244 	return 0;
3245 }
3246 
3247 subsys_initcall(neigh_init);
3248 
3249