1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Generic address resolution entity
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
8  *
9  *	Fixes:
10  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
11  *	Harald Welte		Add neighbour cache statistics like rtstat
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/slab.h>
17 #include <linux/kmemleak.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/arp.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <net/netevent.h>
34 #include <net/netlink.h>
35 #include <linux/rtnetlink.h>
36 #include <linux/random.h>
37 #include <linux/string.h>
38 #include <linux/log2.h>
39 #include <linux/inetdevice.h>
40 #include <net/addrconf.h>
41 
42 #include <trace/events/neigh.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(struct timer_list *t);
55 static void __neigh_notify(struct neighbour *n, int type, int flags,
56 			   u32 pid);
57 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
58 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
59 				    struct net_device *dev);
60 
61 #ifdef CONFIG_PROC_FS
62 static const struct seq_operations neigh_stat_seq_ops;
63 #endif
64 
65 /*
66    Neighbour hash table buckets are protected with rwlock tbl->lock.
67 
68    - All the scans/updates to hash buckets MUST be made under this lock.
69    - NOTHING clever should be done under this lock: no callbacks
70      to protocol backends, no attempts to send something to the network.
71      This will result in deadlocks if the backend/driver wants to use the
72      neighbour cache.
73    - If the entry requires some non-trivial actions, increase
74      its reference count and release table lock.
75 
76    Neighbour entries are protected:
77    - with reference count.
78    - with rwlock neigh->lock
79 
80    Reference count prevents destruction.
81 
82    neigh->lock mainly serializes ll address data and its validity state.
83    However, the same lock is used to protect other entry fields:
84     - timer
85     - resolution queue
86 
87    Again, nothing clever shall be done under neigh->lock;
88    the most complicated procedure we allow is dev->hard_header.
89    It is assumed that dev->hard_header is simplistic and does
90    not make callbacks to neighbour tables.
91  */
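/* A minimal sketch of that pattern (illustration only, not a helper in
 * this file): take a reference under the table lock, drop the lock, then
 * do the slow work.
 *
 *	write_lock_bh(&tbl->lock);
 *	n = <entry found in a hash bucket>;
 *	neigh_hold(n);			take a reference first
 *	write_unlock_bh(&tbl->lock);	then drop the table lock
 *	<slow work: driver callbacks, transmits, ...>
 *	neigh_release(n);		drop the reference when done
 */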
92 
93 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
94 {
95 	kfree_skb(skb);
96 	return -ENETDOWN;
97 }
98 
99 static void neigh_cleanup_and_release(struct neighbour *neigh)
100 {
101 	trace_neigh_cleanup_and_release(neigh, 0);
102 	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
103 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
104 	neigh_release(neigh);
105 }
106 
107 /*
108  * It is a random distribution in the interval (1/2)*base...(3/2)*base.
109  * It corresponds to the default IPv6 settings and is not overridable,
110  * because it is a really reasonable choice.
111  */
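/* For a base of 30 * HZ, for example, this returns a value in
 * [15 * HZ, 45 * HZ), i.e. the (1/2)*base...(3/2)*base interval
 * described above.
 */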
112 
113 unsigned long neigh_rand_reach_time(unsigned long base)
114 {
115 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
116 }
117 EXPORT_SYMBOL(neigh_rand_reach_time);
118 
119 static void neigh_mark_dead(struct neighbour *n)
120 {
121 	n->dead = 1;
122 	if (!list_empty(&n->gc_list)) {
123 		list_del_init(&n->gc_list);
124 		atomic_dec(&n->tbl->gc_entries);
125 	}
126 }
127 
128 static void neigh_update_gc_list(struct neighbour *n)
129 {
130 	bool on_gc_list, exempt_from_gc;
131 
132 	write_lock_bh(&n->tbl->lock);
133 	write_lock(&n->lock);
134 
135 	if (n->dead)
136 		goto out;
137 
138 	/* remove from the gc list if new state is permanent or if neighbor
139 	 * is externally learned; otherwise entry should be on the gc list
140 	 */
141 	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
142 			 n->flags & NTF_EXT_LEARNED;
143 	on_gc_list = !list_empty(&n->gc_list);
144 
145 	if (exempt_from_gc && on_gc_list) {
146 		list_del_init(&n->gc_list);
147 		atomic_dec(&n->tbl->gc_entries);
148 	} else if (!exempt_from_gc && !on_gc_list) {
149 		/* add entries to the tail; cleaning removes from the front */
150 		list_add_tail(&n->gc_list, &n->tbl->gc_list);
151 		atomic_inc(&n->tbl->gc_entries);
152 	}
153 
154 out:
155 	write_unlock(&n->lock);
156 	write_unlock_bh(&n->tbl->lock);
157 }
158 
159 static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
160 				     int *notify)
161 {
162 	bool rc = false;
163 	u8 ndm_flags;
164 
165 	if (!(flags & NEIGH_UPDATE_F_ADMIN))
166 		return rc;
167 
168 	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
169 	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
170 		if (ndm_flags & NTF_EXT_LEARNED)
171 			neigh->flags |= NTF_EXT_LEARNED;
172 		else
173 			neigh->flags &= ~NTF_EXT_LEARNED;
174 		rc = true;
175 		*notify = 1;
176 	}
177 
178 	return rc;
179 }
180 
181 static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
182 		      struct neigh_table *tbl)
183 {
184 	bool retval = false;
185 
186 	write_lock(&n->lock);
187 	if (refcount_read(&n->refcnt) == 1) {
188 		struct neighbour *neigh;
189 
190 		neigh = rcu_dereference_protected(n->next,
191 						  lockdep_is_held(&tbl->lock));
192 		rcu_assign_pointer(*np, neigh);
193 		neigh_mark_dead(n);
194 		retval = true;
195 	}
196 	write_unlock(&n->lock);
197 	if (retval)
198 		neigh_cleanup_and_release(n);
199 	return retval;
200 }
201 
202 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
203 {
204 	struct neigh_hash_table *nht;
205 	void *pkey = ndel->primary_key;
206 	u32 hash_val;
207 	struct neighbour *n;
208 	struct neighbour __rcu **np;
209 
210 	nht = rcu_dereference_protected(tbl->nht,
211 					lockdep_is_held(&tbl->lock));
212 	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
213 	hash_val = hash_val >> (32 - nht->hash_shift);
214 
215 	np = &nht->hash_buckets[hash_val];
216 	while ((n = rcu_dereference_protected(*np,
217 					      lockdep_is_held(&tbl->lock)))) {
218 		if (n == ndel)
219 			return neigh_del(n, np, tbl);
220 		np = &n->next;
221 	}
222 	return false;
223 }
224 
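/* Synchronously shrink the cache towards gc_thresh2: walk gc_list and
 * drop unreferenced entries that are NUD_FAILED, NUD_NOARP, multicast,
 * or have not been updated within the last five seconds.  The scan is
 * time-bounded to roughly one millisecond per run.
 */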
225 static int neigh_forced_gc(struct neigh_table *tbl)
226 {
227 	int max_clean = atomic_read(&tbl->gc_entries) -
228 			READ_ONCE(tbl->gc_thresh2);
229 	u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
230 	unsigned long tref = jiffies - 5 * HZ;
231 	struct neighbour *n, *tmp;
232 	int shrunk = 0;
233 	int loop = 0;
234 
235 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
236 
237 	write_lock_bh(&tbl->lock);
238 
239 	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
240 		if (refcount_read(&n->refcnt) == 1) {
241 			bool remove = false;
242 
243 			write_lock(&n->lock);
244 			if ((n->nud_state == NUD_FAILED) ||
245 			    (n->nud_state == NUD_NOARP) ||
246 			    (tbl->is_multicast &&
247 			     tbl->is_multicast(n->primary_key)) ||
248 			    !time_in_range(n->updated, tref, jiffies))
249 				remove = true;
250 			write_unlock(&n->lock);
251 
252 			if (remove && neigh_remove_one(n, tbl))
253 				shrunk++;
254 			if (shrunk >= max_clean)
255 				break;
256 			if (++loop == 16) {
257 				if (ktime_get_ns() > tmax)
258 					goto unlock;
259 				loop = 0;
260 			}
261 		}
262 	}
263 
264 	WRITE_ONCE(tbl->last_flush, jiffies);
265 unlock:
266 	write_unlock_bh(&tbl->lock);
267 
268 	return shrunk;
269 }
270 
271 static void neigh_add_timer(struct neighbour *n, unsigned long when)
272 {
273 	/* Keep a safe distance from the jiffies - LONG_MAX point while the
274 	 * timer is running in DELAY/PROBE state, but still show large times
275 	 * in the past to user space.
276 	 */
277 	unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
278 
279 	neigh_hold(n);
280 	if (!time_in_range(n->confirmed, mint, jiffies))
281 		n->confirmed = mint;
282 	if (time_before(n->used, n->confirmed))
283 		n->used = n->confirmed;
284 	if (unlikely(mod_timer(&n->timer, when))) {
285 		printk("NEIGH: BUG, double timer add, state is %x\n",
286 		       n->nud_state);
287 		dump_stack();
288 	}
289 }
290 
291 static int neigh_del_timer(struct neighbour *n)
292 {
293 	if ((n->nud_state & NUD_IN_TIMER) &&
294 	    del_timer(&n->timer)) {
295 		neigh_release(n);
296 		return 1;
297 	}
298 	return 0;
299 }
300 
301 static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
302 {
303 	struct sk_buff_head tmp;
304 	unsigned long flags;
305 	struct sk_buff *skb;
306 
307 	skb_queue_head_init(&tmp);
308 	spin_lock_irqsave(&list->lock, flags);
309 	skb = skb_peek(list);
310 	while (skb != NULL) {
311 		struct sk_buff *skb_next = skb_peek_next(skb, list);
312 		if (net == NULL || net_eq(dev_net(skb->dev), net)) {
313 			__skb_unlink(skb, list);
314 			__skb_queue_tail(&tmp, skb);
315 		}
316 		skb = skb_next;
317 	}
318 	spin_unlock_irqrestore(&list->lock, flags);
319 
320 	while ((skb = __skb_dequeue(&tmp))) {
321 		dev_put(skb->dev);
322 		kfree_skb(skb);
323 	}
324 }
325 
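/* Unlink every entry belonging to @dev (or every entry when @dev is
 * NULL), optionally leaving NUD_PERMANENT entries in place.  Called
 * with tbl->lock held for writing.
 */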
326 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
327 			    bool skip_perm)
328 {
329 	int i;
330 	struct neigh_hash_table *nht;
331 
332 	nht = rcu_dereference_protected(tbl->nht,
333 					lockdep_is_held(&tbl->lock));
334 
335 	for (i = 0; i < (1 << nht->hash_shift); i++) {
336 		struct neighbour *n;
337 		struct neighbour __rcu **np = &nht->hash_buckets[i];
338 
339 		while ((n = rcu_dereference_protected(*np,
340 					lockdep_is_held(&tbl->lock))) != NULL) {
341 			if (dev && n->dev != dev) {
342 				np = &n->next;
343 				continue;
344 			}
345 			if (skip_perm && n->nud_state & NUD_PERMANENT) {
346 				np = &n->next;
347 				continue;
348 			}
349 			rcu_assign_pointer(*np,
350 				   rcu_dereference_protected(n->next,
351 						lockdep_is_held(&tbl->lock)));
352 			write_lock(&n->lock);
353 			neigh_del_timer(n);
354 			neigh_mark_dead(n);
355 			if (refcount_read(&n->refcnt) != 1) {
356 				/* The most unpleasant situation.
357 				   We must destroy neighbour entry,
358 				   but someone still uses it.
359 
360 				   The destroy will be delayed until
361 				   the last user releases us, but
362 				   we must kill timers etc. and move
363 				   it to safe state.
364 				 */
365 				__skb_queue_purge(&n->arp_queue);
366 				n->arp_queue_len_bytes = 0;
367 				n->output = neigh_blackhole;
368 				if (n->nud_state & NUD_VALID)
369 					n->nud_state = NUD_NOARP;
370 				else
371 					n->nud_state = NUD_NONE;
372 				neigh_dbg(2, "neigh %p is stray\n", n);
373 			}
374 			write_unlock(&n->lock);
375 			neigh_cleanup_and_release(n);
376 		}
377 	}
378 }
379 
380 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
381 {
382 	write_lock_bh(&tbl->lock);
383 	neigh_flush_dev(tbl, dev, false);
384 	write_unlock_bh(&tbl->lock);
385 }
386 EXPORT_SYMBOL(neigh_changeaddr);
387 
388 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
389 			  bool skip_perm)
390 {
391 	write_lock_bh(&tbl->lock);
392 	neigh_flush_dev(tbl, dev, skip_perm);
393 	pneigh_ifdown_and_unlock(tbl, dev);
394 	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
395 	if (skb_queue_empty_lockless(&tbl->proxy_queue))
396 		del_timer_sync(&tbl->proxy_timer);
397 	return 0;
398 }
399 
400 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
401 {
402 	__neigh_ifdown(tbl, dev, true);
403 	return 0;
404 }
405 EXPORT_SYMBOL(neigh_carrier_down);
406 
407 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
408 {
409 	__neigh_ifdown(tbl, dev, false);
410 	return 0;
411 }
412 EXPORT_SYMBOL(neigh_ifdown);
413 
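/* Allocate a new cache entry.  Unless @exempt_from_gc, the entry is
 * accounted against the gc thresholds, and a synchronous forced gc is
 * attempted once gc_thresh3 (or gc_thresh2 after a quiet period) is
 * reached.
 */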
414 static struct neighbour *neigh_alloc(struct neigh_table *tbl,
415 				     struct net_device *dev,
416 				     u8 flags, bool exempt_from_gc)
417 {
418 	struct neighbour *n = NULL;
419 	unsigned long now = jiffies;
420 	int entries, gc_thresh3;
421 
422 	if (exempt_from_gc)
423 		goto do_alloc;
424 
425 	entries = atomic_inc_return(&tbl->gc_entries) - 1;
426 	gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
427 	if (entries >= gc_thresh3 ||
428 	    (entries >= READ_ONCE(tbl->gc_thresh2) &&
429 	     time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
430 		if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
431 			net_info_ratelimited("%s: neighbor table overflow!\n",
432 					     tbl->id);
433 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
434 			goto out_entries;
435 		}
436 	}
437 
438 do_alloc:
439 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
440 	if (!n)
441 		goto out_entries;
442 
443 	__skb_queue_head_init(&n->arp_queue);
444 	rwlock_init(&n->lock);
445 	seqlock_init(&n->ha_lock);
446 	n->updated	  = n->used = now;
447 	n->nud_state	  = NUD_NONE;
448 	n->output	  = neigh_blackhole;
449 	n->flags	  = flags;
450 	seqlock_init(&n->hh.hh_lock);
451 	n->parms	  = neigh_parms_clone(&tbl->parms);
452 	timer_setup(&n->timer, neigh_timer_handler, 0);
453 
454 	NEIGH_CACHE_STAT_INC(tbl, allocs);
455 	n->tbl		  = tbl;
456 	refcount_set(&n->refcnt, 1);
457 	n->dead		  = 1;
458 	INIT_LIST_HEAD(&n->gc_list);
459 
460 	atomic_inc(&tbl->entries);
461 out:
462 	return n;
463 
464 out_entries:
465 	if (!exempt_from_gc)
466 		atomic_dec(&tbl->gc_entries);
467 	goto out;
468 }
469 
470 static void neigh_get_hash_rnd(u32 *x)
471 {
472 	*x = get_random_u32() | 1;
473 }
474 
475 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
476 {
477 	size_t size = (1 << shift) * sizeof(struct neighbour *);
478 	struct neigh_hash_table *ret;
479 	struct neighbour __rcu **buckets;
480 	int i;
481 
482 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
483 	if (!ret)
484 		return NULL;
485 	if (size <= PAGE_SIZE) {
486 		buckets = kzalloc(size, GFP_ATOMIC);
487 	} else {
488 		buckets = (struct neighbour __rcu **)
489 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
490 					   get_order(size));
491 		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
492 	}
493 	if (!buckets) {
494 		kfree(ret);
495 		return NULL;
496 	}
497 	ret->hash_buckets = buckets;
498 	ret->hash_shift = shift;
499 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
500 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
501 	return ret;
502 }
503 
504 static void neigh_hash_free_rcu(struct rcu_head *head)
505 {
506 	struct neigh_hash_table *nht = container_of(head,
507 						    struct neigh_hash_table,
508 						    rcu);
509 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
510 	struct neighbour __rcu **buckets = nht->hash_buckets;
511 
512 	if (size <= PAGE_SIZE) {
513 		kfree(buckets);
514 	} else {
515 		kmemleak_free(buckets);
516 		free_pages((unsigned long)buckets, get_order(size));
517 	}
518 	kfree(nht);
519 }
520 
521 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
522 						unsigned long new_shift)
523 {
524 	unsigned int i, hash;
525 	struct neigh_hash_table *new_nht, *old_nht;
526 
527 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
528 
529 	old_nht = rcu_dereference_protected(tbl->nht,
530 					    lockdep_is_held(&tbl->lock));
531 	new_nht = neigh_hash_alloc(new_shift);
532 	if (!new_nht)
533 		return old_nht;
534 
535 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
536 		struct neighbour *n, *next;
537 
538 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
539 						   lockdep_is_held(&tbl->lock));
540 		     n != NULL;
541 		     n = next) {
542 			hash = tbl->hash(n->primary_key, n->dev,
543 					 new_nht->hash_rnd);
544 
545 			hash >>= (32 - new_nht->hash_shift);
546 			next = rcu_dereference_protected(n->next,
547 						lockdep_is_held(&tbl->lock));
548 
549 			rcu_assign_pointer(n->next,
550 					   rcu_dereference_protected(
551 						new_nht->hash_buckets[hash],
552 						lockdep_is_held(&tbl->lock)));
553 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
554 		}
555 	}
556 
557 	rcu_assign_pointer(tbl->nht, new_nht);
558 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
559 	return new_nht;
560 }
561 
562 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
563 			       struct net_device *dev)
564 {
565 	struct neighbour *n;
566 
567 	NEIGH_CACHE_STAT_INC(tbl, lookups);
568 
569 	rcu_read_lock_bh();
570 	n = __neigh_lookup_noref(tbl, pkey, dev);
571 	if (n) {
572 		if (!refcount_inc_not_zero(&n->refcnt))
573 			n = NULL;
574 		NEIGH_CACHE_STAT_INC(tbl, hits);
575 	}
576 
577 	rcu_read_unlock_bh();
578 	return n;
579 }
580 EXPORT_SYMBOL(neigh_lookup);
581 
582 static struct neighbour *
583 ___neigh_create(struct neigh_table *tbl, const void *pkey,
584 		struct net_device *dev, u8 flags,
585 		bool exempt_from_gc, bool want_ref)
586 {
587 	u32 hash_val, key_len = tbl->key_len;
588 	struct neighbour *n1, *rc, *n;
589 	struct neigh_hash_table *nht;
590 	int error;
591 
592 	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
593 	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
594 	if (!n) {
595 		rc = ERR_PTR(-ENOBUFS);
596 		goto out;
597 	}
598 
599 	memcpy(n->primary_key, pkey, key_len);
600 	n->dev = dev;
601 	dev_hold(dev);
602 
603 	/* Protocol specific setup. */
604 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
605 		rc = ERR_PTR(error);
606 		goto out_neigh_release;
607 	}
608 
609 	if (dev->netdev_ops->ndo_neigh_construct) {
610 		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
611 		if (error < 0) {
612 			rc = ERR_PTR(error);
613 			goto out_neigh_release;
614 		}
615 	}
616 
617 	/* Device specific setup. */
618 	if (n->parms->neigh_setup &&
619 	    (error = n->parms->neigh_setup(n)) < 0) {
620 		rc = ERR_PTR(error);
621 		goto out_neigh_release;
622 	}
623 
624 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
625 
626 	write_lock_bh(&tbl->lock);
627 	nht = rcu_dereference_protected(tbl->nht,
628 					lockdep_is_held(&tbl->lock));
629 
630 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
631 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
632 
633 	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
634 
635 	if (n->parms->dead) {
636 		rc = ERR_PTR(-EINVAL);
637 		goto out_tbl_unlock;
638 	}
639 
640 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
641 					    lockdep_is_held(&tbl->lock));
642 	     n1 != NULL;
643 	     n1 = rcu_dereference_protected(n1->next,
644 			lockdep_is_held(&tbl->lock))) {
645 		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
646 			if (want_ref)
647 				neigh_hold(n1);
648 			rc = n1;
649 			goto out_tbl_unlock;
650 		}
651 	}
652 
653 	n->dead = 0;
654 	if (!exempt_from_gc)
655 		list_add_tail(&n->gc_list, &n->tbl->gc_list);
656 
657 	if (want_ref)
658 		neigh_hold(n);
659 	rcu_assign_pointer(n->next,
660 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
661 						     lockdep_is_held(&tbl->lock)));
662 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
663 	write_unlock_bh(&tbl->lock);
664 	neigh_dbg(2, "neigh %p is created\n", n);
665 	rc = n;
666 out:
667 	return rc;
668 out_tbl_unlock:
669 	write_unlock_bh(&tbl->lock);
670 out_neigh_release:
671 	if (!exempt_from_gc)
672 		atomic_dec(&tbl->gc_entries);
673 	neigh_release(n);
674 	goto out;
675 }
676 
677 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
678 				 struct net_device *dev, bool want_ref)
679 {
680 	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
681 }
682 EXPORT_SYMBOL(__neigh_create);
683 
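/* Fold the last 32 bits of the key down to one of PNEIGH_HASHMASK + 1
 * proxy hash buckets.
 */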
684 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
685 {
686 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
687 	hash_val ^= (hash_val >> 16);
688 	hash_val ^= hash_val >> 8;
689 	hash_val ^= hash_val >> 4;
690 	hash_val &= PNEIGH_HASHMASK;
691 	return hash_val;
692 }
693 
694 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
695 					      struct net *net,
696 					      const void *pkey,
697 					      unsigned int key_len,
698 					      struct net_device *dev)
699 {
700 	while (n) {
701 		if (!memcmp(n->key, pkey, key_len) &&
702 		    net_eq(pneigh_net(n), net) &&
703 		    (n->dev == dev || !n->dev))
704 			return n;
705 		n = n->next;
706 	}
707 	return NULL;
708 }
709 
710 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
711 		struct net *net, const void *pkey, struct net_device *dev)
712 {
713 	unsigned int key_len = tbl->key_len;
714 	u32 hash_val = pneigh_hash(pkey, key_len);
715 
716 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
717 				 net, pkey, key_len, dev);
718 }
719 EXPORT_SYMBOL_GPL(__pneigh_lookup);
720 
721 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
722 				    struct net *net, const void *pkey,
723 				    struct net_device *dev, int creat)
724 {
725 	struct pneigh_entry *n;
726 	unsigned int key_len = tbl->key_len;
727 	u32 hash_val = pneigh_hash(pkey, key_len);
728 
729 	read_lock_bh(&tbl->lock);
730 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
731 			      net, pkey, key_len, dev);
732 	read_unlock_bh(&tbl->lock);
733 
734 	if (n || !creat)
735 		goto out;
736 
737 	ASSERT_RTNL();
738 
739 	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
740 	if (!n)
741 		goto out;
742 
743 	write_pnet(&n->net, net);
744 	memcpy(n->key, pkey, key_len);
745 	n->dev = dev;
746 	if (dev)
747 		dev_hold(dev);
748 
749 	if (tbl->pconstructor && tbl->pconstructor(n)) {
750 		if (dev)
751 			dev_put(dev);
752 		kfree(n);
753 		n = NULL;
754 		goto out;
755 	}
756 
757 	write_lock_bh(&tbl->lock);
758 	n->next = tbl->phash_buckets[hash_val];
759 	tbl->phash_buckets[hash_val] = n;
760 	write_unlock_bh(&tbl->lock);
761 out:
762 	return n;
763 }
764 EXPORT_SYMBOL(pneigh_lookup);
765 
766 
767 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
768 		  struct net_device *dev)
769 {
770 	struct pneigh_entry *n, **np;
771 	unsigned int key_len = tbl->key_len;
772 	u32 hash_val = pneigh_hash(pkey, key_len);
773 
774 	write_lock_bh(&tbl->lock);
775 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
776 	     np = &n->next) {
777 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
778 		    net_eq(pneigh_net(n), net)) {
779 			*np = n->next;
780 			write_unlock_bh(&tbl->lock);
781 			if (tbl->pdestructor)
782 				tbl->pdestructor(n);
783 			if (n->dev)
784 				dev_put(n->dev);
785 			kfree(n);
786 			return 0;
787 		}
788 	}
789 	write_unlock_bh(&tbl->lock);
790 	return -ENOENT;
791 }
792 
793 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
794 				    struct net_device *dev)
795 {
796 	struct pneigh_entry *n, **np, *freelist = NULL;
797 	u32 h;
798 
799 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
800 		np = &tbl->phash_buckets[h];
801 		while ((n = *np) != NULL) {
802 			if (!dev || n->dev == dev) {
803 				*np = n->next;
804 				n->next = freelist;
805 				freelist = n;
806 				continue;
807 			}
808 			np = &n->next;
809 		}
810 	}
811 	write_unlock_bh(&tbl->lock);
812 	while ((n = freelist)) {
813 		freelist = n->next;
814 		n->next = NULL;
815 		if (tbl->pdestructor)
816 			tbl->pdestructor(n);
817 		if (n->dev)
818 			dev_put(n->dev);
819 		kfree(n);
820 	}
821 	return -ENOENT;
822 }
823 
824 static void neigh_parms_destroy(struct neigh_parms *parms);
825 
826 static inline void neigh_parms_put(struct neigh_parms *parms)
827 {
828 	if (refcount_dec_and_test(&parms->refcnt))
829 		neigh_parms_destroy(parms);
830 }
831 
832 /*
833  *	neighbour must already be out of the table;
834  *
835  */
836 void neigh_destroy(struct neighbour *neigh)
837 {
838 	struct net_device *dev = neigh->dev;
839 
840 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
841 
842 	if (!neigh->dead) {
843 		pr_warn("Destroying alive neighbour %p\n", neigh);
844 		dump_stack();
845 		return;
846 	}
847 
848 	if (neigh_del_timer(neigh))
849 		pr_warn("Impossible event\n");
850 
851 	write_lock_bh(&neigh->lock);
852 	__skb_queue_purge(&neigh->arp_queue);
853 	write_unlock_bh(&neigh->lock);
854 	neigh->arp_queue_len_bytes = 0;
855 
856 	if (dev->netdev_ops->ndo_neigh_destroy)
857 		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
858 
859 	dev_put(dev);
860 	neigh_parms_put(neigh->parms);
861 
862 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
863 
864 	atomic_dec(&neigh->tbl->entries);
865 	kfree_rcu(neigh, rcu);
866 }
867 EXPORT_SYMBOL(neigh_destroy);
868 
869 /* Neighbour state is suspicious;
870    disable fast path.
871 
872    Called with write_locked neigh.
873  */
874 static void neigh_suspect(struct neighbour *neigh)
875 {
876 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
877 
878 	neigh->output = neigh->ops->output;
879 }
880 
881 /* Neighbour state is OK;
882    enable fast path.
883 
884    Called with write_locked neigh.
885  */
886 static void neigh_connect(struct neighbour *neigh)
887 {
888 	neigh_dbg(2, "neigh %p is connected\n", neigh);
889 
890 	neigh->output = neigh->ops->connected_output;
891 }
892 
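/* Deferrable work item: periodically re-randomize reachable_time and
 * reap stale, unreferenced entries, one hash bucket at a time.
 */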
893 static void neigh_periodic_work(struct work_struct *work)
894 {
895 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
896 	struct neighbour *n;
897 	struct neighbour __rcu **np;
898 	unsigned int i;
899 	struct neigh_hash_table *nht;
900 
901 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
902 
903 	write_lock_bh(&tbl->lock);
904 	nht = rcu_dereference_protected(tbl->nht,
905 					lockdep_is_held(&tbl->lock));
906 
907 	/*
908 	 *	periodically recompute ReachableTime from random function
909 	 */
910 
911 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
912 		struct neigh_parms *p;
913 
914 		WRITE_ONCE(tbl->last_rand, jiffies);
915 		list_for_each_entry(p, &tbl->parms_list, list)
916 			p->reachable_time =
917 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
918 	}
919 
920 	if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
921 		goto out;
922 
923 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
924 		np = &nht->hash_buckets[i];
925 
926 		while ((n = rcu_dereference_protected(*np,
927 				lockdep_is_held(&tbl->lock))) != NULL) {
928 			unsigned int state;
929 
930 			write_lock(&n->lock);
931 
932 			state = n->nud_state;
933 			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
934 			    (n->flags & NTF_EXT_LEARNED)) {
935 				write_unlock(&n->lock);
936 				goto next_elt;
937 			}
938 
939 			if (time_before(n->used, n->confirmed) &&
940 			    time_is_before_eq_jiffies(n->confirmed))
941 				n->used = n->confirmed;
942 
943 			if (refcount_read(&n->refcnt) == 1 &&
944 			    (state == NUD_FAILED ||
945 			     !time_in_range_open(jiffies, n->used,
946 						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
947 				rcu_assign_pointer(*np,
948 					rcu_dereference_protected(n->next,
949 						lockdep_is_held(&tbl->lock)));
950 				neigh_mark_dead(n);
951 				write_unlock(&n->lock);
952 				neigh_cleanup_and_release(n);
953 				continue;
954 			}
955 			write_unlock(&n->lock);
956 
957 next_elt:
958 			np = &n->next;
959 		}
960 		/*
961 		 * It's fine to release lock here, even if hash table
962 		 * grows while we are preempted.
963 		 */
964 		write_unlock_bh(&tbl->lock);
965 		cond_resched();
966 		write_lock_bh(&tbl->lock);
967 		nht = rcu_dereference_protected(tbl->nht,
968 						lockdep_is_held(&tbl->lock));
969 	}
970 out:
971 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
972 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
973 	 * BASE_REACHABLE_TIME.
974 	 */
975 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
976 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
977 	write_unlock_bh(&tbl->lock);
978 }
979 
980 static __inline__ int neigh_max_probes(struct neighbour *n)
981 {
982 	struct neigh_parms *p = n->parms;
983 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
984 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
985 	        NEIGH_VAR(p, MCAST_PROBES));
986 }
987 
988 static void neigh_invalidate(struct neighbour *neigh)
989 	__releases(neigh->lock)
990 	__acquires(neigh->lock)
991 {
992 	struct sk_buff *skb;
993 
994 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
995 	neigh_dbg(2, "neigh %p is failed\n", neigh);
996 	neigh->updated = jiffies;
997 
998 	/* This is a very delicate place. report_unreachable is a very
999 	   complicated routine. In particular, it can hit the same neighbour entry!
1000 
1001 	   So we try to be careful and avoid a dead loop. --ANK
1002 	 */
1003 	while (neigh->nud_state == NUD_FAILED &&
1004 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1005 		write_unlock(&neigh->lock);
1006 		neigh->ops->error_report(neigh, skb);
1007 		write_lock(&neigh->lock);
1008 	}
1009 	__skb_queue_purge(&neigh->arp_queue);
1010 	neigh->arp_queue_len_bytes = 0;
1011 }
1012 
1013 static void neigh_probe(struct neighbour *neigh)
1014 	__releases(neigh->lock)
1015 {
1016 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1017 	/* keep skb alive even if arp_queue overflows */
1018 	if (skb)
1019 		skb = skb_clone(skb, GFP_ATOMIC);
1020 	write_unlock(&neigh->lock);
1021 	if (neigh->ops->solicit)
1022 		neigh->ops->solicit(neigh, skb);
1023 	atomic_inc(&neigh->probes);
1024 	consume_skb(skb);
1025 }
1026 
1027 /* Called when a timer expires for a neighbour entry. */
1028 
1029 static void neigh_timer_handler(struct timer_list *t)
1030 {
1031 	unsigned long now, next;
1032 	struct neighbour *neigh = from_timer(neigh, t, timer);
1033 	unsigned int state;
1034 	int notify = 0;
1035 
1036 	write_lock(&neigh->lock);
1037 
1038 	state = neigh->nud_state;
1039 	now = jiffies;
1040 	next = now + HZ;
1041 
1042 	if (!(state & NUD_IN_TIMER))
1043 		goto out;
1044 
1045 	if (state & NUD_REACHABLE) {
1046 		if (time_before_eq(now,
1047 				   neigh->confirmed + neigh->parms->reachable_time)) {
1048 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1049 			next = neigh->confirmed + neigh->parms->reachable_time;
1050 		} else if (time_before_eq(now,
1051 					  neigh->used +
1052 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1053 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1054 			neigh->nud_state = NUD_DELAY;
1055 			neigh->updated = jiffies;
1056 			neigh_suspect(neigh);
1057 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1058 		} else {
1059 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1060 			neigh->nud_state = NUD_STALE;
1061 			neigh->updated = jiffies;
1062 			neigh_suspect(neigh);
1063 			notify = 1;
1064 		}
1065 	} else if (state & NUD_DELAY) {
1066 		if (time_before_eq(now,
1067 				   neigh->confirmed +
1068 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1069 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1070 			neigh->nud_state = NUD_REACHABLE;
1071 			neigh->updated = jiffies;
1072 			neigh_connect(neigh);
1073 			notify = 1;
1074 			next = neigh->confirmed + neigh->parms->reachable_time;
1075 		} else {
1076 			neigh_dbg(2, "neigh %p is probed\n", neigh);
1077 			neigh->nud_state = NUD_PROBE;
1078 			neigh->updated = jiffies;
1079 			atomic_set(&neigh->probes, 0);
1080 			notify = 1;
1081 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1082 					 HZ/100);
1083 		}
1084 	} else {
1085 		/* NUD_PROBE|NUD_INCOMPLETE */
1086 		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1087 	}
1088 
1089 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1090 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1091 		neigh->nud_state = NUD_FAILED;
1092 		notify = 1;
1093 		neigh_invalidate(neigh);
1094 		goto out;
1095 	}
1096 
1097 	if (neigh->nud_state & NUD_IN_TIMER) {
1098 		if (time_before(next, jiffies + HZ/100))
1099 			next = jiffies + HZ/100;
1100 		if (!mod_timer(&neigh->timer, next))
1101 			neigh_hold(neigh);
1102 	}
1103 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1104 		neigh_probe(neigh);
1105 	} else {
1106 out:
1107 		write_unlock(&neigh->lock);
1108 	}
1109 
1110 	if (notify)
1111 		neigh_update_notify(neigh, 0);
1112 
1113 	trace_neigh_timer_handler(neigh, 0);
1114 
1115 	neigh_release(neigh);
1116 }
1117 
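/* Start (or continue) resolution for @neigh and, while the entry is
 * NUD_INCOMPLETE, queue @skb on its arp_queue, bounded by
 * QUEUE_LEN_BYTES.  Returns 0 if the packet may be sent right away,
 * 1 if it was queued or dropped.
 */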
1118 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1119 {
1120 	int rc;
1121 	bool immediate_probe = false;
1122 
1123 	write_lock_bh(&neigh->lock);
1124 
1125 	rc = 0;
1126 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1127 		goto out_unlock_bh;
1128 	if (neigh->dead)
1129 		goto out_dead;
1130 
1131 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1132 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1133 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1134 			unsigned long next, now = jiffies;
1135 
1136 			atomic_set(&neigh->probes,
1137 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1138 			neigh_del_timer(neigh);
1139 			neigh->nud_state     = NUD_INCOMPLETE;
1140 			neigh->updated = now;
1141 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1142 					 HZ/100);
1143 			neigh_add_timer(neigh, next);
1144 			immediate_probe = true;
1145 		} else {
1146 			neigh->nud_state = NUD_FAILED;
1147 			neigh->updated = jiffies;
1148 			write_unlock_bh(&neigh->lock);
1149 
1150 			kfree_skb(skb);
1151 			return 1;
1152 		}
1153 	} else if (neigh->nud_state & NUD_STALE) {
1154 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1155 		neigh_del_timer(neigh);
1156 		neigh->nud_state = NUD_DELAY;
1157 		neigh->updated = jiffies;
1158 		neigh_add_timer(neigh, jiffies +
1159 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1160 	}
1161 
1162 	if (neigh->nud_state == NUD_INCOMPLETE) {
1163 		if (skb) {
1164 			while (neigh->arp_queue_len_bytes + skb->truesize >
1165 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1166 				struct sk_buff *buff;
1167 
1168 				buff = __skb_dequeue(&neigh->arp_queue);
1169 				if (!buff)
1170 					break;
1171 				neigh->arp_queue_len_bytes -= buff->truesize;
1172 				kfree_skb(buff);
1173 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1174 			}
1175 			skb_dst_force(skb);
1176 			__skb_queue_tail(&neigh->arp_queue, skb);
1177 			neigh->arp_queue_len_bytes += skb->truesize;
1178 		}
1179 		rc = 1;
1180 	}
1181 out_unlock_bh:
1182 	if (immediate_probe)
1183 		neigh_probe(neigh);
1184 	else
1185 		write_unlock(&neigh->lock);
1186 	local_bh_enable();
1187 	trace_neigh_event_send_done(neigh, rc);
1188 	return rc;
1189 
1190 out_dead:
1191 	if (neigh->nud_state & NUD_STALE)
1192 		goto out_unlock_bh;
1193 	write_unlock_bh(&neigh->lock);
1194 	kfree_skb(skb);
1195 	trace_neigh_event_send_dead(neigh, 1);
1196 	return 1;
1197 }
1198 EXPORT_SYMBOL(__neigh_event_send);
1199 
1200 static void neigh_update_hhs(struct neighbour *neigh)
1201 {
1202 	struct hh_cache *hh;
1203 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1204 		= NULL;
1205 
1206 	if (neigh->dev->header_ops)
1207 		update = neigh->dev->header_ops->cache_update;
1208 
1209 	if (update) {
1210 		hh = &neigh->hh;
1211 		if (READ_ONCE(hh->hh_len)) {
1212 			write_seqlock_bh(&hh->hh_lock);
1213 			update(hh, neigh->dev, neigh->ha);
1214 			write_sequnlock_bh(&hh->hh_lock);
1215 		}
1216 	}
1217 }
1218 
1219 
1220 
1221 /* Generic update routine.
1222    -- lladdr is new lladdr or NULL, if it is not supplied.
1223    -- new    is new state.
1224    -- flags
1225 	NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr,
1226 				if it is different.
1227 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
1228 				lladdr instead of overriding it
1229 				if it is different.
1230 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1231 	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
1232 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
1233 				NTF_ROUTER flag.
1234 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
1235 				a router.
1236 
1237    Caller MUST hold reference count on the entry.
1238  */
1239 
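/* For example, an administrative request that installs a new lladdr and
 * makes the entry permanent would be expressed as (illustration only;
 * the real callers live in the protocol and netlink code):
 *
 *	neigh_update(n, lladdr, NUD_PERMANENT,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
 */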
1240 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1241 			  u8 new, u32 flags, u32 nlmsg_pid,
1242 			  struct netlink_ext_ack *extack)
1243 {
1244 	bool ext_learn_change = false;
1245 	u8 old;
1246 	int err;
1247 	int notify = 0;
1248 	struct net_device *dev;
1249 	int update_isrouter = 0;
1250 
1251 	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1252 
1253 	write_lock_bh(&neigh->lock);
1254 
1255 	dev    = neigh->dev;
1256 	old    = neigh->nud_state;
1257 	err    = -EPERM;
1258 
1259 	if (neigh->dead) {
1260 		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1261 		new = old;
1262 		goto out;
1263 	}
1264 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1265 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1266 		goto out;
1267 
1268 	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
1269 	if (flags & NEIGH_UPDATE_F_USE) {
1270 		new = old & ~NUD_PERMANENT;
1271 		neigh->nud_state = new;
1272 		err = 0;
1273 		goto out;
1274 	}
1275 
1276 	if (!(new & NUD_VALID)) {
1277 		neigh_del_timer(neigh);
1278 		if (old & NUD_CONNECTED)
1279 			neigh_suspect(neigh);
1280 		neigh->nud_state = new;
1281 		err = 0;
1282 		notify = old & NUD_VALID;
1283 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1284 		    (new & NUD_FAILED)) {
1285 			neigh_invalidate(neigh);
1286 			notify = 1;
1287 		}
1288 		goto out;
1289 	}
1290 
1291 	/* Compare new lladdr with cached one */
1292 	if (!dev->addr_len) {
1293 		/* First case: device needs no address. */
1294 		lladdr = neigh->ha;
1295 	} else if (lladdr) {
1296 		/* The second case: if something is already cached
1297 		   and a new address is proposed:
1298 		   - compare new & old
1299 		   - if they are different, check override flag
1300 		 */
1301 		if ((old & NUD_VALID) &&
1302 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1303 			lladdr = neigh->ha;
1304 	} else {
1305 		/* No address is supplied; if we know something,
1306 		   use it, otherwise discard the request.
1307 		 */
1308 		err = -EINVAL;
1309 		if (!(old & NUD_VALID)) {
1310 			NL_SET_ERR_MSG(extack, "No link layer address given");
1311 			goto out;
1312 		}
1313 		lladdr = neigh->ha;
1314 	}
1315 
1316 	/* Update confirmed timestamp for neighbour entry after we
1317 	 * received ARP packet even if it doesn't change IP to MAC binding.
1318 	 */
1319 	if (new & NUD_CONNECTED)
1320 		neigh->confirmed = jiffies;
1321 
1322 	/* If entry was valid and address is not changed,
1323 	   do not change entry state, if new one is STALE.
1324 	 */
1325 	err = 0;
1326 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1327 	if (old & NUD_VALID) {
1328 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1329 			update_isrouter = 0;
1330 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1331 			    (old & NUD_CONNECTED)) {
1332 				lladdr = neigh->ha;
1333 				new = NUD_STALE;
1334 			} else
1335 				goto out;
1336 		} else {
1337 			if (lladdr == neigh->ha && new == NUD_STALE &&
1338 			    !(flags & NEIGH_UPDATE_F_ADMIN))
1339 				new = old;
1340 		}
1341 	}
1342 
1343 	/* Update timestamp only once we know we will make a change to the
1344 	 * neighbour entry. Otherwise we risk moving the locktime window with
1345 	 * noop updates and ignore relevant ARP updates.
1346 	 */
1347 	if (new != old || lladdr != neigh->ha)
1348 		neigh->updated = jiffies;
1349 
1350 	if (new != old) {
1351 		neigh_del_timer(neigh);
1352 		if (new & NUD_PROBE)
1353 			atomic_set(&neigh->probes, 0);
1354 		if (new & NUD_IN_TIMER)
1355 			neigh_add_timer(neigh, (jiffies +
1356 						((new & NUD_REACHABLE) ?
1357 						 neigh->parms->reachable_time :
1358 						 0)));
1359 		neigh->nud_state = new;
1360 		notify = 1;
1361 	}
1362 
1363 	if (lladdr != neigh->ha) {
1364 		write_seqlock(&neigh->ha_lock);
1365 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1366 		write_sequnlock(&neigh->ha_lock);
1367 		neigh_update_hhs(neigh);
1368 		if (!(new & NUD_CONNECTED))
1369 			neigh->confirmed = jiffies -
1370 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1371 		notify = 1;
1372 	}
1373 	if (new == old)
1374 		goto out;
1375 	if (new & NUD_CONNECTED)
1376 		neigh_connect(neigh);
1377 	else
1378 		neigh_suspect(neigh);
1379 	if (!(old & NUD_VALID)) {
1380 		struct sk_buff *skb;
1381 
1382 		/* Again: avoid dead loop if something went wrong */
1383 
1384 		while (neigh->nud_state & NUD_VALID &&
1385 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1386 			struct dst_entry *dst = skb_dst(skb);
1387 			struct neighbour *n2, *n1 = neigh;
1388 			write_unlock_bh(&neigh->lock);
1389 
1390 			rcu_read_lock();
1391 
1392 			/* Why not just use 'neigh' as-is?  The problem is that
1393 			 * things such as shaper, eql, and sch_teql can end up
1394 			 * using alternative, different, neigh objects to output
1395 			 * the packet in the output path.  So what we need to do
1396 			 * here is re-lookup the top-level neigh in the path so
1397 			 * we can reinject the packet there.
1398 			 */
1399 			n2 = NULL;
1400 			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1401 				n2 = dst_neigh_lookup_skb(dst, skb);
1402 				if (n2)
1403 					n1 = n2;
1404 			}
1405 			n1->output(n1, skb);
1406 			if (n2)
1407 				neigh_release(n2);
1408 			rcu_read_unlock();
1409 
1410 			write_lock_bh(&neigh->lock);
1411 		}
1412 		__skb_queue_purge(&neigh->arp_queue);
1413 		neigh->arp_queue_len_bytes = 0;
1414 	}
1415 out:
1416 	if (update_isrouter)
1417 		neigh_update_is_router(neigh, flags, &notify);
1418 	write_unlock_bh(&neigh->lock);
1419 
1420 	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
1421 		neigh_update_gc_list(neigh);
1422 
1423 	if (notify)
1424 		neigh_update_notify(neigh, nlmsg_pid);
1425 
1426 	trace_neigh_update_done(neigh, err);
1427 
1428 	return err;
1429 }
1430 
1431 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1432 		 u32 flags, u32 nlmsg_pid)
1433 {
1434 	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1435 }
1436 EXPORT_SYMBOL(neigh_update);
1437 
1438 /* Update the neigh to listen temporarily for probe responses, even if it is
1439  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1440  */
1441 void __neigh_set_probe_once(struct neighbour *neigh)
1442 {
1443 	if (neigh->dead)
1444 		return;
1445 	neigh->updated = jiffies;
1446 	if (!(neigh->nud_state & NUD_FAILED))
1447 		return;
1448 	neigh->nud_state = NUD_INCOMPLETE;
1449 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1450 	neigh_add_timer(neigh,
1451 			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1452 				      HZ/100));
1453 }
1454 EXPORT_SYMBOL(__neigh_set_probe_once);
1455 
1456 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1457 				 u8 *lladdr, void *saddr,
1458 				 struct net_device *dev)
1459 {
1460 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1461 						 lladdr || !dev->addr_len);
1462 	if (neigh)
1463 		neigh_update(neigh, lladdr, NUD_STALE,
1464 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1465 	return neigh;
1466 }
1467 EXPORT_SYMBOL(neigh_event_ns);
1468 
1469 /* called with read_lock_bh(&n->lock); */
1470 static void neigh_hh_init(struct neighbour *n)
1471 {
1472 	struct net_device *dev = n->dev;
1473 	__be16 prot = n->tbl->protocol;
1474 	struct hh_cache	*hh = &n->hh;
1475 
1476 	write_lock_bh(&n->lock);
1477 
1478 	/* Only one thread can come in here and initialize the
1479 	 * hh_cache entry.
1480 	 */
1481 	if (!hh->hh_len)
1482 		dev->header_ops->cache(n, hh, prot);
1483 
1484 	write_unlock_bh(&n->lock);
1485 }
1486 
1487 /* Slow and careful. */
1488 
1489 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1490 {
1491 	int rc = 0;
1492 
1493 	if (!neigh_event_send(neigh, skb)) {
1494 		int err;
1495 		struct net_device *dev = neigh->dev;
1496 		unsigned int seq;
1497 
1498 		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1499 			neigh_hh_init(neigh);
1500 
1501 		do {
1502 			__skb_pull(skb, skb_network_offset(skb));
1503 			seq = read_seqbegin(&neigh->ha_lock);
1504 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1505 					      neigh->ha, NULL, skb->len);
1506 		} while (read_seqretry(&neigh->ha_lock, seq));
1507 
1508 		if (err >= 0)
1509 			rc = dev_queue_xmit(skb);
1510 		else
1511 			goto out_kfree_skb;
1512 	}
1513 out:
1514 	return rc;
1515 out_kfree_skb:
1516 	rc = -EINVAL;
1517 	kfree_skb(skb);
1518 	goto out;
1519 }
1520 EXPORT_SYMBOL(neigh_resolve_output);
1521 
1522 /* As fast as possible without hh cache */
1523 
1524 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1525 {
1526 	struct net_device *dev = neigh->dev;
1527 	unsigned int seq;
1528 	int err;
1529 
1530 	do {
1531 		__skb_pull(skb, skb_network_offset(skb));
1532 		seq = read_seqbegin(&neigh->ha_lock);
1533 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1534 				      neigh->ha, NULL, skb->len);
1535 	} while (read_seqretry(&neigh->ha_lock, seq));
1536 
1537 	if (err >= 0)
1538 		err = dev_queue_xmit(skb);
1539 	else {
1540 		err = -EINVAL;
1541 		kfree_skb(skb);
1542 	}
1543 	return err;
1544 }
1545 EXPORT_SYMBOL(neigh_connected_output);
1546 
1547 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1548 {
1549 	return dev_queue_xmit(skb);
1550 }
1551 EXPORT_SYMBOL(neigh_direct_output);
1552 
1553 static void neigh_proxy_process(struct timer_list *t)
1554 {
1555 	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1556 	long sched_next = 0;
1557 	unsigned long now = jiffies;
1558 	struct sk_buff *skb, *n;
1559 
1560 	spin_lock(&tbl->proxy_queue.lock);
1561 
1562 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1563 		long tdif = NEIGH_CB(skb)->sched_next - now;
1564 
1565 		if (tdif <= 0) {
1566 			struct net_device *dev = skb->dev;
1567 
1568 			__skb_unlink(skb, &tbl->proxy_queue);
1569 			if (tbl->proxy_redo && netif_running(dev)) {
1570 				rcu_read_lock();
1571 				tbl->proxy_redo(skb);
1572 				rcu_read_unlock();
1573 			} else {
1574 				kfree_skb(skb);
1575 			}
1576 
1577 			dev_put(dev);
1578 		} else if (!sched_next || tdif < sched_next)
1579 			sched_next = tdif;
1580 	}
1581 	del_timer(&tbl->proxy_timer);
1582 	if (sched_next)
1583 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1584 	spin_unlock(&tbl->proxy_queue.lock);
1585 }
1586 
1587 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1588 		    struct sk_buff *skb)
1589 {
1590 	unsigned long now = jiffies;
1591 
1592 	unsigned long sched_next = now + (prandom_u32() %
1593 					  NEIGH_VAR(p, PROXY_DELAY));
1594 
1595 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1596 		kfree_skb(skb);
1597 		return;
1598 	}
1599 
1600 	NEIGH_CB(skb)->sched_next = sched_next;
1601 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1602 
1603 	spin_lock(&tbl->proxy_queue.lock);
1604 	if (del_timer(&tbl->proxy_timer)) {
1605 		if (time_before(tbl->proxy_timer.expires, sched_next))
1606 			sched_next = tbl->proxy_timer.expires;
1607 	}
1608 	skb_dst_drop(skb);
1609 	dev_hold(skb->dev);
1610 	__skb_queue_tail(&tbl->proxy_queue, skb);
1611 	mod_timer(&tbl->proxy_timer, sched_next);
1612 	spin_unlock(&tbl->proxy_queue.lock);
1613 }
1614 EXPORT_SYMBOL(pneigh_enqueue);
1615 
1616 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1617 						      struct net *net, int ifindex)
1618 {
1619 	struct neigh_parms *p;
1620 
1621 	list_for_each_entry(p, &tbl->parms_list, list) {
1622 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1623 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1624 			return p;
1625 	}
1626 
1627 	return NULL;
1628 }
1629 
1630 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1631 				      struct neigh_table *tbl)
1632 {
1633 	struct neigh_parms *p;
1634 	struct net *net = dev_net(dev);
1635 	const struct net_device_ops *ops = dev->netdev_ops;
1636 
1637 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1638 	if (p) {
1639 		p->tbl		  = tbl;
1640 		refcount_set(&p->refcnt, 1);
1641 		p->reachable_time =
1642 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1643 		dev_hold(dev);
1644 		p->dev = dev;
1645 		write_pnet(&p->net, net);
1646 		p->sysctl_table = NULL;
1647 
1648 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1649 			dev_put(dev);
1650 			kfree(p);
1651 			return NULL;
1652 		}
1653 
1654 		write_lock_bh(&tbl->lock);
1655 		list_add(&p->list, &tbl->parms.list);
1656 		write_unlock_bh(&tbl->lock);
1657 
1658 		neigh_parms_data_state_cleanall(p);
1659 	}
1660 	return p;
1661 }
1662 EXPORT_SYMBOL(neigh_parms_alloc);
1663 
1664 static void neigh_rcu_free_parms(struct rcu_head *head)
1665 {
1666 	struct neigh_parms *parms =
1667 		container_of(head, struct neigh_parms, rcu_head);
1668 
1669 	neigh_parms_put(parms);
1670 }
1671 
1672 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1673 {
1674 	if (!parms || parms == &tbl->parms)
1675 		return;
1676 	write_lock_bh(&tbl->lock);
1677 	list_del(&parms->list);
1678 	parms->dead = 1;
1679 	write_unlock_bh(&tbl->lock);
1680 	if (parms->dev)
1681 		dev_put(parms->dev);
1682 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1683 }
1684 EXPORT_SYMBOL(neigh_parms_release);
1685 
1686 static void neigh_parms_destroy(struct neigh_parms *parms)
1687 {
1688 	kfree(parms);
1689 }
1690 
1691 static struct lock_class_key neigh_table_proxy_queue_class;
1692 
1693 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1694 
1695 void neigh_table_init(int index, struct neigh_table *tbl)
1696 {
1697 	unsigned long now = jiffies;
1698 	unsigned long phsize;
1699 
1700 	INIT_LIST_HEAD(&tbl->parms_list);
1701 	INIT_LIST_HEAD(&tbl->gc_list);
1702 	list_add(&tbl->parms.list, &tbl->parms_list);
1703 	write_pnet(&tbl->parms.net, &init_net);
1704 	refcount_set(&tbl->parms.refcnt, 1);
1705 	tbl->parms.reachable_time =
1706 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1707 
1708 	tbl->stats = alloc_percpu(struct neigh_statistics);
1709 	if (!tbl->stats)
1710 		panic("cannot create neighbour cache statistics");
1711 
1712 #ifdef CONFIG_PROC_FS
1713 	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1714 			      &neigh_stat_seq_ops, tbl))
1715 		panic("cannot create neighbour proc dir entry");
1716 #endif
1717 
1718 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1719 
1720 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1721 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1722 
1723 	if (!tbl->nht || !tbl->phash_buckets)
1724 		panic("cannot allocate neighbour cache hashes");
1725 
1726 	if (!tbl->entry_size)
1727 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1728 					tbl->key_len, NEIGH_PRIV_ALIGN);
1729 	else
1730 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1731 
1732 	rwlock_init(&tbl->lock);
1733 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1734 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1735 			tbl->parms.reachable_time);
1736 	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1737 	skb_queue_head_init_class(&tbl->proxy_queue,
1738 			&neigh_table_proxy_queue_class);
1739 
1740 	tbl->last_flush = now;
1741 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1742 
1743 	neigh_tables[index] = tbl;
1744 }
1745 EXPORT_SYMBOL(neigh_table_init);
1746 
1747 int neigh_table_clear(int index, struct neigh_table *tbl)
1748 {
1749 	neigh_tables[index] = NULL;
1750 	/* It is not clean... Fix it to unload IPv6 module safely */
1751 	cancel_delayed_work_sync(&tbl->gc_work);
1752 	del_timer_sync(&tbl->proxy_timer);
1753 	pneigh_queue_purge(&tbl->proxy_queue, NULL);
1754 	neigh_ifdown(tbl, NULL);
1755 	if (atomic_read(&tbl->entries))
1756 		pr_crit("neighbour leakage\n");
1757 
1758 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1759 		 neigh_hash_free_rcu);
1760 	tbl->nht = NULL;
1761 
1762 	kfree(tbl->phash_buckets);
1763 	tbl->phash_buckets = NULL;
1764 
1765 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1766 
1767 	free_percpu(tbl->stats);
1768 	tbl->stats = NULL;
1769 
1770 	return 0;
1771 }
1772 EXPORT_SYMBOL(neigh_table_clear);
1773 
1774 static struct neigh_table *neigh_find_table(int family)
1775 {
1776 	struct neigh_table *tbl = NULL;
1777 
1778 	switch (family) {
1779 	case AF_INET:
1780 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1781 		break;
1782 	case AF_INET6:
1783 		tbl = neigh_tables[NEIGH_ND_TABLE];
1784 		break;
1785 	}
1786 
1787 	return tbl;
1788 }
1789 
1790 const struct nla_policy nda_policy[NDA_MAX+1] = {
1791 	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
1792 	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1793 	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1794 	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
1795 	[NDA_PROBES]		= { .type = NLA_U32 },
1796 	[NDA_VLAN]		= { .type = NLA_U16 },
1797 	[NDA_PORT]		= { .type = NLA_U16 },
1798 	[NDA_VNI]		= { .type = NLA_U32 },
1799 	[NDA_IFINDEX]		= { .type = NLA_U32 },
1800 	[NDA_MASTER]		= { .type = NLA_U32 },
1801 	[NDA_PROTOCOL]		= { .type = NLA_U8 },
1802 	[NDA_NH_ID]		= { .type = NLA_U32 },
1803 	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
1804 };
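
/* Hedged illustration: the policy above is what nlmsg_parse_deprecated()
 * validates RTM_NEWNEIGH / RTM_DELNEIGH attributes against in the handlers
 * below.  A userspace request such as
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0 nud permanent
 * arrives as NDA_DST plus NDA_LLADDR attributes and must pass this table
 * before neigh_add() acts on it.
 */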
1805 
1806 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1807 			struct netlink_ext_ack *extack)
1808 {
1809 	struct net *net = sock_net(skb->sk);
1810 	struct ndmsg *ndm;
1811 	struct nlattr *dst_attr;
1812 	struct neigh_table *tbl;
1813 	struct neighbour *neigh;
1814 	struct net_device *dev = NULL;
1815 	int err = -EINVAL;
1816 
1817 	ASSERT_RTNL();
1818 	if (nlmsg_len(nlh) < sizeof(*ndm))
1819 		goto out;
1820 
1821 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1822 	if (!dst_attr) {
1823 		NL_SET_ERR_MSG(extack, "Network address not specified");
1824 		goto out;
1825 	}
1826 
1827 	ndm = nlmsg_data(nlh);
1828 	if (ndm->ndm_ifindex) {
1829 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1830 		if (dev == NULL) {
1831 			err = -ENODEV;
1832 			goto out;
1833 		}
1834 	}
1835 
1836 	tbl = neigh_find_table(ndm->ndm_family);
1837 	if (tbl == NULL)
1838 		return -EAFNOSUPPORT;
1839 
1840 	if (nla_len(dst_attr) < (int)tbl->key_len) {
1841 		NL_SET_ERR_MSG(extack, "Invalid network address");
1842 		goto out;
1843 	}
1844 
1845 	if (ndm->ndm_flags & NTF_PROXY) {
1846 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1847 		goto out;
1848 	}
1849 
1850 	if (dev == NULL)
1851 		goto out;
1852 
1853 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1854 	if (neigh == NULL) {
1855 		err = -ENOENT;
1856 		goto out;
1857 	}
1858 
1859 	err = __neigh_update(neigh, NULL, NUD_FAILED,
1860 			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1861 			     NETLINK_CB(skb).portid, extack);
1862 	write_lock_bh(&tbl->lock);
1863 	neigh_release(neigh);
1864 	neigh_remove_one(neigh, tbl);
1865 	write_unlock_bh(&tbl->lock);
1866 
1867 out:
1868 	return err;
1869 }
1870 
1871 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1872 		     struct netlink_ext_ack *extack)
1873 {
1874 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1875 		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1876 	struct net *net = sock_net(skb->sk);
1877 	struct ndmsg *ndm;
1878 	struct nlattr *tb[NDA_MAX+1];
1879 	struct neigh_table *tbl;
1880 	struct net_device *dev = NULL;
1881 	struct neighbour *neigh;
1882 	void *dst, *lladdr;
1883 	u8 protocol = 0;
1884 	int err;
1885 
1886 	ASSERT_RTNL();
1887 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1888 				     nda_policy, extack);
1889 	if (err < 0)
1890 		goto out;
1891 
1892 	err = -EINVAL;
1893 	if (!tb[NDA_DST]) {
1894 		NL_SET_ERR_MSG(extack, "Network address not specified");
1895 		goto out;
1896 	}
1897 
1898 	ndm = nlmsg_data(nlh);
1899 	if (ndm->ndm_ifindex) {
1900 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1901 		if (dev == NULL) {
1902 			err = -ENODEV;
1903 			goto out;
1904 		}
1905 
1906 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1907 			NL_SET_ERR_MSG(extack, "Invalid link address");
1908 			goto out;
1909 		}
1910 	}
1911 
1912 	tbl = neigh_find_table(ndm->ndm_family);
1913 	if (tbl == NULL)
1914 		return -EAFNOSUPPORT;
1915 
1916 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1917 		NL_SET_ERR_MSG(extack, "Invalid network address");
1918 		goto out;
1919 	}
1920 
1921 	dst = nla_data(tb[NDA_DST]);
1922 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1923 
1924 	if (tb[NDA_PROTOCOL])
1925 		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1926 
1927 	if (ndm->ndm_flags & NTF_PROXY) {
1928 		struct pneigh_entry *pn;
1929 
1930 		err = -ENOBUFS;
1931 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1932 		if (pn) {
1933 			pn->flags = ndm->ndm_flags;
1934 			if (protocol)
1935 				pn->protocol = protocol;
1936 			err = 0;
1937 		}
1938 		goto out;
1939 	}
1940 
1941 	if (!dev) {
1942 		NL_SET_ERR_MSG(extack, "Device not specified");
1943 		goto out;
1944 	}
1945 
1946 	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1947 		err = -EINVAL;
1948 		goto out;
1949 	}
1950 
1951 	neigh = neigh_lookup(tbl, dst, dev);
1952 	if (neigh == NULL) {
1953 		bool exempt_from_gc;
1954 
1955 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1956 			err = -ENOENT;
1957 			goto out;
1958 		}
1959 
1960 		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1961 				 ndm->ndm_flags & NTF_EXT_LEARNED;
1962 		neigh = ___neigh_create(tbl, dst, dev,
1963 					ndm->ndm_flags & NTF_EXT_LEARNED,
1964 					exempt_from_gc, true);
1965 		if (IS_ERR(neigh)) {
1966 			err = PTR_ERR(neigh);
1967 			goto out;
1968 		}
1969 	} else {
1970 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1971 			err = -EEXIST;
1972 			neigh_release(neigh);
1973 			goto out;
1974 		}
1975 
1976 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1977 			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1978 				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1979 	}
1980 
1981 	if (protocol)
1982 		neigh->protocol = protocol;
1983 	if (ndm->ndm_flags & NTF_EXT_LEARNED)
1984 		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1985 	if (ndm->ndm_flags & NTF_ROUTER)
1986 		flags |= NEIGH_UPDATE_F_ISROUTER;
1987 	if (ndm->ndm_flags & NTF_USE)
1988 		flags |= NEIGH_UPDATE_F_USE;
1989 
1990 	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1991 			     NETLINK_CB(skb).portid, extack);
1992 	if (!err && ndm->ndm_flags & NTF_USE) {
1993 		neigh_event_send(neigh, NULL);
1994 		err = 0;
1995 	}
1996 	neigh_release(neigh);
1997 out:
1998 	return err;
1999 }
2000 
2001 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2002 {
2003 	struct nlattr *nest;
2004 
2005 	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2006 	if (nest == NULL)
2007 		return -ENOBUFS;
2008 
2009 	if ((parms->dev &&
2010 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2011 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2012 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2013 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2014 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
2015 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
2016 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2017 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2018 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2019 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2020 			NEIGH_VAR(parms, UCAST_PROBES)) ||
2021 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2022 			NEIGH_VAR(parms, MCAST_PROBES)) ||
2023 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2024 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2025 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2026 			  NDTPA_PAD) ||
2027 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2028 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2029 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2030 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2031 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2032 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2033 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2034 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2035 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2036 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2037 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2038 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2039 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2040 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
2041 		goto nla_put_failure;
2042 	return nla_nest_end(skb, nest);
2043 
2044 nla_put_failure:
2045 	nla_nest_cancel(skb, nest);
2046 	return -EMSGSIZE;
2047 }
2048 
2049 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2050 			      u32 pid, u32 seq, int type, int flags)
2051 {
2052 	struct nlmsghdr *nlh;
2053 	struct ndtmsg *ndtmsg;
2054 
2055 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2056 	if (nlh == NULL)
2057 		return -EMSGSIZE;
2058 
2059 	ndtmsg = nlmsg_data(nlh);
2060 
2061 	read_lock_bh(&tbl->lock);
2062 	ndtmsg->ndtm_family = tbl->family;
2063 	ndtmsg->ndtm_pad1   = 0;
2064 	ndtmsg->ndtm_pad2   = 0;
2065 
2066 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2067 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2068 			  NDTA_PAD) ||
2069 	    nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2070 	    nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2071 	    nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2072 		goto nla_put_failure;
2073 	{
2074 		unsigned long now = jiffies;
2075 		long flush_delta = now - READ_ONCE(tbl->last_flush);
2076 		long rand_delta = now - READ_ONCE(tbl->last_rand);
2077 		struct neigh_hash_table *nht;
2078 		struct ndt_config ndc = {
2079 			.ndtc_key_len		= tbl->key_len,
2080 			.ndtc_entry_size	= tbl->entry_size,
2081 			.ndtc_entries		= atomic_read(&tbl->entries),
2082 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2083 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2084 			.ndtc_proxy_qlen	= READ_ONCE(tbl->proxy_queue.qlen),
2085 		};
2086 
2087 		rcu_read_lock_bh();
2088 		nht = rcu_dereference_bh(tbl->nht);
2089 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2090 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2091 		rcu_read_unlock_bh();
2092 
2093 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2094 			goto nla_put_failure;
2095 	}
2096 
2097 	{
2098 		int cpu;
2099 		struct ndt_stats ndst;
2100 
2101 		memset(&ndst, 0, sizeof(ndst));
2102 
2103 		for_each_possible_cpu(cpu) {
2104 			struct neigh_statistics	*st;
2105 
2106 			st = per_cpu_ptr(tbl->stats, cpu);
2107 			ndst.ndts_allocs		+= READ_ONCE(st->allocs);
2108 			ndst.ndts_destroys		+= READ_ONCE(st->destroys);
2109 			ndst.ndts_hash_grows		+= READ_ONCE(st->hash_grows);
2110 			ndst.ndts_res_failed		+= READ_ONCE(st->res_failed);
2111 			ndst.ndts_lookups		+= READ_ONCE(st->lookups);
2112 			ndst.ndts_hits			+= READ_ONCE(st->hits);
2113 			ndst.ndts_rcv_probes_mcast	+= READ_ONCE(st->rcv_probes_mcast);
2114 			ndst.ndts_rcv_probes_ucast	+= READ_ONCE(st->rcv_probes_ucast);
2115 			ndst.ndts_periodic_gc_runs	+= READ_ONCE(st->periodic_gc_runs);
2116 			ndst.ndts_forced_gc_runs	+= READ_ONCE(st->forced_gc_runs);
2117 			ndst.ndts_table_fulls		+= READ_ONCE(st->table_fulls);
2118 		}
2119 
2120 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2121 				  NDTA_PAD))
2122 			goto nla_put_failure;
2123 	}
2124 
2125 	BUG_ON(tbl->parms.dev);
2126 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2127 		goto nla_put_failure;
2128 
2129 	read_unlock_bh(&tbl->lock);
2130 	nlmsg_end(skb, nlh);
2131 	return 0;
2132 
2133 nla_put_failure:
2134 	read_unlock_bh(&tbl->lock);
2135 	nlmsg_cancel(skb, nlh);
2136 	return -EMSGSIZE;
2137 }
2138 
2139 static int neightbl_fill_param_info(struct sk_buff *skb,
2140 				    struct neigh_table *tbl,
2141 				    struct neigh_parms *parms,
2142 				    u32 pid, u32 seq, int type,
2143 				    unsigned int flags)
2144 {
2145 	struct ndtmsg *ndtmsg;
2146 	struct nlmsghdr *nlh;
2147 
2148 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2149 	if (nlh == NULL)
2150 		return -EMSGSIZE;
2151 
2152 	ndtmsg = nlmsg_data(nlh);
2153 
2154 	read_lock_bh(&tbl->lock);
2155 	ndtmsg->ndtm_family = tbl->family;
2156 	ndtmsg->ndtm_pad1   = 0;
2157 	ndtmsg->ndtm_pad2   = 0;
2158 
2159 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2160 	    neightbl_fill_parms(skb, parms) < 0)
2161 		goto errout;
2162 
2163 	read_unlock_bh(&tbl->lock);
2164 	nlmsg_end(skb, nlh);
2165 	return 0;
2166 errout:
2167 	read_unlock_bh(&tbl->lock);
2168 	nlmsg_cancel(skb, nlh);
2169 	return -EMSGSIZE;
2170 }
2171 
2172 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2173 	[NDTA_NAME]		= { .type = NLA_STRING },
2174 	[NDTA_THRESH1]		= { .type = NLA_U32 },
2175 	[NDTA_THRESH2]		= { .type = NLA_U32 },
2176 	[NDTA_THRESH3]		= { .type = NLA_U32 },
2177 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2178 	[NDTA_PARMS]		= { .type = NLA_NESTED },
2179 };
2180 
2181 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2182 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2183 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2184 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2185 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2186 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2187 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2188 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2189 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2190 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2191 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2192 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2193 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2194 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2195 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2196 };
2197 
2198 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2199 			struct netlink_ext_ack *extack)
2200 {
2201 	struct net *net = sock_net(skb->sk);
2202 	struct neigh_table *tbl;
2203 	struct ndtmsg *ndtmsg;
2204 	struct nlattr *tb[NDTA_MAX+1];
2205 	bool found = false;
2206 	int err, tidx;
2207 
2208 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2209 				     nl_neightbl_policy, extack);
2210 	if (err < 0)
2211 		goto errout;
2212 
2213 	if (tb[NDTA_NAME] == NULL) {
2214 		err = -EINVAL;
2215 		goto errout;
2216 	}
2217 
2218 	ndtmsg = nlmsg_data(nlh);
2219 
2220 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2221 		tbl = neigh_tables[tidx];
2222 		if (!tbl)
2223 			continue;
2224 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2225 			continue;
2226 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2227 			found = true;
2228 			break;
2229 		}
2230 	}
2231 
2232 	if (!found)
2233 		return -ENOENT;
2234 
2235 	/*
2236 	 * We acquire tbl->lock to be nice to the periodic timers and
2237 	 * make sure they always see a consistent set of values.
2238 	 */
2239 	write_lock_bh(&tbl->lock);
2240 
2241 	if (tb[NDTA_PARMS]) {
2242 		struct nlattr *tbp[NDTPA_MAX+1];
2243 		struct neigh_parms *p;
2244 		int i, ifindex = 0;
2245 
2246 		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2247 						  tb[NDTA_PARMS],
2248 						  nl_ntbl_parm_policy, extack);
2249 		if (err < 0)
2250 			goto errout_tbl_lock;
2251 
2252 		if (tbp[NDTPA_IFINDEX])
2253 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2254 
2255 		p = lookup_neigh_parms(tbl, net, ifindex);
2256 		if (p == NULL) {
2257 			err = -ENOENT;
2258 			goto errout_tbl_lock;
2259 		}
2260 
2261 		for (i = 1; i <= NDTPA_MAX; i++) {
2262 			if (tbp[i] == NULL)
2263 				continue;
2264 
2265 			switch (i) {
2266 			case NDTPA_QUEUE_LEN:
2267 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2268 					      nla_get_u32(tbp[i]) *
2269 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2270 				break;
2271 			case NDTPA_QUEUE_LENBYTES:
2272 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2273 					      nla_get_u32(tbp[i]));
2274 				break;
2275 			case NDTPA_PROXY_QLEN:
2276 				NEIGH_VAR_SET(p, PROXY_QLEN,
2277 					      nla_get_u32(tbp[i]));
2278 				break;
2279 			case NDTPA_APP_PROBES:
2280 				NEIGH_VAR_SET(p, APP_PROBES,
2281 					      nla_get_u32(tbp[i]));
2282 				break;
2283 			case NDTPA_UCAST_PROBES:
2284 				NEIGH_VAR_SET(p, UCAST_PROBES,
2285 					      nla_get_u32(tbp[i]));
2286 				break;
2287 			case NDTPA_MCAST_PROBES:
2288 				NEIGH_VAR_SET(p, MCAST_PROBES,
2289 					      nla_get_u32(tbp[i]));
2290 				break;
2291 			case NDTPA_MCAST_REPROBES:
2292 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2293 					      nla_get_u32(tbp[i]));
2294 				break;
2295 			case NDTPA_BASE_REACHABLE_TIME:
2296 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2297 					      nla_get_msecs(tbp[i]));
2298 				/* update reachable_time as well, otherwise, the change will
2299 				 * only be effective after the next time neigh_periodic_work
2300 				 * decides to recompute it (can be multiple minutes)
2301 				 */
2302 				p->reachable_time =
2303 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2304 				break;
2305 			case NDTPA_GC_STALETIME:
2306 				NEIGH_VAR_SET(p, GC_STALETIME,
2307 					      nla_get_msecs(tbp[i]));
2308 				break;
2309 			case NDTPA_DELAY_PROBE_TIME:
2310 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2311 					      nla_get_msecs(tbp[i]));
2312 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2313 				break;
2314 			case NDTPA_RETRANS_TIME:
2315 				NEIGH_VAR_SET(p, RETRANS_TIME,
2316 					      nla_get_msecs(tbp[i]));
2317 				break;
2318 			case NDTPA_ANYCAST_DELAY:
2319 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2320 					      nla_get_msecs(tbp[i]));
2321 				break;
2322 			case NDTPA_PROXY_DELAY:
2323 				NEIGH_VAR_SET(p, PROXY_DELAY,
2324 					      nla_get_msecs(tbp[i]));
2325 				break;
2326 			case NDTPA_LOCKTIME:
2327 				NEIGH_VAR_SET(p, LOCKTIME,
2328 					      nla_get_msecs(tbp[i]));
2329 				break;
2330 			}
2331 		}
2332 	}
2333 
2334 	err = -ENOENT;
2335 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2336 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2337 	    !net_eq(net, &init_net))
2338 		goto errout_tbl_lock;
2339 
2340 	if (tb[NDTA_THRESH1])
2341 		WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2342 
2343 	if (tb[NDTA_THRESH2])
2344 		WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2345 
2346 	if (tb[NDTA_THRESH3])
2347 		WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2348 
2349 	if (tb[NDTA_GC_INTERVAL])
2350 		WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2351 
2352 	err = 0;
2353 
2354 errout_tbl_lock:
2355 	write_unlock_bh(&tbl->lock);
2356 errout:
2357 	return err;
2358 }
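
/* Hedged usage note: neightbl_set() is the RTM_SETNEIGHTBL handler, i.e. the
 * kernel side of "ip ntable change".  A command along the lines of
 *	ip ntable change name arp_cache thresh3 2048
 * ends up in the WRITE_ONCE(tbl->gc_thresh3, ...) update above, subject to
 * the init_net restriction on the global thresholds.
 */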
2359 
2360 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2361 				    struct netlink_ext_ack *extack)
2362 {
2363 	struct ndtmsg *ndtm;
2364 
2365 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2366 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2367 		return -EINVAL;
2368 	}
2369 
2370 	ndtm = nlmsg_data(nlh);
2371 	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2372 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2373 		return -EINVAL;
2374 	}
2375 
2376 	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2377 		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2378 		return -EINVAL;
2379 	}
2380 
2381 	return 0;
2382 }
2383 
2384 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2385 {
2386 	const struct nlmsghdr *nlh = cb->nlh;
2387 	struct net *net = sock_net(skb->sk);
2388 	int family, tidx, nidx = 0;
2389 	int tbl_skip = cb->args[0];
2390 	int neigh_skip = cb->args[1];
2391 	struct neigh_table *tbl;
2392 
2393 	if (cb->strict_check) {
2394 		int err = neightbl_valid_dump_info(nlh, cb->extack);
2395 
2396 		if (err < 0)
2397 			return err;
2398 	}
2399 
2400 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2401 
2402 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2403 		struct neigh_parms *p;
2404 
2405 		tbl = neigh_tables[tidx];
2406 		if (!tbl)
2407 			continue;
2408 
2409 		if (tidx < tbl_skip || (family && tbl->family != family))
2410 			continue;
2411 
2412 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2413 				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2414 				       NLM_F_MULTI) < 0)
2415 			break;
2416 
2417 		nidx = 0;
2418 		p = list_next_entry(&tbl->parms, list);
2419 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2420 			if (!net_eq(neigh_parms_net(p), net))
2421 				continue;
2422 
2423 			if (nidx < neigh_skip)
2424 				goto next;
2425 
2426 			if (neightbl_fill_param_info(skb, tbl, p,
2427 						     NETLINK_CB(cb->skb).portid,
2428 						     nlh->nlmsg_seq,
2429 						     RTM_NEWNEIGHTBL,
2430 						     NLM_F_MULTI) < 0)
2431 				goto out;
2432 		next:
2433 			nidx++;
2434 		}
2435 
2436 		neigh_skip = 0;
2437 	}
2438 out:
2439 	cb->args[0] = tidx;
2440 	cb->args[1] = nidx;
2441 
2442 	return skb->len;
2443 }
2444 
2445 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2446 			   u32 pid, u32 seq, int type, unsigned int flags)
2447 {
2448 	unsigned long now = jiffies;
2449 	struct nda_cacheinfo ci;
2450 	struct nlmsghdr *nlh;
2451 	struct ndmsg *ndm;
2452 
2453 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2454 	if (nlh == NULL)
2455 		return -EMSGSIZE;
2456 
2457 	ndm = nlmsg_data(nlh);
2458 	ndm->ndm_family	 = neigh->ops->family;
2459 	ndm->ndm_pad1    = 0;
2460 	ndm->ndm_pad2    = 0;
2461 	ndm->ndm_flags	 = neigh->flags;
2462 	ndm->ndm_type	 = neigh->type;
2463 	ndm->ndm_ifindex = neigh->dev->ifindex;
2464 
2465 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2466 		goto nla_put_failure;
2467 
2468 	read_lock_bh(&neigh->lock);
2469 	ndm->ndm_state	 = neigh->nud_state;
2470 	if (neigh->nud_state & NUD_VALID) {
2471 		char haddr[MAX_ADDR_LEN];
2472 
2473 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2474 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2475 			read_unlock_bh(&neigh->lock);
2476 			goto nla_put_failure;
2477 		}
2478 	}
2479 
2480 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2481 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2482 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2483 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2484 	read_unlock_bh(&neigh->lock);
2485 
2486 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2487 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2488 		goto nla_put_failure;
2489 
2490 	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2491 		goto nla_put_failure;
2492 
2493 	nlmsg_end(skb, nlh);
2494 	return 0;
2495 
2496 nla_put_failure:
2497 	nlmsg_cancel(skb, nlh);
2498 	return -EMSGSIZE;
2499 }
2500 
2501 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2502 			    u32 pid, u32 seq, int type, unsigned int flags,
2503 			    struct neigh_table *tbl)
2504 {
2505 	struct nlmsghdr *nlh;
2506 	struct ndmsg *ndm;
2507 
2508 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2509 	if (nlh == NULL)
2510 		return -EMSGSIZE;
2511 
2512 	ndm = nlmsg_data(nlh);
2513 	ndm->ndm_family	 = tbl->family;
2514 	ndm->ndm_pad1    = 0;
2515 	ndm->ndm_pad2    = 0;
2516 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2517 	ndm->ndm_type	 = RTN_UNICAST;
2518 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2519 	ndm->ndm_state	 = NUD_NONE;
2520 
2521 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2522 		goto nla_put_failure;
2523 
2524 	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2525 		goto nla_put_failure;
2526 
2527 	nlmsg_end(skb, nlh);
2528 	return 0;
2529 
2530 nla_put_failure:
2531 	nlmsg_cancel(skb, nlh);
2532 	return -EMSGSIZE;
2533 }
2534 
2535 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2536 {
2537 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2538 	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2539 }
2540 
2541 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2542 {
2543 	struct net_device *master;
2544 
2545 	if (!master_idx)
2546 		return false;
2547 
2548 	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2549 	if (!master || master->ifindex != master_idx)
2550 		return true;
2551 
2552 	return false;
2553 }
2554 
2555 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2556 {
2557 	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2558 		return true;
2559 
2560 	return false;
2561 }
2562 
2563 struct neigh_dump_filter {
2564 	int master_idx;
2565 	int dev_idx;
2566 };
2567 
2568 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2569 			    struct netlink_callback *cb,
2570 			    struct neigh_dump_filter *filter)
2571 {
2572 	struct net *net = sock_net(skb->sk);
2573 	struct neighbour *n;
2574 	int rc, h, s_h = cb->args[1];
2575 	int idx, s_idx = idx = cb->args[2];
2576 	struct neigh_hash_table *nht;
2577 	unsigned int flags = NLM_F_MULTI;
2578 
2579 	if (filter->dev_idx || filter->master_idx)
2580 		flags |= NLM_F_DUMP_FILTERED;
2581 
2582 	rcu_read_lock_bh();
2583 	nht = rcu_dereference_bh(tbl->nht);
2584 
2585 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2586 		if (h > s_h)
2587 			s_idx = 0;
2588 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2589 		     n != NULL;
2590 		     n = rcu_dereference_bh(n->next)) {
2591 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2592 				goto next;
2593 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2594 			    neigh_master_filtered(n->dev, filter->master_idx))
2595 				goto next;
2596 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2597 					    cb->nlh->nlmsg_seq,
2598 					    RTM_NEWNEIGH,
2599 					    flags) < 0) {
2600 				rc = -1;
2601 				goto out;
2602 			}
2603 next:
2604 			idx++;
2605 		}
2606 	}
2607 	rc = skb->len;
2608 out:
2609 	rcu_read_unlock_bh();
2610 	cb->args[1] = h;
2611 	cb->args[2] = idx;
2612 	return rc;
2613 }
2614 
2615 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2616 			     struct netlink_callback *cb,
2617 			     struct neigh_dump_filter *filter)
2618 {
2619 	struct pneigh_entry *n;
2620 	struct net *net = sock_net(skb->sk);
2621 	int rc, h, s_h = cb->args[3];
2622 	int idx, s_idx = idx = cb->args[4];
2623 	unsigned int flags = NLM_F_MULTI;
2624 
2625 	if (filter->dev_idx || filter->master_idx)
2626 		flags |= NLM_F_DUMP_FILTERED;
2627 
2628 	read_lock_bh(&tbl->lock);
2629 
2630 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2631 		if (h > s_h)
2632 			s_idx = 0;
2633 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2634 			if (idx < s_idx || pneigh_net(n) != net)
2635 				goto next;
2636 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2637 			    neigh_master_filtered(n->dev, filter->master_idx))
2638 				goto next;
2639 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2640 					    cb->nlh->nlmsg_seq,
2641 					    RTM_NEWNEIGH, flags, tbl) < 0) {
2642 				read_unlock_bh(&tbl->lock);
2643 				rc = -1;
2644 				goto out;
2645 			}
2646 		next:
2647 			idx++;
2648 		}
2649 	}
2650 
2651 	read_unlock_bh(&tbl->lock);
2652 	rc = skb->len;
2653 out:
2654 	cb->args[3] = h;
2655 	cb->args[4] = idx;
2656 	return rc;
2657 
2658 }
2659 
2660 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2661 				bool strict_check,
2662 				struct neigh_dump_filter *filter,
2663 				struct netlink_ext_ack *extack)
2664 {
2665 	struct nlattr *tb[NDA_MAX + 1];
2666 	int err, i;
2667 
2668 	if (strict_check) {
2669 		struct ndmsg *ndm;
2670 
2671 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2672 			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2673 			return -EINVAL;
2674 		}
2675 
2676 		ndm = nlmsg_data(nlh);
2677 		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2678 		    ndm->ndm_state || ndm->ndm_type) {
2679 			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2680 			return -EINVAL;
2681 		}
2682 
2683 		if (ndm->ndm_flags & ~NTF_PROXY) {
2684 			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2685 			return -EINVAL;
2686 		}
2687 
2688 		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2689 						    tb, NDA_MAX, nda_policy,
2690 						    extack);
2691 	} else {
2692 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2693 					     NDA_MAX, nda_policy, extack);
2694 	}
2695 	if (err < 0)
2696 		return err;
2697 
2698 	for (i = 0; i <= NDA_MAX; ++i) {
2699 		if (!tb[i])
2700 			continue;
2701 
2702 		/* all new attributes should require strict_check */
2703 		switch (i) {
2704 		case NDA_IFINDEX:
2705 			filter->dev_idx = nla_get_u32(tb[i]);
2706 			break;
2707 		case NDA_MASTER:
2708 			filter->master_idx = nla_get_u32(tb[i]);
2709 			break;
2710 		default:
2711 			if (strict_check) {
2712 				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2713 				return -EINVAL;
2714 			}
2715 		}
2716 	}
2717 
2718 	return 0;
2719 }
2720 
2721 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2722 {
2723 	const struct nlmsghdr *nlh = cb->nlh;
2724 	struct neigh_dump_filter filter = {};
2725 	struct neigh_table *tbl;
2726 	int t, family, s_t;
2727 	int proxy = 0;
2728 	int err;
2729 
2730 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2731 
2732 	/* check for presence of the full ndmsg structure; the family member
2733 	 * sits at the same offset in both ndmsg and rtgenmsg
2734 	 */
2735 	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2736 	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2737 		proxy = 1;
2738 
2739 	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2740 	if (err < 0 && cb->strict_check)
2741 		return err;
2742 
2743 	s_t = cb->args[0];
2744 
2745 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2746 		tbl = neigh_tables[t];
2747 
2748 		if (!tbl)
2749 			continue;
2750 		if (t < s_t || (family && tbl->family != family))
2751 			continue;
2752 		if (t > s_t)
2753 			memset(&cb->args[1], 0, sizeof(cb->args) -
2754 						sizeof(cb->args[0]));
2755 		if (proxy)
2756 			err = pneigh_dump_table(tbl, skb, cb, &filter);
2757 		else
2758 			err = neigh_dump_table(tbl, skb, cb, &filter);
2759 		if (err < 0)
2760 			break;
2761 	}
2762 
2763 	cb->args[0] = t;
2764 	return skb->len;
2765 }
2766 
2767 static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2768 			       struct neigh_table **tbl,
2769 			       void **dst, int *dev_idx, u8 *ndm_flags,
2770 			       struct netlink_ext_ack *extack)
2771 {
2772 	struct nlattr *tb[NDA_MAX + 1];
2773 	struct ndmsg *ndm;
2774 	int err, i;
2775 
2776 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2777 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2778 		return -EINVAL;
2779 	}
2780 
2781 	ndm = nlmsg_data(nlh);
2782 	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
2783 	    ndm->ndm_type) {
2784 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2785 		return -EINVAL;
2786 	}
2787 
2788 	if (ndm->ndm_flags & ~NTF_PROXY) {
2789 		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2790 		return -EINVAL;
2791 	}
2792 
2793 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2794 					    NDA_MAX, nda_policy, extack);
2795 	if (err < 0)
2796 		return err;
2797 
2798 	*ndm_flags = ndm->ndm_flags;
2799 	*dev_idx = ndm->ndm_ifindex;
2800 	*tbl = neigh_find_table(ndm->ndm_family);
2801 	if (*tbl == NULL) {
2802 		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2803 		return -EAFNOSUPPORT;
2804 	}
2805 
2806 	for (i = 0; i <= NDA_MAX; ++i) {
2807 		if (!tb[i])
2808 			continue;
2809 
2810 		switch (i) {
2811 		case NDA_DST:
2812 			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2813 				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2814 				return -EINVAL;
2815 			}
2816 			*dst = nla_data(tb[i]);
2817 			break;
2818 		default:
2819 			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2820 			return -EINVAL;
2821 		}
2822 	}
2823 
2824 	return 0;
2825 }
2826 
2827 static inline size_t neigh_nlmsg_size(void)
2828 {
2829 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2830 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2831 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2832 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2833 	       + nla_total_size(4)  /* NDA_PROBES */
2834 	       + nla_total_size(1); /* NDA_PROTOCOL */
2835 }
2836 
2837 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2838 			   u32 pid, u32 seq)
2839 {
2840 	struct sk_buff *skb;
2841 	int err = 0;
2842 
2843 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2844 	if (!skb)
2845 		return -ENOBUFS;
2846 
2847 	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2848 	if (err) {
2849 		kfree_skb(skb);
2850 		goto errout;
2851 	}
2852 
2853 	err = rtnl_unicast(skb, net, pid);
2854 errout:
2855 	return err;
2856 }
2857 
2858 static inline size_t pneigh_nlmsg_size(void)
2859 {
2860 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2861 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2862 	       + nla_total_size(1); /* NDA_PROTOCOL */
2863 }
2864 
2865 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2866 			    u32 pid, u32 seq, struct neigh_table *tbl)
2867 {
2868 	struct sk_buff *skb;
2869 	int err = 0;
2870 
2871 	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2872 	if (!skb)
2873 		return -ENOBUFS;
2874 
2875 	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2876 	if (err) {
2877 		kfree_skb(skb);
2878 		goto errout;
2879 	}
2880 
2881 	err = rtnl_unicast(skb, net, pid);
2882 errout:
2883 	return err;
2884 }
2885 
2886 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2887 		     struct netlink_ext_ack *extack)
2888 {
2889 	struct net *net = sock_net(in_skb->sk);
2890 	struct net_device *dev = NULL;
2891 	struct neigh_table *tbl = NULL;
2892 	struct neighbour *neigh;
2893 	void *dst = NULL;
2894 	u8 ndm_flags = 0;
2895 	int dev_idx = 0;
2896 	int err;
2897 
2898 	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2899 				  extack);
2900 	if (err < 0)
2901 		return err;
2902 
2903 	if (dev_idx) {
2904 		dev = __dev_get_by_index(net, dev_idx);
2905 		if (!dev) {
2906 			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2907 			return -ENODEV;
2908 		}
2909 	}
2910 
2911 	if (!dst) {
2912 		NL_SET_ERR_MSG(extack, "Network address not specified");
2913 		return -EINVAL;
2914 	}
2915 
2916 	if (ndm_flags & NTF_PROXY) {
2917 		struct pneigh_entry *pn;
2918 
2919 		pn = pneigh_lookup(tbl, net, dst, dev, 0);
2920 		if (!pn) {
2921 			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2922 			return -ENOENT;
2923 		}
2924 		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2925 					nlh->nlmsg_seq, tbl);
2926 	}
2927 
2928 	if (!dev) {
2929 		NL_SET_ERR_MSG(extack, "No device specified");
2930 		return -EINVAL;
2931 	}
2932 
2933 	neigh = neigh_lookup(tbl, dst, dev);
2934 	if (!neigh) {
2935 		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2936 		return -ENOENT;
2937 	}
2938 
2939 	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2940 			      nlh->nlmsg_seq);
2941 
2942 	neigh_release(neigh);
2943 
2944 	return err;
2945 }
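
/* Hedged usage note: neigh_get() is the RTM_GETNEIGH doit handler; a lookup
 * such as "ip neigh get 192.0.2.1 dev eth0" (or its NTF_PROXY variant) lands
 * here and is answered with a single RTM_NEWNEIGH reply built by
 * neigh_fill_info() or pneigh_fill_info().
 */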
2946 
2947 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2948 {
2949 	int chain;
2950 	struct neigh_hash_table *nht;
2951 
2952 	rcu_read_lock_bh();
2953 	nht = rcu_dereference_bh(tbl->nht);
2954 
2955 	read_lock(&tbl->lock); /* avoid resizes */
2956 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2957 		struct neighbour *n;
2958 
2959 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2960 		     n != NULL;
2961 		     n = rcu_dereference_bh(n->next))
2962 			cb(n, cookie);
2963 	}
2964 	read_unlock(&tbl->lock);
2965 	rcu_read_unlock_bh();
2966 }
2967 EXPORT_SYMBOL(neigh_for_each);
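
/* Minimal sketch of a neigh_for_each() caller; the table lock is held as a
 * reader for the whole walk, so the callback must only read.  The names
 * below are illustrative, not taken from an in-tree user.
 *
 *	static void example_count_cb(struct neighbour *n, void *cookie)
 *	{
 *		if (n->nud_state & NUD_VALID)
 *			(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int valid = 0;
 *	neigh_for_each(&arp_tbl, example_count_cb, &valid);
 */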
2968 
2969 /* The tbl->lock must be held as a writer and BH disabled. */
2970 void __neigh_for_each_release(struct neigh_table *tbl,
2971 			      int (*cb)(struct neighbour *))
2972 {
2973 	int chain;
2974 	struct neigh_hash_table *nht;
2975 
2976 	nht = rcu_dereference_protected(tbl->nht,
2977 					lockdep_is_held(&tbl->lock));
2978 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2979 		struct neighbour *n;
2980 		struct neighbour __rcu **np;
2981 
2982 		np = &nht->hash_buckets[chain];
2983 		while ((n = rcu_dereference_protected(*np,
2984 					lockdep_is_held(&tbl->lock))) != NULL) {
2985 			int release;
2986 
2987 			write_lock(&n->lock);
2988 			release = cb(n);
2989 			if (release) {
2990 				rcu_assign_pointer(*np,
2991 					rcu_dereference_protected(n->next,
2992 						lockdep_is_held(&tbl->lock)));
2993 				neigh_mark_dead(n);
2994 			} else
2995 				np = &n->next;
2996 			write_unlock(&n->lock);
2997 			if (release)
2998 				neigh_cleanup_and_release(n);
2999 		}
3000 	}
3001 }
3002 EXPORT_SYMBOL(__neigh_for_each_release);
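
/* Hedged sketch of an __neigh_for_each_release() caller: as the comment above
 * says, the table lock must already be write-locked with BH disabled, and the
 * callback returns non-zero for entries to unlink and release.  Illustrative
 * names only.
 *
 *	static int example_release_cb(struct neighbour *n)
 *	{
 *		return !(n->nud_state & NUD_VALID);
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, example_release_cb);
 *	write_unlock_bh(&tbl->lock);
 */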
3003 
3004 int neigh_xmit(int index, struct net_device *dev,
3005 	       const void *addr, struct sk_buff *skb)
3006 {
3007 	int err = -EAFNOSUPPORT;
3008 	if (likely(index < NEIGH_NR_TABLES)) {
3009 		struct neigh_table *tbl;
3010 		struct neighbour *neigh;
3011 
3012 		tbl = neigh_tables[index];
3013 		if (!tbl)
3014 			goto out;
3015 		rcu_read_lock_bh();
3016 		if (index == NEIGH_ARP_TABLE) {
3017 			u32 key = *((u32 *)addr);
3018 
3019 			neigh = __ipv4_neigh_lookup_noref(dev, key);
3020 		} else {
3021 			neigh = __neigh_lookup_noref(tbl, addr, dev);
3022 		}
3023 		if (!neigh)
3024 			neigh = __neigh_create(tbl, addr, dev, false);
3025 		err = PTR_ERR(neigh);
3026 		if (IS_ERR(neigh)) {
3027 			rcu_read_unlock_bh();
3028 			goto out_kfree_skb;
3029 		}
3030 		err = neigh->output(neigh, skb);
3031 		rcu_read_unlock_bh();
3032 	}
3033 	else if (index == NEIGH_LINK_TABLE) {
3034 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3035 				      addr, NULL, skb->len);
3036 		if (err < 0)
3037 			goto out_kfree_skb;
3038 		err = dev_queue_xmit(skb);
3039 	}
3040 out:
3041 	return err;
3042 out_kfree_skb:
3043 	kfree_skb(skb);
3044 	goto out;
3045 }
3046 EXPORT_SYMBOL(neigh_xmit);
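
/* Hedged usage sketch: callers such as the MPLS forwarding path resolve the
 * next hop and transmit in one step by passing the table index and the raw
 * next-hop address.  The variable names are illustrative only.
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &nexthop_be32, skb);
 */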
3047 
3048 #ifdef CONFIG_PROC_FS
3049 
3050 static struct neighbour *neigh_get_first(struct seq_file *seq)
3051 {
3052 	struct neigh_seq_state *state = seq->private;
3053 	struct net *net = seq_file_net(seq);
3054 	struct neigh_hash_table *nht = state->nht;
3055 	struct neighbour *n = NULL;
3056 	int bucket;
3057 
3058 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3059 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3060 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3061 
3062 		while (n) {
3063 			if (!net_eq(dev_net(n->dev), net))
3064 				goto next;
3065 			if (state->neigh_sub_iter) {
3066 				loff_t fakep = 0;
3067 				void *v;
3068 
3069 				v = state->neigh_sub_iter(state, n, &fakep);
3070 				if (!v)
3071 					goto next;
3072 			}
3073 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3074 				break;
3075 			if (n->nud_state & ~NUD_NOARP)
3076 				break;
3077 next:
3078 			n = rcu_dereference_bh(n->next);
3079 		}
3080 
3081 		if (n)
3082 			break;
3083 	}
3084 	state->bucket = bucket;
3085 
3086 	return n;
3087 }
3088 
3089 static struct neighbour *neigh_get_next(struct seq_file *seq,
3090 					struct neighbour *n,
3091 					loff_t *pos)
3092 {
3093 	struct neigh_seq_state *state = seq->private;
3094 	struct net *net = seq_file_net(seq);
3095 	struct neigh_hash_table *nht = state->nht;
3096 
3097 	if (state->neigh_sub_iter) {
3098 		void *v = state->neigh_sub_iter(state, n, pos);
3099 		if (v)
3100 			return n;
3101 	}
3102 	n = rcu_dereference_bh(n->next);
3103 
3104 	while (1) {
3105 		while (n) {
3106 			if (!net_eq(dev_net(n->dev), net))
3107 				goto next;
3108 			if (state->neigh_sub_iter) {
3109 				void *v = state->neigh_sub_iter(state, n, pos);
3110 				if (v)
3111 					return n;
3112 				goto next;
3113 			}
3114 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3115 				break;
3116 
3117 			if (n->nud_state & ~NUD_NOARP)
3118 				break;
3119 next:
3120 			n = rcu_dereference_bh(n->next);
3121 		}
3122 
3123 		if (n)
3124 			break;
3125 
3126 		if (++state->bucket >= (1 << nht->hash_shift))
3127 			break;
3128 
3129 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3130 	}
3131 
3132 	if (n && pos)
3133 		--(*pos);
3134 	return n;
3135 }
3136 
3137 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3138 {
3139 	struct neighbour *n = neigh_get_first(seq);
3140 
3141 	if (n) {
3142 		--(*pos);
3143 		while (*pos) {
3144 			n = neigh_get_next(seq, n, pos);
3145 			if (!n)
3146 				break;
3147 		}
3148 	}
3149 	return *pos ? NULL : n;
3150 }
3151 
3152 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3153 {
3154 	struct neigh_seq_state *state = seq->private;
3155 	struct net *net = seq_file_net(seq);
3156 	struct neigh_table *tbl = state->tbl;
3157 	struct pneigh_entry *pn = NULL;
3158 	int bucket = state->bucket;
3159 
3160 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3161 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3162 		pn = tbl->phash_buckets[bucket];
3163 		while (pn && !net_eq(pneigh_net(pn), net))
3164 			pn = pn->next;
3165 		if (pn)
3166 			break;
3167 	}
3168 	state->bucket = bucket;
3169 
3170 	return pn;
3171 }
3172 
3173 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3174 					    struct pneigh_entry *pn,
3175 					    loff_t *pos)
3176 {
3177 	struct neigh_seq_state *state = seq->private;
3178 	struct net *net = seq_file_net(seq);
3179 	struct neigh_table *tbl = state->tbl;
3180 
3181 	do {
3182 		pn = pn->next;
3183 	} while (pn && !net_eq(pneigh_net(pn), net));
3184 
3185 	while (!pn) {
3186 		if (++state->bucket > PNEIGH_HASHMASK)
3187 			break;
3188 		pn = tbl->phash_buckets[state->bucket];
3189 		while (pn && !net_eq(pneigh_net(pn), net))
3190 			pn = pn->next;
3191 		if (pn)
3192 			break;
3193 	}
3194 
3195 	if (pn && pos)
3196 		--(*pos);
3197 
3198 	return pn;
3199 }
3200 
3201 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3202 {
3203 	struct pneigh_entry *pn = pneigh_get_first(seq);
3204 
3205 	if (pn) {
3206 		--(*pos);
3207 		while (*pos) {
3208 			pn = pneigh_get_next(seq, pn, pos);
3209 			if (!pn)
3210 				break;
3211 		}
3212 	}
3213 	return *pos ? NULL : pn;
3214 }
3215 
3216 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3217 {
3218 	struct neigh_seq_state *state = seq->private;
3219 	void *rc;
3220 	loff_t idxpos = *pos;
3221 
3222 	rc = neigh_get_idx(seq, &idxpos);
3223 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3224 		rc = pneigh_get_idx(seq, &idxpos);
3225 
3226 	return rc;
3227 }
3228 
3229 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3230 	__acquires(tbl->lock)
3231 	__acquires(rcu_bh)
3232 {
3233 	struct neigh_seq_state *state = seq->private;
3234 
3235 	state->tbl = tbl;
3236 	state->bucket = 0;
3237 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3238 
3239 	rcu_read_lock_bh();
3240 	state->nht = rcu_dereference_bh(tbl->nht);
3241 	read_lock(&tbl->lock);
3242 
3243 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3244 }
3245 EXPORT_SYMBOL(neigh_seq_start);
3246 
3247 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3248 {
3249 	struct neigh_seq_state *state;
3250 	void *rc;
3251 
3252 	if (v == SEQ_START_TOKEN) {
3253 		rc = neigh_get_first(seq);
3254 		goto out;
3255 	}
3256 
3257 	state = seq->private;
3258 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3259 		rc = neigh_get_next(seq, v, NULL);
3260 		if (rc)
3261 			goto out;
3262 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3263 			rc = pneigh_get_first(seq);
3264 	} else {
3265 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3266 		rc = pneigh_get_next(seq, v, NULL);
3267 	}
3268 out:
3269 	++(*pos);
3270 	return rc;
3271 }
3272 EXPORT_SYMBOL(neigh_seq_next);
3273 
3274 void neigh_seq_stop(struct seq_file *seq, void *v)
3275 	__releases(tbl->lock)
3276 	__releases(rcu_bh)
3277 {
3278 	struct neigh_seq_state *state = seq->private;
3279 	struct neigh_table *tbl = state->tbl;
3280 
3281 	read_unlock(&tbl->lock);
3282 	rcu_read_unlock_bh();
3283 }
3284 EXPORT_SYMBOL(neigh_seq_stop);
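
/* Hedged sketch of how a protocol seq_file plugs into the iterators above;
 * /proc/net/arp does roughly this, skipping NUD_NOARP entries:
 *
 *	static void *example_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * with .next and .stop mapping straight onto neigh_seq_next() and
 * neigh_seq_stop().
 */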
3285 
3286 /* statistics via seq_file */
3287 
3288 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3289 {
3290 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3291 	int cpu;
3292 
3293 	if (*pos == 0)
3294 		return SEQ_START_TOKEN;
3295 
3296 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3297 		if (!cpu_possible(cpu))
3298 			continue;
3299 		*pos = cpu+1;
3300 		return per_cpu_ptr(tbl->stats, cpu);
3301 	}
3302 	return NULL;
3303 }
3304 
3305 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3306 {
3307 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3308 	int cpu;
3309 
3310 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3311 		if (!cpu_possible(cpu))
3312 			continue;
3313 		*pos = cpu+1;
3314 		return per_cpu_ptr(tbl->stats, cpu);
3315 	}
3316 	(*pos)++;
3317 	return NULL;
3318 }
3319 
3320 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3321 {
3322 
3323 }
3324 
3325 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3326 {
3327 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3328 	struct neigh_statistics *st = v;
3329 
3330 	if (v == SEQ_START_TOKEN) {
3331 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3332 		return 0;
3333 	}
3334 
3335 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
3336 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
3337 		   atomic_read(&tbl->entries),
3338 
3339 		   st->allocs,
3340 		   st->destroys,
3341 		   st->hash_grows,
3342 
3343 		   st->lookups,
3344 		   st->hits,
3345 
3346 		   st->res_failed,
3347 
3348 		   st->rcv_probes_mcast,
3349 		   st->rcv_probes_ucast,
3350 
3351 		   st->periodic_gc_runs,
3352 		   st->forced_gc_runs,
3353 		   st->unres_discards,
3354 		   st->table_fulls
3355 		   );
3356 
3357 	return 0;
3358 }
3359 
3360 static const struct seq_operations neigh_stat_seq_ops = {
3361 	.start	= neigh_stat_seq_start,
3362 	.next	= neigh_stat_seq_next,
3363 	.stop	= neigh_stat_seq_stop,
3364 	.show	= neigh_stat_seq_show,
3365 };
3366 #endif /* CONFIG_PROC_FS */
3367 
3368 static void __neigh_notify(struct neighbour *n, int type, int flags,
3369 			   u32 pid)
3370 {
3371 	struct net *net = dev_net(n->dev);
3372 	struct sk_buff *skb;
3373 	int err = -ENOBUFS;
3374 
3375 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3376 	if (skb == NULL)
3377 		goto errout;
3378 
3379 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3380 	if (err < 0) {
3381 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3382 		WARN_ON(err == -EMSGSIZE);
3383 		kfree_skb(skb);
3384 		goto errout;
3385 	}
3386 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3387 	return;
3388 errout:
3389 	if (err < 0)
3390 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3391 }
3392 
3393 void neigh_app_ns(struct neighbour *n)
3394 {
3395 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3396 }
3397 EXPORT_SYMBOL(neigh_app_ns);
3398 
3399 #ifdef CONFIG_SYSCTL
3400 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3401 
3402 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3403 			   void *buffer, size_t *lenp, loff_t *ppos)
3404 {
3405 	int size, ret;
3406 	struct ctl_table tmp = *ctl;
3407 
3408 	tmp.extra1 = SYSCTL_ZERO;
3409 	tmp.extra2 = &unres_qlen_max;
3410 	tmp.data = &size;
3411 
3412 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3413 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3414 
3415 	if (write && !ret)
3416 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3417 	return ret;
3418 }
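
/* Worked example (hedged; the truesize constant is arch and config
 * dependent): with SKB_TRUESIZE(ETH_FRAME_LEN) around 2KB, writing "3" to the
 * legacy unres_qlen sysctl stores roughly 3 * 2KB in unres_qlen_bytes, and a
 * read divides by the same constant, so the packet-count view is an
 * approximation of the byte-based limit actually enforced.
 */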
3419 
3420 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3421 						   int family)
3422 {
3423 	switch (family) {
3424 	case AF_INET:
3425 		return __in_dev_arp_parms_get_rcu(dev);
3426 	case AF_INET6:
3427 		return __in6_dev_nd_parms_get_rcu(dev);
3428 	}
3429 	return NULL;
3430 }
3431 
3432 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3433 				  int index)
3434 {
3435 	struct net_device *dev;
3436 	int family = neigh_parms_family(p);
3437 
3438 	rcu_read_lock();
3439 	for_each_netdev_rcu(net, dev) {
3440 		struct neigh_parms *dst_p =
3441 				neigh_get_dev_parms_rcu(dev, family);
3442 
3443 		if (dst_p && !test_bit(index, dst_p->data_state))
3444 			dst_p->data[index] = p->data[index];
3445 	}
3446 	rcu_read_unlock();
3447 }
3448 
3449 static void neigh_proc_update(struct ctl_table *ctl, int write)
3450 {
3451 	struct net_device *dev = ctl->extra1;
3452 	struct neigh_parms *p = ctl->extra2;
3453 	struct net *net = neigh_parms_net(p);
3454 	int index = (int *) ctl->data - p->data;
3455 
3456 	if (!write)
3457 		return;
3458 
3459 	set_bit(index, p->data_state);
3460 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3461 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3462 	if (!dev) /* NULL dev means this is default value */
3463 		neigh_copy_dflt_parms(net, p, index);
3464 }
3465 
3466 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3467 					   void *buffer, size_t *lenp,
3468 					   loff_t *ppos)
3469 {
3470 	struct ctl_table tmp = *ctl;
3471 	int ret;
3472 
3473 	tmp.extra1 = SYSCTL_ZERO;
3474 	tmp.extra2 = SYSCTL_INT_MAX;
3475 
3476 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3477 	neigh_proc_update(ctl, write);
3478 	return ret;
3479 }
3480 
3481 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3482 			size_t *lenp, loff_t *ppos)
3483 {
3484 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3485 
3486 	neigh_proc_update(ctl, write);
3487 	return ret;
3488 }
3489 EXPORT_SYMBOL(neigh_proc_dointvec);
3490 
3491 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3492 				size_t *lenp, loff_t *ppos)
3493 {
3494 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3495 
3496 	neigh_proc_update(ctl, write);
3497 	return ret;
3498 }
3499 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3500 
3501 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3502 					      void *buffer, size_t *lenp,
3503 					      loff_t *ppos)
3504 {
3505 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3506 
3507 	neigh_proc_update(ctl, write);
3508 	return ret;
3509 }
3510 
3511 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3512 				   void *buffer, size_t *lenp, loff_t *ppos)
3513 {
3514 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3515 
3516 	neigh_proc_update(ctl, write);
3517 	return ret;
3518 }
3519 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3520 
3521 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3522 					  void *buffer, size_t *lenp,
3523 					  loff_t *ppos)
3524 {
3525 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3526 
3527 	neigh_proc_update(ctl, write);
3528 	return ret;
3529 }
3530 
3531 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3532 					  void *buffer, size_t *lenp,
3533 					  loff_t *ppos)
3534 {
3535 	struct neigh_parms *p = ctl->extra2;
3536 	int ret;
3537 
3538 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3539 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3540 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3541 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3542 	else
3543 		ret = -1;
3544 
3545 	if (write && ret == 0) {
3546 		/* update reachable_time as well, otherwise, the change will
3547 		 * only be effective after the next time neigh_periodic_work
3548 		 * decides to recompute it
3549 		 */
3550 		p->reachable_time =
3551 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3552 	}
3553 	return ret;
3554 }
3555 
3556 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3557 	(&((struct neigh_parms *) 0)->data[index])
3558 
3559 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3560 	[NEIGH_VAR_ ## attr] = { \
3561 		.procname	= name, \
3562 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3563 		.maxlen		= sizeof(int), \
3564 		.mode		= mval, \
3565 		.proc_handler	= proc, \
3566 	}
3567 
3568 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3569 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3570 
3571 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3572 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3573 
3574 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3575 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3576 
3577 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3578 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3579 
3580 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3581 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3582 
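/* Template sysctl table: one entry per NEIGH_VAR_* knob plus the table-wide
 * gc_* entries at the end.  It is never registered as-is;
 * neigh_sysctl_register() kmemdup()s it so every (family, device) pair gets
 * its own copy with rebased ->data pointers.
 */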
3583 static struct neigh_sysctl_table {
3584 	struct ctl_table_header *sysctl_header;
3585 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3586 } neigh_sysctl_template __read_mostly = {
3587 	.neigh_vars = {
3588 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3589 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3590 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3591 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3592 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3593 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3594 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3595 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3596 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3597 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3598 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3599 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3600 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3601 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3602 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3603 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3604 		[NEIGH_VAR_GC_INTERVAL] = {
3605 			.procname	= "gc_interval",
3606 			.maxlen		= sizeof(int),
3607 			.mode		= 0644,
3608 			.proc_handler	= proc_dointvec_jiffies,
3609 		},
3610 		[NEIGH_VAR_GC_THRESH1] = {
3611 			.procname	= "gc_thresh1",
3612 			.maxlen		= sizeof(int),
3613 			.mode		= 0644,
3614 			.extra1		= SYSCTL_ZERO,
3615 			.extra2		= SYSCTL_INT_MAX,
3616 			.proc_handler	= proc_dointvec_minmax,
3617 		},
3618 		[NEIGH_VAR_GC_THRESH2] = {
3619 			.procname	= "gc_thresh2",
3620 			.maxlen		= sizeof(int),
3621 			.mode		= 0644,
3622 			.extra1		= SYSCTL_ZERO,
3623 			.extra2		= SYSCTL_INT_MAX,
3624 			.proc_handler	= proc_dointvec_minmax,
3625 		},
3626 		[NEIGH_VAR_GC_THRESH3] = {
3627 			.procname	= "gc_thresh3",
3628 			.maxlen		= sizeof(int),
3629 			.mode		= 0644,
3630 			.extra1		= SYSCTL_ZERO,
3631 			.extra2		= SYSCTL_INT_MAX,
3632 			.proc_handler	= proc_dointvec_minmax,
3633 		},
3634 		{},
3635 	},
3636 };
3637 
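/* Register the neighbour sysctls for one parms instance under
 * net/<ipv4|ipv6>/neigh/<dev|default>/.  A NULL @dev registers the
 * per-table "default" parms, which also exposes the gc_* knobs; a non-NULL
 * @handler replaces the proc handlers for the retrans and (base_)reachable
 * time entries so the caller can react to writes (what callers do with that
 * hook is outside this file).
 */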
3638 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3639 			  proc_handler *handler)
3640 {
3641 	int i;
3642 	struct neigh_sysctl_table *t;
3643 	const char *dev_name_source;
3644 	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3645 	char *p_name;
3646 
3647 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3648 	if (!t)
3649 		goto err;
3650 
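	/* Rebase the template's offset-encoded ->data pointers onto this
	 * parms instance, and stash @dev and @p where the handlers can find
	 * them (extra1/extra2).
	 */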
3651 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3652 		t->neigh_vars[i].data += (long) p;
3653 		t->neigh_vars[i].extra1 = dev;
3654 		t->neigh_vars[i].extra2 = p;
3655 	}
3656 
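	/* Per-device registrations stop the array at NEIGH_VAR_GC_INTERVAL:
	 * only the "default" entry exposes the table-wide gc_interval and
	 * gc_thresh* knobs, wired straight to the neigh_table fields.
	 */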
3657 	if (dev) {
3658 		dev_name_source = dev->name;
3659 		/* Terminate the table early */
3660 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3661 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3662 	} else {
3663 		struct neigh_table *tbl = p->tbl;
3664 		dev_name_source = "default";
3665 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3666 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3667 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3668 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3669 	}
3670 
3671 	if (handler) {
3672 		/* RetransTime */
3673 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3674 		/* ReachableTime */
3675 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3676 		/* RetransTime (in milliseconds) */
3677 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3678 		/* ReachableTime (in milliseconds) */
3679 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3680 	} else {
3681 		/* These handlers update p->reachable_time after
3682 		 * base_reachable_time(_ms) is set, so the new interval takes
3683 		 * effect on the next neighbour update instead of waiting for
3684 		 * neigh_periodic_work to recompute it (which can take several
3685 		 * minutes).  Any handler that replaces them should do the same.
3686 		 */
3687 		/* ReachableTime */
3688 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3689 			neigh_proc_base_reachable_time;
3690 		/* ReachableTime (in milliseconds) */
3691 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3692 			neigh_proc_base_reachable_time;
3693 	}
3694 
3695 	/* Don't export sysctls to unprivileged users */
3696 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3697 		t->neigh_vars[0].procname = NULL;
3698 
3699 	switch (neigh_parms_family(p)) {
3700 	case AF_INET:
3701 	      p_name = "ipv4";
3702 	      break;
3703 	case AF_INET6:
3704 	      p_name = "ipv6";
3705 	      break;
3706 	default:
3707 	      BUG();
3708 	}
3709 
3710 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3711 		p_name, dev_name_source);
3712 	t->sysctl_header =
3713 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3714 	if (!t->sysctl_header)
3715 		goto free;
3716 
3717 	p->sysctl_table = t;
3718 	return 0;
3719 
3720 free:
3721 	kfree(t);
3722 err:
3723 	return -ENOBUFS;
3724 }
3725 EXPORT_SYMBOL(neigh_sysctl_register);
3726 
3727 void neigh_sysctl_unregister(struct neigh_parms *p)
3728 {
3729 	if (p->sysctl_table) {
3730 		struct neigh_sysctl_table *t = p->sysctl_table;
3731 		p->sysctl_table = NULL;
3732 		unregister_net_sysctl_table(t->sysctl_header);
3733 		kfree(t);
3734 	}
3735 }
3736 EXPORT_SYMBOL(neigh_sysctl_unregister);
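
/* Illustrative pairing only -- the call site sketched here is an assumption
 * about callers outside this file, not taken from it.  A protocol would
 * typically register its per-device parms when the device-level state is
 * created and unregister them on teardown, roughly:
 *
 *	if (neigh_sysctl_register(dev, parms, NULL))
 *		goto out_free;
 *	...
 *	neigh_sysctl_unregister(parms);
 *
 * passing a non-NULL handler instead when it needs to observe writes to the
 * retrans/reachable-time knobs.
 */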
3737 
3738 #endif	/* CONFIG_SYSCTL */
3739 
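/* Wire the neighbour netlink message types (RTM_NEWNEIGH, RTM_DELNEIGH,
 * RTM_GETNEIGH and the NEIGHTBL dump/set variants) into rtnetlink at
 * subsys-initcall time.
 */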
3740 static int __init neigh_init(void)
3741 {
3742 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3743 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3744 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3745 
3746 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3747 		      0);
3748 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3749 
3750 	return 0;
3751 }
3752 
3753 subsys_initcall(neigh_init);
3754