// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to
     use the neighbour cache.
   - If an entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   dev->hard_header is assumed to be simplistic and to make no
   callbacks into the neighbour tables.
 */
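
/*
 * A minimal sketch of the intended pattern for non-trivial work on an
 * entry found during a bucket scan (illustration only, not a real call
 * site in this file):
 *
 *	write_lock_bh(&tbl->lock);
 *	n = <find entry in hash bucket>;
 *	neigh_hold(n);
 *	write_unlock_bh(&tbl->lock);
 *	<slow work: may take neigh->lock, talk to drivers, etc.>
 *	neigh_release(n);
 */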

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 * For example, base = 30*HZ yields a value uniform in [15*HZ, 45*HZ).
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);

static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);
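	/* hash_val now holds the bucket index: the top hash_shift bits
	 * of the 32-bit hash.
	 */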

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
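	/* Walk the gc_list, shrinking the cache toward gc_thresh2: remove
	 * unreferenced entries that are failed, noarp, or multicast, or
	 * that have not been updated in the last five seconds.
	 */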
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				rcu_dereference_protected(n->next,
					lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destruction will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u8 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

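	/* Above gc_thresh3, or above gc_thresh2 when the last flush is more
	 * than five seconds old, force a synchronous gc first; fail the
	 * allocation only if that reclaims nothing and the table is still
	 * at gc_thresh3.
	 */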
	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->flags = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
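	/* Force bit 0 so the per-table hash seed is odd and never zero. */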
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

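	/* Publish the new table; concurrent RCU readers see either the old
	 * or the new table, and the old one is freed after a grace period.
	 */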
	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u8 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
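	/* Fold the last four bytes of the key down to a 4-bit bucket index. */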
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release the lock here, even if the hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
		NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a delicate spot: report_unreachable is a very complicated
	   routine. In particular, it can hit the same neighbour entry!

	   So we try to be careful and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr or NULL, if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */
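
/*
 * A minimal usage sketch (illustration only, not a call site in this
 * file): an administrative update that installs a hypothetical new
 * link-layer address @new_lladdr and marks the entry reachable,
 * assuming the caller already holds a reference on @neigh:
 *
 *	neigh_update(neigh, new_lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
 */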

static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
	if (flags & NEIGH_UPDATE_F_USE) {
		new = old & ~NUD_PERMANENT;
		neigh->nud_state = new;
		err = 0;
		goto out;
	}

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare the new lladdr with the cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update the confirmed timestamp for the neighbour entry after we
	 * receive an ARP packet, even if it doesn't change the IP-to-MAC
	 * binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If the entry was valid and the address is not changed,
	   do not change the entry state, if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update the timestamp only once we know we will make a change to
	 * the neighbour entry. Otherwise we risk moving the locktime window
	 * with noop updates and ignoring relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				(NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
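	/* Create the entry only when we have an lladdr to install (or the
	 * device needs no link-layer address at all).
	 */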
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache *hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

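	/* Delay the proxied reply by a random amount of up to PROXY_DELAY. */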
	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	if (parms->dev)
		dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
		neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
				  &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
1766
neigh_find_table(int family)1767 static struct neigh_table *neigh_find_table(int family)
1768 {
1769 struct neigh_table *tbl = NULL;
1770
1771 switch (family) {
1772 case AF_INET:
1773 tbl = neigh_tables[NEIGH_ARP_TABLE];
1774 break;
1775 case AF_INET6:
1776 tbl = neigh_tables[NEIGH_ND_TABLE];
1777 break;
1778 case AF_DECnet:
1779 tbl = neigh_tables[NEIGH_DN_TABLE];
1780 break;
1781 #ifdef CONFIG_NEWIP
1782 case AF_NINET: /* NIP */
1783 tbl = neigh_tables[NEIGH_NND_TABLE];
1784 break;
1785 #endif
1786 }
1787
1788 return tbl;
1789 }
1790
1791 const struct nla_policy nda_policy[NDA_MAX+1] = {
1792 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID },
1793 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1794 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1795 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1796 [NDA_PROBES] = { .type = NLA_U32 },
1797 [NDA_VLAN] = { .type = NLA_U16 },
1798 [NDA_PORT] = { .type = NLA_U16 },
1799 [NDA_VNI] = { .type = NLA_U32 },
1800 [NDA_IFINDEX] = { .type = NLA_U32 },
1801 [NDA_MASTER] = { .type = NLA_U32 },
1802 [NDA_PROTOCOL] = { .type = NLA_U8 },
1803 [NDA_NH_ID] = { .type = NLA_U32 },
1804 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED },
1805 };
1806
neigh_delete(struct sk_buff * skb,struct nlmsghdr * nlh,struct netlink_ext_ack * extack)1807 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1808 struct netlink_ext_ack *extack)
1809 {
1810 struct net *net = sock_net(skb->sk);
1811 struct ndmsg *ndm;
1812 struct nlattr *dst_attr;
1813 struct neigh_table *tbl;
1814 struct neighbour *neigh;
1815 struct net_device *dev = NULL;
1816 int err = -EINVAL;
1817
1818 ASSERT_RTNL();
1819 if (nlmsg_len(nlh) < sizeof(*ndm))
1820 goto out;
1821
1822 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1823 if (!dst_attr) {
1824 NL_SET_ERR_MSG(extack, "Network address not specified");
1825 goto out;
1826 }
1827
1828 ndm = nlmsg_data(nlh);
1829 if (ndm->ndm_ifindex) {
1830 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1831 if (dev == NULL) {
1832 err = -ENODEV;
1833 goto out;
1834 }
1835 }
1836
1837 tbl = neigh_find_table(ndm->ndm_family);
1838 if (tbl == NULL)
1839 return -EAFNOSUPPORT;
1840
1841 if (nla_len(dst_attr) < (int)tbl->key_len) {
1842 NL_SET_ERR_MSG(extack, "Invalid network address");
1843 goto out;
1844 }
1845
1846 if (ndm->ndm_flags & NTF_PROXY) {
1847 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1848 goto out;
1849 }
1850
1851 if (dev == NULL)
1852 goto out;
1853
1854 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1855 if (neigh == NULL) {
1856 err = -ENOENT;
1857 goto out;
1858 }
1859
1860 err = __neigh_update(neigh, NULL, NUD_FAILED,
1861 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1862 NETLINK_CB(skb).portid, extack);
1863 write_lock_bh(&tbl->lock);
1864 neigh_release(neigh);
1865 neigh_remove_one(neigh, tbl);
1866 write_unlock_bh(&tbl->lock);
1867
1868 out:
1869 return err;
1870 }
1871
neigh_add(struct sk_buff * skb,struct nlmsghdr * nlh,struct netlink_ext_ack * extack)1872 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1873 struct netlink_ext_ack *extack)
1874 {
1875 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1876 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1877 struct net *net = sock_net(skb->sk);
1878 struct ndmsg *ndm;
1879 struct nlattr *tb[NDA_MAX+1];
1880 struct neigh_table *tbl;
1881 struct net_device *dev = NULL;
1882 struct neighbour *neigh;
1883 void *dst, *lladdr;
1884 u8 protocol = 0;
1885 int err;
1886
1887 ASSERT_RTNL();
1888 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1889 nda_policy, extack);
1890 if (err < 0)
1891 goto out;
1892
1893 err = -EINVAL;
1894 if (!tb[NDA_DST]) {
1895 NL_SET_ERR_MSG(extack, "Network address not specified");
1896 goto out;
1897 }
1898
1899 ndm = nlmsg_data(nlh);
1900 if (ndm->ndm_ifindex) {
1901 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1902 if (dev == NULL) {
1903 err = -ENODEV;
1904 goto out;
1905 }
1906
1907 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1908 NL_SET_ERR_MSG(extack, "Invalid link address");
1909 goto out;
1910 }
1911 }
1912
1913 tbl = neigh_find_table(ndm->ndm_family);
1914 if (tbl == NULL)
1915 return -EAFNOSUPPORT;
1916
1917 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1918 NL_SET_ERR_MSG(extack, "Invalid network address");
1919 goto out;
1920 }
1921
1922 dst = nla_data(tb[NDA_DST]);
1923 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1924
1925 if (tb[NDA_PROTOCOL])
1926 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1927
1928 if (ndm->ndm_flags & NTF_PROXY) {
1929 struct pneigh_entry *pn;
1930
1931 err = -ENOBUFS;
1932 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1933 if (pn) {
1934 pn->flags = ndm->ndm_flags;
1935 if (protocol)
1936 pn->protocol = protocol;
1937 err = 0;
1938 }
1939 goto out;
1940 }
1941
1942 if (!dev) {
1943 NL_SET_ERR_MSG(extack, "Device not specified");
1944 goto out;
1945 }
1946
1947 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1948 err = -EINVAL;
1949 goto out;
1950 }
1951
1952 neigh = neigh_lookup(tbl, dst, dev);
1953 if (neigh == NULL) {
1954 bool exempt_from_gc;
1955
1956 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1957 err = -ENOENT;
1958 goto out;
1959 }
1960
1961 exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1962 ndm->ndm_flags & NTF_EXT_LEARNED;
1963 neigh = ___neigh_create(tbl, dst, dev,
1964 ndm->ndm_flags & NTF_EXT_LEARNED,
1965 exempt_from_gc, true);
1966 if (IS_ERR(neigh)) {
1967 err = PTR_ERR(neigh);
1968 goto out;
1969 }
1970 } else {
1971 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1972 err = -EEXIST;
1973 neigh_release(neigh);
1974 goto out;
1975 }
1976
1977 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1978 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1979 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1980 }
1981
1982 if (protocol)
1983 neigh->protocol = protocol;
1984 if (ndm->ndm_flags & NTF_EXT_LEARNED)
1985 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1986 if (ndm->ndm_flags & NTF_ROUTER)
1987 flags |= NEIGH_UPDATE_F_ISROUTER;
1988 if (ndm->ndm_flags & NTF_USE)
1989 flags |= NEIGH_UPDATE_F_USE;
1990
1991 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1992 NETLINK_CB(skb).portid, extack);
1993 if (!err && ndm->ndm_flags & NTF_USE) {
1994 neigh_event_send(neigh, NULL);
1995 err = 0;
1996 }
1997 neigh_release(neigh);
1998 out:
1999 return err;
2000 }
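
/*
 * Hedged reference (based on iproute2 sources; verify against your
 * version): how ip(8) subcommands map onto the nlmsg_flags inspected
 * above.
 *
 *   ip neigh add     -> NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL
 *   ip neigh change  -> NLM_F_REQUEST | NLM_F_REPLACE
 *   ip neigh replace -> NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE
 *
 * Without NLM_F_CREATE a missing entry fails with -ENOENT; NLM_F_EXCL
 * turns an existing entry into -EEXIST; without NLM_F_REPLACE the
 * OVERRIDE bits are cleared, so an existing lladdr is preserved.
 */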
2001
2002 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2003 {
2004 struct nlattr *nest;
2005
2006 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2007 if (nest == NULL)
2008 return -ENOBUFS;
2009
2010 if ((parms->dev &&
2011 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2012 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2013 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2014 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2015 /* approximate value for deprecated QUEUE_LEN (in packets) */
2016 nla_put_u32(skb, NDTPA_QUEUE_LEN,
2017 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2018 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2019 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2020 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2021 NEIGH_VAR(parms, UCAST_PROBES)) ||
2022 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2023 NEIGH_VAR(parms, MCAST_PROBES)) ||
2024 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2025 NEIGH_VAR(parms, MCAST_REPROBES)) ||
2026 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2027 NDTPA_PAD) ||
2028 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2029 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2030 nla_put_msecs(skb, NDTPA_GC_STALETIME,
2031 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2032 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2033 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2034 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2035 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2036 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2037 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2038 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2039 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2040 nla_put_msecs(skb, NDTPA_LOCKTIME,
2041 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
2042 goto nla_put_failure;
2043 return nla_nest_end(skb, nest);
2044
2045 nla_put_failure:
2046 nla_nest_cancel(skb, nest);
2047 return -EMSGSIZE;
2048 }
2049
2050 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2051 u32 pid, u32 seq, int type, int flags)
2052 {
2053 struct nlmsghdr *nlh;
2054 struct ndtmsg *ndtmsg;
2055
2056 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2057 if (nlh == NULL)
2058 return -EMSGSIZE;
2059
2060 ndtmsg = nlmsg_data(nlh);
2061
2062 read_lock_bh(&tbl->lock);
2063 ndtmsg->ndtm_family = tbl->family;
2064 ndtmsg->ndtm_pad1 = 0;
2065 ndtmsg->ndtm_pad2 = 0;
2066
2067 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2068 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2069 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2070 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2071 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2072 goto nla_put_failure;
2073 {
2074 unsigned long now = jiffies;
2075 long flush_delta = now - tbl->last_flush;
2076 long rand_delta = now - tbl->last_rand;
2077 struct neigh_hash_table *nht;
2078 struct ndt_config ndc = {
2079 .ndtc_key_len = tbl->key_len,
2080 .ndtc_entry_size = tbl->entry_size,
2081 .ndtc_entries = atomic_read(&tbl->entries),
2082 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2083 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
2084 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
2085 };
2086
2087 rcu_read_lock_bh();
2088 nht = rcu_dereference_bh(tbl->nht);
2089 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2090 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2091 rcu_read_unlock_bh();
2092
2093 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2094 goto nla_put_failure;
2095 }
2096
2097 {
2098 int cpu;
2099 struct ndt_stats ndst;
2100
2101 memset(&ndst, 0, sizeof(ndst));
2102
2103 for_each_possible_cpu(cpu) {
2104 struct neigh_statistics *st;
2105
2106 st = per_cpu_ptr(tbl->stats, cpu);
2107 ndst.ndts_allocs += st->allocs;
2108 ndst.ndts_destroys += st->destroys;
2109 ndst.ndts_hash_grows += st->hash_grows;
2110 ndst.ndts_res_failed += st->res_failed;
2111 ndst.ndts_lookups += st->lookups;
2112 ndst.ndts_hits += st->hits;
2113 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
2114 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
2115 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
2116 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
2117 ndst.ndts_table_fulls += st->table_fulls;
2118 }
2119
2120 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2121 NDTA_PAD))
2122 goto nla_put_failure;
2123 }
2124
2125 BUG_ON(tbl->parms.dev);
2126 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2127 goto nla_put_failure;
2128
2129 read_unlock_bh(&tbl->lock);
2130 nlmsg_end(skb, nlh);
2131 return 0;
2132
2133 nla_put_failure:
2134 read_unlock_bh(&tbl->lock);
2135 nlmsg_cancel(skb, nlh);
2136 return -EMSGSIZE;
2137 }
2138
2139 static int neightbl_fill_param_info(struct sk_buff *skb,
2140 struct neigh_table *tbl,
2141 struct neigh_parms *parms,
2142 u32 pid, u32 seq, int type,
2143 unsigned int flags)
2144 {
2145 struct ndtmsg *ndtmsg;
2146 struct nlmsghdr *nlh;
2147
2148 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2149 if (nlh == NULL)
2150 return -EMSGSIZE;
2151
2152 ndtmsg = nlmsg_data(nlh);
2153
2154 read_lock_bh(&tbl->lock);
2155 ndtmsg->ndtm_family = tbl->family;
2156 ndtmsg->ndtm_pad1 = 0;
2157 ndtmsg->ndtm_pad2 = 0;
2158
2159 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2160 neightbl_fill_parms(skb, parms) < 0)
2161 goto errout;
2162
2163 read_unlock_bh(&tbl->lock);
2164 nlmsg_end(skb, nlh);
2165 return 0;
2166 errout:
2167 read_unlock_bh(&tbl->lock);
2168 nlmsg_cancel(skb, nlh);
2169 return -EMSGSIZE;
2170 }
2171
2172 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2173 [NDTA_NAME] = { .type = NLA_STRING },
2174 [NDTA_THRESH1] = { .type = NLA_U32 },
2175 [NDTA_THRESH2] = { .type = NLA_U32 },
2176 [NDTA_THRESH3] = { .type = NLA_U32 },
2177 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2178 [NDTA_PARMS] = { .type = NLA_NESTED },
2179 };
2180
2181 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2182 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2183 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2184 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2185 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2186 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2187 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
2188 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
2189 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2190 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2191 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2192 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2193 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2194 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2195 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2196 };
2197
2198 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2199 struct netlink_ext_ack *extack)
2200 {
2201 struct net *net = sock_net(skb->sk);
2202 struct neigh_table *tbl;
2203 struct ndtmsg *ndtmsg;
2204 struct nlattr *tb[NDTA_MAX+1];
2205 bool found = false;
2206 int err, tidx;
2207
2208 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2209 nl_neightbl_policy, extack);
2210 if (err < 0)
2211 goto errout;
2212
2213 if (tb[NDTA_NAME] == NULL) {
2214 err = -EINVAL;
2215 goto errout;
2216 }
2217
2218 ndtmsg = nlmsg_data(nlh);
2219
2220 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2221 tbl = neigh_tables[tidx];
2222 if (!tbl)
2223 continue;
2224 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2225 continue;
2226 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2227 found = true;
2228 break;
2229 }
2230 }
2231
2232 if (!found)
2233 return -ENOENT;
2234
2235 /*
2236 * We acquire tbl->lock to be nice to the periodic timers and
2237 * make sure they always see a consistent set of values.
2238 */
2239 write_lock_bh(&tbl->lock);
2240
2241 if (tb[NDTA_PARMS]) {
2242 struct nlattr *tbp[NDTPA_MAX+1];
2243 struct neigh_parms *p;
2244 int i, ifindex = 0;
2245
2246 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2247 tb[NDTA_PARMS],
2248 nl_ntbl_parm_policy, extack);
2249 if (err < 0)
2250 goto errout_tbl_lock;
2251
2252 if (tbp[NDTPA_IFINDEX])
2253 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2254
2255 p = lookup_neigh_parms(tbl, net, ifindex);
2256 if (p == NULL) {
2257 err = -ENOENT;
2258 goto errout_tbl_lock;
2259 }
2260
2261 for (i = 1; i <= NDTPA_MAX; i++) {
2262 if (tbp[i] == NULL)
2263 continue;
2264
2265 switch (i) {
2266 case NDTPA_QUEUE_LEN:
2267 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2268 nla_get_u32(tbp[i]) *
2269 SKB_TRUESIZE(ETH_FRAME_LEN));
2270 break;
2271 case NDTPA_QUEUE_LENBYTES:
2272 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2273 nla_get_u32(tbp[i]));
2274 break;
2275 case NDTPA_PROXY_QLEN:
2276 NEIGH_VAR_SET(p, PROXY_QLEN,
2277 nla_get_u32(tbp[i]));
2278 break;
2279 case NDTPA_APP_PROBES:
2280 NEIGH_VAR_SET(p, APP_PROBES,
2281 nla_get_u32(tbp[i]));
2282 break;
2283 case NDTPA_UCAST_PROBES:
2284 NEIGH_VAR_SET(p, UCAST_PROBES,
2285 nla_get_u32(tbp[i]));
2286 break;
2287 case NDTPA_MCAST_PROBES:
2288 NEIGH_VAR_SET(p, MCAST_PROBES,
2289 nla_get_u32(tbp[i]));
2290 break;
2291 case NDTPA_MCAST_REPROBES:
2292 NEIGH_VAR_SET(p, MCAST_REPROBES,
2293 nla_get_u32(tbp[i]));
2294 break;
2295 case NDTPA_BASE_REACHABLE_TIME:
2296 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2297 nla_get_msecs(tbp[i]));
2298 /* update reachable_time as well, otherwise, the change will
2299 * only be effective after the next time neigh_periodic_work
2300 * decides to recompute it (can be multiple minutes)
2301 */
2302 p->reachable_time =
2303 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2304 break;
2305 case NDTPA_GC_STALETIME:
2306 NEIGH_VAR_SET(p, GC_STALETIME,
2307 nla_get_msecs(tbp[i]));
2308 break;
2309 case NDTPA_DELAY_PROBE_TIME:
2310 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2311 nla_get_msecs(tbp[i]));
2312 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2313 break;
2314 case NDTPA_RETRANS_TIME:
2315 NEIGH_VAR_SET(p, RETRANS_TIME,
2316 nla_get_msecs(tbp[i]));
2317 break;
2318 case NDTPA_ANYCAST_DELAY:
2319 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2320 nla_get_msecs(tbp[i]));
2321 break;
2322 case NDTPA_PROXY_DELAY:
2323 NEIGH_VAR_SET(p, PROXY_DELAY,
2324 nla_get_msecs(tbp[i]));
2325 break;
2326 case NDTPA_LOCKTIME:
2327 NEIGH_VAR_SET(p, LOCKTIME,
2328 nla_get_msecs(tbp[i]));
2329 break;
2330 }
2331 }
2332 }
2333
2334 err = -ENOENT;
2335 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2336 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2337 !net_eq(net, &init_net))
2338 goto errout_tbl_lock;
2339
2340 if (tb[NDTA_THRESH1])
2341 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2342
2343 if (tb[NDTA_THRESH2])
2344 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2345
2346 if (tb[NDTA_THRESH3])
2347 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2348
2349 if (tb[NDTA_GC_INTERVAL])
2350 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2351
2352 err = 0;
2353
2354 errout_tbl_lock:
2355 write_unlock_bh(&tbl->lock);
2356 errout:
2357 return err;
2358 }
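
/*
 * Illustrative trigger: RTM_SETNEIGHTBL is what "ip ntable change"
 * issues (iproute2; assumption, check your version), e.g.:
 *
 *   ip ntable change name arp_cache thresh1 256
 *
 * Note that only init_net may change the table-wide gc_* values,
 * while per-parms attributes under NDTA_PARMS work in any netns.
 */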
2359
2360 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2361 struct netlink_ext_ack *extack)
2362 {
2363 struct ndtmsg *ndtm;
2364
2365 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2366 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2367 return -EINVAL;
2368 }
2369
2370 ndtm = nlmsg_data(nlh);
2371 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2372 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2373 return -EINVAL;
2374 }
2375
2376 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2377 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2378 return -EINVAL;
2379 }
2380
2381 return 0;
2382 }
2383
2384 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2385 {
2386 const struct nlmsghdr *nlh = cb->nlh;
2387 struct net *net = sock_net(skb->sk);
2388 int family, tidx, nidx = 0;
2389 int tbl_skip = cb->args[0];
2390 int neigh_skip = cb->args[1];
2391 struct neigh_table *tbl;
2392
2393 if (cb->strict_check) {
2394 int err = neightbl_valid_dump_info(nlh, cb->extack);
2395
2396 if (err < 0)
2397 return err;
2398 }
2399
2400 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2401
2402 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2403 struct neigh_parms *p;
2404
2405 tbl = neigh_tables[tidx];
2406 if (!tbl)
2407 continue;
2408
2409 if (tidx < tbl_skip || (family && tbl->family != family))
2410 continue;
2411
2412 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2413 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2414 NLM_F_MULTI) < 0)
2415 break;
2416
2417 nidx = 0;
2418 p = list_next_entry(&tbl->parms, list);
2419 list_for_each_entry_from(p, &tbl->parms_list, list) {
2420 if (!net_eq(neigh_parms_net(p), net))
2421 continue;
2422
2423 if (nidx < neigh_skip)
2424 goto next;
2425
2426 if (neightbl_fill_param_info(skb, tbl, p,
2427 NETLINK_CB(cb->skb).portid,
2428 nlh->nlmsg_seq,
2429 RTM_NEWNEIGHTBL,
2430 NLM_F_MULTI) < 0)
2431 goto out;
2432 next:
2433 nidx++;
2434 }
2435
2436 neigh_skip = 0;
2437 }
2438 out:
2439 cb->args[0] = tidx;
2440 cb->args[1] = nidx;
2441
2442 return skb->len;
2443 }
2444
2445 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2446 u32 pid, u32 seq, int type, unsigned int flags)
2447 {
2448 unsigned long now = jiffies;
2449 struct nda_cacheinfo ci;
2450 struct nlmsghdr *nlh;
2451 struct ndmsg *ndm;
2452
2453 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2454 if (nlh == NULL)
2455 return -EMSGSIZE;
2456
2457 ndm = nlmsg_data(nlh);
2458 ndm->ndm_family = neigh->ops->family;
2459 ndm->ndm_pad1 = 0;
2460 ndm->ndm_pad2 = 0;
2461 ndm->ndm_flags = neigh->flags;
2462 ndm->ndm_type = neigh->type;
2463 ndm->ndm_ifindex = neigh->dev->ifindex;
2464
2465 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2466 goto nla_put_failure;
2467
2468 read_lock_bh(&neigh->lock);
2469 ndm->ndm_state = neigh->nud_state;
2470 if (neigh->nud_state & NUD_VALID) {
2471 char haddr[MAX_ADDR_LEN];
2472
2473 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2474 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2475 read_unlock_bh(&neigh->lock);
2476 goto nla_put_failure;
2477 }
2478 }
2479
2480 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2481 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2482 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2483 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2484 read_unlock_bh(&neigh->lock);
2485
2486 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2487 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2488 goto nla_put_failure;
2489
2490 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2491 goto nla_put_failure;
2492
2493 nlmsg_end(skb, nlh);
2494 return 0;
2495
2496 nla_put_failure:
2497 nlmsg_cancel(skb, nlh);
2498 return -EMSGSIZE;
2499 }
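
/*
 * Resulting message layout, as a sketch (attribute presence depends on
 * entry state and protocol):
 *
 *   struct nlmsghdr
 *   struct ndmsg
 *   NDA_DST        tbl->key_len bytes
 *   NDA_LLADDR     dev->addr_len bytes (only while NUD_VALID)
 *   NDA_CACHEINFO  struct nda_cacheinfo
 *   NDA_PROBES     u32
 *   NDA_PROTOCOL   u8 (only if neigh->protocol is set)
 */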
2500
2501 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2502 u32 pid, u32 seq, int type, unsigned int flags,
2503 struct neigh_table *tbl)
2504 {
2505 struct nlmsghdr *nlh;
2506 struct ndmsg *ndm;
2507
2508 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2509 if (nlh == NULL)
2510 return -EMSGSIZE;
2511
2512 ndm = nlmsg_data(nlh);
2513 ndm->ndm_family = tbl->family;
2514 ndm->ndm_pad1 = 0;
2515 ndm->ndm_pad2 = 0;
2516 ndm->ndm_flags = pn->flags | NTF_PROXY;
2517 ndm->ndm_type = RTN_UNICAST;
2518 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2519 ndm->ndm_state = NUD_NONE;
2520
2521 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2522 goto nla_put_failure;
2523
2524 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2525 goto nla_put_failure;
2526
2527 nlmsg_end(skb, nlh);
2528 return 0;
2529
2530 nla_put_failure:
2531 nlmsg_cancel(skb, nlh);
2532 return -EMSGSIZE;
2533 }
2534
2535 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2536 {
2537 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2538 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2539 }
2540
2541 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2542 {
2543 struct net_device *master;
2544
2545 if (!master_idx)
2546 return false;
2547
2548 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2549 if (!master || master->ifindex != master_idx)
2550 return true;
2551
2552 return false;
2553 }
2554
2555 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2556 {
2557 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2558 return true;
2559
2560 return false;
2561 }
2562
2563 struct neigh_dump_filter {
2564 int master_idx;
2565 int dev_idx;
2566 };
2567
2568 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2569 struct netlink_callback *cb,
2570 struct neigh_dump_filter *filter)
2571 {
2572 struct net *net = sock_net(skb->sk);
2573 struct neighbour *n;
2574 int rc, h, s_h = cb->args[1];
2575 int idx, s_idx = idx = cb->args[2];
2576 struct neigh_hash_table *nht;
2577 unsigned int flags = NLM_F_MULTI;
2578
2579 if (filter->dev_idx || filter->master_idx)
2580 flags |= NLM_F_DUMP_FILTERED;
2581
2582 rcu_read_lock_bh();
2583 nht = rcu_dereference_bh(tbl->nht);
2584
2585 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2586 if (h > s_h)
2587 s_idx = 0;
2588 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2589 n != NULL;
2590 n = rcu_dereference_bh(n->next)) {
2591 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2592 goto next;
2593 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2594 neigh_master_filtered(n->dev, filter->master_idx))
2595 goto next;
2596 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2597 cb->nlh->nlmsg_seq,
2598 RTM_NEWNEIGH,
2599 flags) < 0) {
2600 rc = -1;
2601 goto out;
2602 }
2603 next:
2604 idx++;
2605 }
2606 }
2607 rc = skb->len;
2608 out:
2609 rcu_read_unlock_bh();
2610 cb->args[1] = h;
2611 cb->args[2] = idx;
2612 return rc;
2613 }
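
/*
 * Dump-resume note: cb->args[1] and cb->args[2] record the hash bucket
 * and the position within its chain at which the previous skb filled
 * up, so the next dump callback resumes from (s_h, s_idx) instead of
 * rescanning the table from the start.
 */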
2614
2615 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2616 struct netlink_callback *cb,
2617 struct neigh_dump_filter *filter)
2618 {
2619 struct pneigh_entry *n;
2620 struct net *net = sock_net(skb->sk);
2621 int rc, h, s_h = cb->args[3];
2622 int idx, s_idx = idx = cb->args[4];
2623 unsigned int flags = NLM_F_MULTI;
2624
2625 if (filter->dev_idx || filter->master_idx)
2626 flags |= NLM_F_DUMP_FILTERED;
2627
2628 read_lock_bh(&tbl->lock);
2629
2630 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2631 if (h > s_h)
2632 s_idx = 0;
2633 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2634 if (idx < s_idx || pneigh_net(n) != net)
2635 goto next;
2636 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2637 neigh_master_filtered(n->dev, filter->master_idx))
2638 goto next;
2639 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2640 cb->nlh->nlmsg_seq,
2641 RTM_NEWNEIGH, flags, tbl) < 0) {
2642 read_unlock_bh(&tbl->lock);
2643 rc = -1;
2644 goto out;
2645 }
2646 next:
2647 idx++;
2648 }
2649 }
2650
2651 read_unlock_bh(&tbl->lock);
2652 rc = skb->len;
2653 out:
2654 cb->args[3] = h;
2655 cb->args[4] = idx;
2656 return rc;
2657
2658 }
2659
2660 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2661 bool strict_check,
2662 struct neigh_dump_filter *filter,
2663 struct netlink_ext_ack *extack)
2664 {
2665 struct nlattr *tb[NDA_MAX + 1];
2666 int err, i;
2667
2668 if (strict_check) {
2669 struct ndmsg *ndm;
2670
2671 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2672 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2673 return -EINVAL;
2674 }
2675
2676 ndm = nlmsg_data(nlh);
2677 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2678 ndm->ndm_state || ndm->ndm_type) {
2679 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2680 return -EINVAL;
2681 }
2682
2683 if (ndm->ndm_flags & ~NTF_PROXY) {
2684 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2685 return -EINVAL;
2686 }
2687
2688 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2689 tb, NDA_MAX, nda_policy,
2690 extack);
2691 } else {
2692 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2693 NDA_MAX, nda_policy, extack);
2694 }
2695 if (err < 0)
2696 return err;
2697
2698 for (i = 0; i <= NDA_MAX; ++i) {
2699 if (!tb[i])
2700 continue;
2701
2702 /* all new attributes should require strict_check */
2703 switch (i) {
2704 case NDA_IFINDEX:
2705 filter->dev_idx = nla_get_u32(tb[i]);
2706 break;
2707 case NDA_MASTER:
2708 filter->master_idx = nla_get_u32(tb[i]);
2709 break;
2710 default:
2711 if (strict_check) {
2712 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2713 return -EINVAL;
2714 }
2715 }
2716 }
2717
2718 return 0;
2719 }
2720
2721 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2722 {
2723 const struct nlmsghdr *nlh = cb->nlh;
2724 struct neigh_dump_filter filter = {};
2725 struct neigh_table *tbl;
2726 int t, family, s_t;
2727 int proxy = 0;
2728 int err;
2729
2730 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2731
2732 /* check that a full ndmsg structure is present; the family member
2733 * sits at the same offset in both ndmsg and rtgenmsg
2734 */
2735 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2736 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2737 proxy = 1;
2738
2739 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2740 if (err < 0 && cb->strict_check)
2741 return err;
2742
2743 s_t = cb->args[0];
2744
2745 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2746 tbl = neigh_tables[t];
2747
2748 if (!tbl)
2749 continue;
2750 if (t < s_t || (family && tbl->family != family))
2751 continue;
2752 if (t > s_t)
2753 memset(&cb->args[1], 0, sizeof(cb->args) -
2754 sizeof(cb->args[0]));
2755 if (proxy)
2756 err = pneigh_dump_table(tbl, skb, cb, &filter);
2757 else
2758 err = neigh_dump_table(tbl, skb, cb, &filter);
2759 if (err < 0)
2760 break;
2761 }
2762
2763 cb->args[0] = t;
2764 return skb->len;
2765 }
2766
2767 static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2768 struct neigh_table **tbl,
2769 void **dst, int *dev_idx, u8 *ndm_flags,
2770 struct netlink_ext_ack *extack)
2771 {
2772 struct nlattr *tb[NDA_MAX + 1];
2773 struct ndmsg *ndm;
2774 int err, i;
2775
2776 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2777 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2778 return -EINVAL;
2779 }
2780
2781 ndm = nlmsg_data(nlh);
2782 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2783 ndm->ndm_type) {
2784 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2785 return -EINVAL;
2786 }
2787
2788 if (ndm->ndm_flags & ~NTF_PROXY) {
2789 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2790 return -EINVAL;
2791 }
2792
2793 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2794 NDA_MAX, nda_policy, extack);
2795 if (err < 0)
2796 return err;
2797
2798 *ndm_flags = ndm->ndm_flags;
2799 *dev_idx = ndm->ndm_ifindex;
2800 *tbl = neigh_find_table(ndm->ndm_family);
2801 if (*tbl == NULL) {
2802 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2803 return -EAFNOSUPPORT;
2804 }
2805
2806 for (i = 0; i <= NDA_MAX; ++i) {
2807 if (!tb[i])
2808 continue;
2809
2810 switch (i) {
2811 case NDA_DST:
2812 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2813 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2814 return -EINVAL;
2815 }
2816 *dst = nla_data(tb[i]);
2817 break;
2818 default:
2819 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2820 return -EINVAL;
2821 }
2822 }
2823
2824 return 0;
2825 }
2826
2827 static inline size_t neigh_nlmsg_size(void)
2828 {
2829 return NLMSG_ALIGN(sizeof(struct ndmsg))
2830 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2831 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2832 + nla_total_size(sizeof(struct nda_cacheinfo))
2833 + nla_total_size(4) /* NDA_PROBES */
2834 + nla_total_size(1); /* NDA_PROTOCOL */
2835 }
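
/*
 * Worked example, assuming NLA_HDRLEN == 4, sizeof(struct ndmsg) == 12,
 * MAX_ADDR_LEN == 32 and sizeof(struct nda_cacheinfo) == 16 (typical
 * builds):
 *
 *   NLMSG_ALIGN(12)    = 12
 *   nla_total_size(32) = 36   NDA_DST
 *   nla_total_size(32) = 36   NDA_LLADDR
 *   nla_total_size(16) = 20   NDA_CACHEINFO
 *   nla_total_size(4)  =  8   NDA_PROBES
 *   nla_total_size(1)  =  8   NDA_PROTOCOL
 *                       ----
 *                       120 bytes
 */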
2836
2837 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2838 u32 pid, u32 seq)
2839 {
2840 struct sk_buff *skb;
2841 int err = 0;
2842
2843 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2844 if (!skb)
2845 return -ENOBUFS;
2846
2847 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2848 if (err) {
2849 kfree_skb(skb);
2850 goto errout;
2851 }
2852
2853 err = rtnl_unicast(skb, net, pid);
2854 errout:
2855 return err;
2856 }
2857
2858 static inline size_t pneigh_nlmsg_size(void)
2859 {
2860 return NLMSG_ALIGN(sizeof(struct ndmsg))
2861 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2862 + nla_total_size(1); /* NDA_PROTOCOL */
2863 }
2864
2865 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2866 u32 pid, u32 seq, struct neigh_table *tbl)
2867 {
2868 struct sk_buff *skb;
2869 int err = 0;
2870
2871 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2872 if (!skb)
2873 return -ENOBUFS;
2874
2875 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2876 if (err) {
2877 kfree_skb(skb);
2878 goto errout;
2879 }
2880
2881 err = rtnl_unicast(skb, net, pid);
2882 errout:
2883 return err;
2884 }
2885
2886 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2887 struct netlink_ext_ack *extack)
2888 {
2889 struct net *net = sock_net(in_skb->sk);
2890 struct net_device *dev = NULL;
2891 struct neigh_table *tbl = NULL;
2892 struct neighbour *neigh;
2893 void *dst = NULL;
2894 u8 ndm_flags = 0;
2895 int dev_idx = 0;
2896 int err;
2897
2898 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2899 extack);
2900 if (err < 0)
2901 return err;
2902
2903 if (dev_idx) {
2904 dev = __dev_get_by_index(net, dev_idx);
2905 if (!dev) {
2906 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2907 return -ENODEV;
2908 }
2909 }
2910
2911 if (!dst) {
2912 NL_SET_ERR_MSG(extack, "Network address not specified");
2913 return -EINVAL;
2914 }
2915
2916 if (ndm_flags & NTF_PROXY) {
2917 struct pneigh_entry *pn;
2918
2919 pn = pneigh_lookup(tbl, net, dst, dev, 0);
2920 if (!pn) {
2921 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2922 return -ENOENT;
2923 }
2924 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2925 nlh->nlmsg_seq, tbl);
2926 }
2927
2928 if (!dev) {
2929 NL_SET_ERR_MSG(extack, "No device specified");
2930 return -EINVAL;
2931 }
2932
2933 neigh = neigh_lookup(tbl, dst, dev);
2934 if (!neigh) {
2935 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2936 return -ENOENT;
2937 }
2938
2939 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2940 nlh->nlmsg_seq);
2941
2942 neigh_release(neigh);
2943
2944 return err;
2945 }
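
/*
 * Illustrative trigger from userspace (assumes an iproute2 new enough
 * to issue RTM_GETNEIGH requests):
 *
 *   ip neigh get 192.0.2.1 dev eth0
 *
 * Setting NTF_PROXY in ndm_flags selects the pneigh_lookup() branch
 * instead of the regular neigh_lookup() one.
 */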
2946
2947 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2948 {
2949 int chain;
2950 struct neigh_hash_table *nht;
2951
2952 rcu_read_lock_bh();
2953 nht = rcu_dereference_bh(tbl->nht);
2954
2955 read_lock(&tbl->lock); /* avoid resizes */
2956 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2957 struct neighbour *n;
2958
2959 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2960 n != NULL;
2961 n = rcu_dereference_bh(n->next))
2962 cb(n, cookie);
2963 }
2964 read_unlock(&tbl->lock);
2965 rcu_read_unlock_bh();
2966 }
2967 EXPORT_SYMBOL(neigh_for_each);
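
/*
 * In-kernel usage sketch (hypothetical helpers, not in the tree):
 * count the entries of a table. The callback runs under tbl->lock and
 * rcu_read_lock_bh(), so it must not sleep or take tbl->lock itself.
 */
#if 0
static void neigh_count_cb(struct neighbour *n, void *cookie)
{
	(*(unsigned int *)cookie)++;
}

static unsigned int neigh_count(struct neigh_table *tbl)
{
	unsigned int count = 0;

	neigh_for_each(tbl, neigh_count_cb, &count);
	return count;
}
#endif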
2968
2969 /* The tbl->lock must be held as a writer and BH disabled. */
2970 void __neigh_for_each_release(struct neigh_table *tbl,
2971 int (*cb)(struct neighbour *))
2972 {
2973 int chain;
2974 struct neigh_hash_table *nht;
2975
2976 nht = rcu_dereference_protected(tbl->nht,
2977 lockdep_is_held(&tbl->lock));
2978 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2979 struct neighbour *n;
2980 struct neighbour __rcu **np;
2981
2982 np = &nht->hash_buckets[chain];
2983 while ((n = rcu_dereference_protected(*np,
2984 lockdep_is_held(&tbl->lock))) != NULL) {
2985 int release;
2986
2987 write_lock(&n->lock);
2988 release = cb(n);
2989 if (release) {
2990 rcu_assign_pointer(*np,
2991 rcu_dereference_protected(n->next,
2992 lockdep_is_held(&tbl->lock)));
2993 neigh_mark_dead(n);
2994 } else
2995 np = &n->next;
2996 write_unlock(&n->lock);
2997 if (release)
2998 neigh_cleanup_and_release(n);
2999 }
3000 }
3001 }
3002 EXPORT_SYMBOL(__neigh_for_each_release);
3003
3004 int neigh_xmit(int index, struct net_device *dev,
3005 const void *addr, struct sk_buff *skb)
3006 {
3007 int err = -EAFNOSUPPORT;
3008 if (likely(index < NEIGH_NR_TABLES)) {
3009 struct neigh_table *tbl;
3010 struct neighbour *neigh;
3011
3012 tbl = neigh_tables[index];
3013 if (!tbl)
3014 goto out;
3015 rcu_read_lock_bh();
3016 if (index == NEIGH_ARP_TABLE) {
3017 u32 key = *((u32 *)addr);
3018
3019 neigh = __ipv4_neigh_lookup_noref(dev, key);
3020 } else {
3021 neigh = __neigh_lookup_noref(tbl, addr, dev);
3022 }
3023 if (!neigh)
3024 neigh = __neigh_create(tbl, addr, dev, false);
3025 err = PTR_ERR(neigh);
3026 if (IS_ERR(neigh)) {
3027 rcu_read_unlock_bh();
3028 goto out_kfree_skb;
3029 }
3030 err = neigh->output(neigh, skb);
3031 rcu_read_unlock_bh();
3032 }
3033 else if (index == NEIGH_LINK_TABLE) {
3034 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3035 addr, NULL, skb->len);
3036 if (err < 0)
3037 goto out_kfree_skb;
3038 err = dev_queue_xmit(skb);
3039 }
3040 out:
3041 return err;
3042 out_kfree_skb:
3043 kfree_skb(skb);
3044 goto out;
3045 }
3046 EXPORT_SYMBOL(neigh_xmit);
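
/*
 * Caller sketch (modelled loosely on how MPLS uses this helper;
 * function and variable names are illustrative): transmit an skb to an
 * IPv4 next hop, resolving it through the ARP table on demand. On
 * error the skb has already been consumed by neigh_xmit().
 */
#if 0
static int xmit_to_v4_nexthop(struct sk_buff *skb, struct net_device *dev,
			      __be32 nh)
{
	return neigh_xmit(NEIGH_ARP_TABLE, dev, &nh, skb);
}
#endif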
3047
3048 #ifdef CONFIG_PROC_FS
3049
3050 static struct neighbour *neigh_get_first(struct seq_file *seq)
3051 {
3052 struct neigh_seq_state *state = seq->private;
3053 struct net *net = seq_file_net(seq);
3054 struct neigh_hash_table *nht = state->nht;
3055 struct neighbour *n = NULL;
3056 int bucket;
3057
3058 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3059 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3060 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3061
3062 while (n) {
3063 if (!net_eq(dev_net(n->dev), net))
3064 goto next;
3065 if (state->neigh_sub_iter) {
3066 loff_t fakep = 0;
3067 void *v;
3068
3069 v = state->neigh_sub_iter(state, n, &fakep);
3070 if (!v)
3071 goto next;
3072 }
3073 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3074 break;
3075 if (n->nud_state & ~NUD_NOARP)
3076 break;
3077 next:
3078 n = rcu_dereference_bh(n->next);
3079 }
3080
3081 if (n)
3082 break;
3083 }
3084 state->bucket = bucket;
3085
3086 return n;
3087 }
3088
3089 static struct neighbour *neigh_get_next(struct seq_file *seq,
3090 struct neighbour *n,
3091 loff_t *pos)
3092 {
3093 struct neigh_seq_state *state = seq->private;
3094 struct net *net = seq_file_net(seq);
3095 struct neigh_hash_table *nht = state->nht;
3096
3097 if (state->neigh_sub_iter) {
3098 void *v = state->neigh_sub_iter(state, n, pos);
3099 if (v)
3100 return n;
3101 }
3102 n = rcu_dereference_bh(n->next);
3103
3104 while (1) {
3105 while (n) {
3106 if (!net_eq(dev_net(n->dev), net))
3107 goto next;
3108 if (state->neigh_sub_iter) {
3109 void *v = state->neigh_sub_iter(state, n, pos);
3110 if (v)
3111 return n;
3112 goto next;
3113 }
3114 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3115 break;
3116
3117 if (n->nud_state & ~NUD_NOARP)
3118 break;
3119 next:
3120 n = rcu_dereference_bh(n->next);
3121 }
3122
3123 if (n)
3124 break;
3125
3126 if (++state->bucket >= (1 << nht->hash_shift))
3127 break;
3128
3129 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3130 }
3131
3132 if (n && pos)
3133 --(*pos);
3134 return n;
3135 }
3136
3137 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3138 {
3139 struct neighbour *n = neigh_get_first(seq);
3140
3141 if (n) {
3142 --(*pos);
3143 while (*pos) {
3144 n = neigh_get_next(seq, n, pos);
3145 if (!n)
3146 break;
3147 }
3148 }
3149 return *pos ? NULL : n;
3150 }
3151
3152 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3153 {
3154 struct neigh_seq_state *state = seq->private;
3155 struct net *net = seq_file_net(seq);
3156 struct neigh_table *tbl = state->tbl;
3157 struct pneigh_entry *pn = NULL;
3158 int bucket = state->bucket;
3159
3160 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3161 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3162 pn = tbl->phash_buckets[bucket];
3163 while (pn && !net_eq(pneigh_net(pn), net))
3164 pn = pn->next;
3165 if (pn)
3166 break;
3167 }
3168 state->bucket = bucket;
3169
3170 return pn;
3171 }
3172
3173 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3174 struct pneigh_entry *pn,
3175 loff_t *pos)
3176 {
3177 struct neigh_seq_state *state = seq->private;
3178 struct net *net = seq_file_net(seq);
3179 struct neigh_table *tbl = state->tbl;
3180
3181 do {
3182 pn = pn->next;
3183 } while (pn && !net_eq(pneigh_net(pn), net));
3184
3185 while (!pn) {
3186 if (++state->bucket > PNEIGH_HASHMASK)
3187 break;
3188 pn = tbl->phash_buckets[state->bucket];
3189 while (pn && !net_eq(pneigh_net(pn), net))
3190 pn = pn->next;
3191 if (pn)
3192 break;
3193 }
3194
3195 if (pn && pos)
3196 --(*pos);
3197
3198 return pn;
3199 }
3200
3201 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3202 {
3203 struct pneigh_entry *pn = pneigh_get_first(seq);
3204
3205 if (pn) {
3206 --(*pos);
3207 while (*pos) {
3208 pn = pneigh_get_next(seq, pn, pos);
3209 if (!pn)
3210 break;
3211 }
3212 }
3213 return *pos ? NULL : pn;
3214 }
3215
3216 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3217 {
3218 struct neigh_seq_state *state = seq->private;
3219 void *rc;
3220 loff_t idxpos = *pos;
3221
3222 rc = neigh_get_idx(seq, &idxpos);
3223 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3224 rc = pneigh_get_idx(seq, &idxpos);
3225
3226 return rc;
3227 }
3228
3229 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3230 __acquires(tbl->lock)
3231 __acquires(rcu_bh)
3232 {
3233 struct neigh_seq_state *state = seq->private;
3234
3235 state->tbl = tbl;
3236 state->bucket = 0;
3237 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3238
3239 rcu_read_lock_bh();
3240 state->nht = rcu_dereference_bh(tbl->nht);
3241 read_lock(&tbl->lock);
3242
3243 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3244 }
3245 EXPORT_SYMBOL(neigh_seq_start);
3246
3247 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3248 {
3249 struct neigh_seq_state *state;
3250 void *rc;
3251
3252 if (v == SEQ_START_TOKEN) {
3253 rc = neigh_get_first(seq);
3254 goto out;
3255 }
3256
3257 state = seq->private;
3258 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3259 rc = neigh_get_next(seq, v, NULL);
3260 if (rc)
3261 goto out;
3262 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3263 rc = pneigh_get_first(seq);
3264 } else {
3265 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3266 rc = pneigh_get_next(seq, v, NULL);
3267 }
3268 out:
3269 ++(*pos);
3270 return rc;
3271 }
3272 EXPORT_SYMBOL(neigh_seq_next);
3273
3274 void neigh_seq_stop(struct seq_file *seq, void *v)
3275 __releases(tbl->lock)
3276 __releases(rcu_bh)
3277 {
3278 struct neigh_seq_state *state = seq->private;
3279 struct neigh_table *tbl = state->tbl;
3280
3281 read_unlock(&tbl->lock);
3282 rcu_read_unlock_bh();
3283 }
3284 EXPORT_SYMBOL(neigh_seq_stop);
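
/*
 * Wiring sketch: a protocol's /proc iterator typically delegates to
 * these helpers, passing its own table and flags. Modelled on the ARP
 * code; my_* names are hypothetical.
 */
#if 0
static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static const struct seq_operations my_seq_ops = {
	.start = my_seq_start,
	.next  = neigh_seq_next,
	.stop  = neigh_seq_stop,
	.show  = my_seq_show,	/* protocol-specific */
};
#endif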
3285
3286 /* statistics via seq_file */
3287
3288 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3289 {
3290 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3291 int cpu;
3292
3293 if (*pos == 0)
3294 return SEQ_START_TOKEN;
3295
3296 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3297 if (!cpu_possible(cpu))
3298 continue;
3299 *pos = cpu+1;
3300 return per_cpu_ptr(tbl->stats, cpu);
3301 }
3302 return NULL;
3303 }
3304
3305 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3306 {
3307 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3308 int cpu;
3309
3310 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3311 if (!cpu_possible(cpu))
3312 continue;
3313 *pos = cpu+1;
3314 return per_cpu_ptr(tbl->stats, cpu);
3315 }
3316 (*pos)++;
3317 return NULL;
3318 }
3319
3320 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3321 {
3322
3323 }
3324
3325 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3326 {
3327 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3328 struct neigh_statistics *st = v;
3329
3330 if (v == SEQ_START_TOKEN) {
3331 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3332 return 0;
3333 }
3334
3335 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3336 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
3337 atomic_read(&tbl->entries),
3338
3339 st->allocs,
3340 st->destroys,
3341 st->hash_grows,
3342
3343 st->lookups,
3344 st->hits,
3345
3346 st->res_failed,
3347
3348 st->rcv_probes_mcast,
3349 st->rcv_probes_ucast,
3350
3351 st->periodic_gc_runs,
3352 st->forced_gc_runs,
3353 st->unres_discards,
3354 st->table_fulls
3355 );
3356
3357 return 0;
3358 }
3359
3360 static const struct seq_operations neigh_stat_seq_ops = {
3361 .start = neigh_stat_seq_start,
3362 .next = neigh_stat_seq_next,
3363 .stop = neigh_stat_seq_stop,
3364 .show = neigh_stat_seq_show,
3365 };
3366 #endif /* CONFIG_PROC_FS */
3367
3368 static void __neigh_notify(struct neighbour *n, int type, int flags,
3369 u32 pid)
3370 {
3371 struct net *net = dev_net(n->dev);
3372 struct sk_buff *skb;
3373 int err = -ENOBUFS;
3374
3375 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3376 if (skb == NULL)
3377 goto errout;
3378
3379 err = neigh_fill_info(skb, n, pid, 0, type, flags);
3380 if (err < 0) {
3381 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3382 WARN_ON(err == -EMSGSIZE);
3383 kfree_skb(skb);
3384 goto errout;
3385 }
3386 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3387 return;
3388 errout:
3389 if (err < 0)
3390 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3391 }
3392
3393 void neigh_app_ns(struct neighbour *n)
3394 {
3395 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3396 }
3397 EXPORT_SYMBOL(neigh_app_ns);
3398
3399 #ifdef CONFIG_SYSCTL
3400 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3401
3402 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3403 void *buffer, size_t *lenp, loff_t *ppos)
3404 {
3405 int size, ret;
3406 struct ctl_table tmp = *ctl;
3407
3408 tmp.extra1 = SYSCTL_ZERO;
3409 tmp.extra2 = &unres_qlen_max;
3410 tmp.data = &size;
3411
3412 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3413 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3414
3415 if (write && !ret)
3416 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3417 return ret;
3418 }
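
/*
 * This keeps the legacy "unres_qlen" knob (packets) coherent with
 * "unres_qlen_bytes": reads divide, writes multiply, using the true
 * size of a maximal Ethernet frame as the per-packet estimate. For
 * example, writing unres_qlen=3 stores 3 * SKB_TRUESIZE(ETH_FRAME_LEN)
 * bytes; the exact byte value depends on the build's struct sk_buff
 * overhead.
 */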
3419
3420 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3421 int family)
3422 {
3423 switch (family) {
3424 case AF_INET:
3425 return __in_dev_arp_parms_get_rcu(dev);
3426 case AF_INET6:
3427 return __in6_dev_nd_parms_get_rcu(dev);
3428 }
3429 return NULL;
3430 }
3431
3432 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3433 int index)
3434 {
3435 struct net_device *dev;
3436 int family = neigh_parms_family(p);
3437
3438 rcu_read_lock();
3439 for_each_netdev_rcu(net, dev) {
3440 struct neigh_parms *dst_p =
3441 neigh_get_dev_parms_rcu(dev, family);
3442
3443 if (dst_p && !test_bit(index, dst_p->data_state))
3444 dst_p->data[index] = p->data[index];
3445 }
3446 rcu_read_unlock();
3447 }
3448
3449 static void neigh_proc_update(struct ctl_table *ctl, int write)
3450 {
3451 struct net_device *dev = ctl->extra1;
3452 struct neigh_parms *p = ctl->extra2;
3453 struct net *net = neigh_parms_net(p);
3454 int index = (int *) ctl->data - p->data;
3455
3456 if (!write)
3457 return;
3458
3459 set_bit(index, p->data_state);
3460 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3461 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3462 if (!dev) /* NULL dev means this is default value */
3463 neigh_copy_dflt_parms(net, p, index);
3464 }
3465
3466 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3467 void *buffer, size_t *lenp,
3468 loff_t *ppos)
3469 {
3470 struct ctl_table tmp = *ctl;
3471 int ret;
3472
3473 tmp.extra1 = SYSCTL_ZERO;
3474 tmp.extra2 = SYSCTL_INT_MAX;
3475
3476 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3477 neigh_proc_update(ctl, write);
3478 return ret;
3479 }
3480
3481 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3482 size_t *lenp, loff_t *ppos)
3483 {
3484 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3485
3486 neigh_proc_update(ctl, write);
3487 return ret;
3488 }
3489 EXPORT_SYMBOL(neigh_proc_dointvec);
3490
3491 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3492 size_t *lenp, loff_t *ppos)
3493 {
3494 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3495
3496 neigh_proc_update(ctl, write);
3497 return ret;
3498 }
3499 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3500
3501 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3502 void *buffer, size_t *lenp,
3503 loff_t *ppos)
3504 {
3505 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3506
3507 neigh_proc_update(ctl, write);
3508 return ret;
3509 }
3510
3511 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3512 void *buffer, size_t *lenp, loff_t *ppos)
3513 {
3514 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3515
3516 neigh_proc_update(ctl, write);
3517 return ret;
3518 }
3519 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3520
3521 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3522 void *buffer, size_t *lenp,
3523 loff_t *ppos)
3524 {
3525 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3526
3527 neigh_proc_update(ctl, write);
3528 return ret;
3529 }
3530
3531 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3532 void *buffer, size_t *lenp,
3533 loff_t *ppos)
3534 {
3535 struct neigh_parms *p = ctl->extra2;
3536 int ret;
3537
3538 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3539 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3540 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3541 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3542 else
3543 ret = -1;
3544
3545 if (write && ret == 0) {
3546 /* update reachable_time as well, otherwise, the change will
3547 * only be effective after the next time neigh_periodic_work
3548 * decides to recompute it
3549 */
3550 p->reachable_time =
3551 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3552 }
3553 return ret;
3554 }
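
/*
 * Usage sketch (hypothetical device and values): both procnames funnel
 * into BASE_REACHABLE_TIME but take different units:
 *
 *   sysctl -w net.ipv4.neigh.eth0.base_reachable_time=30        # seconds
 *   sysctl -w net.ipv4.neigh.eth0.base_reachable_time_ms=30000  # milliseconds
 */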
3555
3556 #define NEIGH_PARMS_DATA_OFFSET(index) \
3557 (&((struct neigh_parms *) 0)->data[index])
3558
3559 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3560 [NEIGH_VAR_ ## attr] = { \
3561 .procname = name, \
3562 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3563 .maxlen = sizeof(int), \
3564 .mode = mval, \
3565 .proc_handler = proc, \
3566 }
3567
3568 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3569 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3570
3571 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3572 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3573
3574 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3575 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3576
3577 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3578 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3579
3580 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3581 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3582
3583 static struct neigh_sysctl_table {
3584 struct ctl_table_header *sysctl_header;
3585 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3586 } neigh_sysctl_template __read_mostly = {
3587 .neigh_vars = {
3588 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3589 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3590 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3591 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3592 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3593 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3594 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3595 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3596 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3597 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3598 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3599 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3600 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3601 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3602 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3603 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3604 [NEIGH_VAR_GC_INTERVAL] = {
3605 .procname = "gc_interval",
3606 .maxlen = sizeof(int),
3607 .mode = 0644,
3608 .proc_handler = proc_dointvec_jiffies,
3609 },
3610 [NEIGH_VAR_GC_THRESH1] = {
3611 .procname = "gc_thresh1",
3612 .maxlen = sizeof(int),
3613 .mode = 0644,
3614 .extra1 = SYSCTL_ZERO,
3615 .extra2 = SYSCTL_INT_MAX,
3616 .proc_handler = proc_dointvec_minmax,
3617 },
3618 [NEIGH_VAR_GC_THRESH2] = {
3619 .procname = "gc_thresh2",
3620 .maxlen = sizeof(int),
3621 .mode = 0644,
3622 .extra1 = SYSCTL_ZERO,
3623 .extra2 = SYSCTL_INT_MAX,
3624 .proc_handler = proc_dointvec_minmax,
3625 },
3626 [NEIGH_VAR_GC_THRESH3] = {
3627 .procname = "gc_thresh3",
3628 .maxlen = sizeof(int),
3629 .mode = 0644,
3630 .extra1 = SYSCTL_ZERO,
3631 .extra2 = SYSCTL_INT_MAX,
3632 .proc_handler = proc_dointvec_minmax,
3633 },
3634 {},
3635 },
3636 };
3637
3638 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3639 proc_handler *handler)
3640 {
3641 int i;
3642 struct neigh_sysctl_table *t;
3643 const char *dev_name_source;
3644 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3645 char *p_name;
3646
3647 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3648 if (!t)
3649 goto err;
3650
3651 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3652 t->neigh_vars[i].data += (long) p;
3653 t->neigh_vars[i].extra1 = dev;
3654 t->neigh_vars[i].extra2 = p;
3655 }
3656
3657 if (dev) {
3658 dev_name_source = dev->name;
3659 /* Terminate the table early */
3660 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3661 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3662 } else {
3663 struct neigh_table *tbl = p->tbl;
3664 dev_name_source = "default";
3665 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3666 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3667 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3668 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3669 }
3670
3671 if (handler) {
3672 /* RetransTime */
3673 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3674 /* ReachableTime */
3675 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3676 /* RetransTime (in milliseconds) */
3677 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3678 /* ReachableTime (in milliseconds) */
3679 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3680 } else {
3681 /* These handlers update p->reachable_time after
3682 * base_reachable_time(_ms) is set, so the new interval takes effect
3683 * on the next neighbour update instead of waiting for
3684 * neigh_periodic_work to recompute it (which can take multiple
3685 * minutes). Any handler that replaces them should do the same.
3686 */
3687 /* ReachableTime */
3688 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3689 neigh_proc_base_reachable_time;
3690 /* ReachableTime (in milliseconds) */
3691 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3692 neigh_proc_base_reachable_time;
3693 }
3694
3695 /* Don't export sysctls to unprivileged users */
3696 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3697 t->neigh_vars[0].procname = NULL;
3698
3699 switch (neigh_parms_family(p)) {
3700 case AF_INET:
3701 p_name = "ipv4";
3702 break;
3703 case AF_INET6:
3704 p_name = "ipv6";
3705 break;
3706 default:
3707 BUG();
3708 }
3709
3710 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3711 p_name, dev_name_source);
3712 t->sysctl_header =
3713 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3714 if (!t->sysctl_header)
3715 goto free;
3716
3717 p->sysctl_table = t;
3718 return 0;
3719
3720 free:
3721 kfree(t);
3722 err:
3723 return -ENOBUFS;
3724 }
3725 EXPORT_SYMBOL(neigh_sysctl_register);
3726
3727 void neigh_sysctl_unregister(struct neigh_parms *p)
3728 {
3729 if (p->sysctl_table) {
3730 struct neigh_sysctl_table *t = p->sysctl_table;
3731 p->sysctl_table = NULL;
3732 unregister_net_sysctl_table(t->sysctl_header);
3733 kfree(t);
3734 }
3735 }
3736 EXPORT_SYMBOL(neigh_sysctl_unregister);
3737
3738 #endif /* CONFIG_SYSCTL */
3739
3740 static int __init neigh_init(void)
3741 {
3742 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3743 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3744 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3745
3746 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3747 0);
3748 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3749
3750 return 0;
3751 }
3752
3753 subsys_initcall(neigh_init);
3754