/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) To keep the spinlock held only for a short time, long-lived
 *    entries are moved to a second list, handled by a garbage
 *    collection task run from a workqueue.
 * 3) This second list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */
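
/*
 * In code terms: __dst_free() pushes an entry onto dst_garbage.list under
 * dst_garbage.lock; dst_gc_task() later drains that list and either destroys
 * each entry or, if it is still referenced, parks it on dst_busy_list (under
 * dst_gc_mutex) to be revisited on the next pass.
 */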

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
        spinlock_t              lock;
        struct dst_entry        *list;
        unsigned long           timer_inc;
        unsigned long           timer_expires;
} dst_garbage = {
        .lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
        .timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;

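/*
 * Walk dst_busy_list, destroying entries whose refcount has dropped to zero
 * and keeping still-referenced ones for a later pass, then pull in whatever
 * accumulated on dst_garbage.list and repeat.  The work re-arms itself with
 * a delay that grows while little is being freed and shrinks again when the
 * list is being emptied quickly.
 */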
static void dst_gc_task(struct work_struct *work)
{
        int delayed = 0;
        int work_performed = 0;
        unsigned long expires = ~0L;
        struct dst_entry *dst, *next, head;
        struct dst_entry *last = &head;

        mutex_lock(&dst_gc_mutex);
        next = dst_busy_list;

loop:
        while ((dst = next) != NULL) {
                next = dst->next;
                prefetch(&next->next);
                cond_resched();
                if (likely(atomic_read(&dst->__refcnt))) {
                        last->next = dst;
                        last = dst;
                        delayed++;
                        continue;
                }
                work_performed++;

                dst = dst_destroy(dst);
                if (dst) {
                        /* NOHASH and still referenced. Unless it is already
                         * on gc list, invalidate it and add to gc list.
                         *
                         * Note: this is temporary. Actually, NOHASH dst's
                         * must be obsoleted when parent is obsoleted.
                         * But we do not have state "obsoleted, but
                         * referenced by parent", so it is right.
                         */
                        if (dst->obsolete > 0)
                                continue;

                        ___dst_free(dst);
                        dst->next = next;
                        next = dst;
                }
        }

        spin_lock_bh(&dst_garbage.lock);
        next = dst_garbage.list;
        if (next) {
                dst_garbage.list = NULL;
                spin_unlock_bh(&dst_garbage.lock);
                goto loop;
        }
        last->next = NULL;
        dst_busy_list = head.next;
        if (!dst_busy_list)
                dst_garbage.timer_inc = DST_GC_MAX;
        else {
                /*
                 * if we freed less than 1/10 of delayed entries,
                 * we can sleep longer.
                 */
                if (work_performed <= delayed/10) {
                        dst_garbage.timer_expires += dst_garbage.timer_inc;
                        if (dst_garbage.timer_expires > DST_GC_MAX)
                                dst_garbage.timer_expires = DST_GC_MAX;
                        dst_garbage.timer_inc += DST_GC_INC;
                } else {
                        dst_garbage.timer_inc = DST_GC_INC;
                        dst_garbage.timer_expires = DST_GC_MIN;
                }
                expires = dst_garbage.timer_expires;
                /*
                 * if the next desired timer is more than 4 seconds in the
                 * future then round the timer to whole seconds
                 */
                if (expires > 4*HZ)
                        expires = round_jiffies_relative(expires);
                schedule_delayed_work(&dst_gc_work, expires);
        }

        spin_unlock_bh(&dst_garbage.lock);
        mutex_unlock(&dst_gc_mutex);
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
        /* This initializer is needed to force the linker to place this
         * variable into the const section. Otherwise it might end up in
         * the .bss section. We really want to avoid false sharing on this
         * variable, and catch any writes to it.
         */
        .refcnt = ATOMIC_INIT(1),
};
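
/*
 * dst_default_metrics is shared read-only by every dst until a writer needs
 * private metrics, at which point dst_cow_metrics_generic() below copies it
 * into a freshly allocated, refcounted struct dst_metrics.
 */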

void dst_init(struct dst_entry *dst, struct dst_ops *ops,
              struct net_device *dev, int initial_ref, int initial_obsolete,
              unsigned short flags)
{
        dst->child = NULL;
        dst->dev = dev;
        if (dev)
                dev_hold(dev);
        dst->ops = ops;
        dst_init_metrics(dst, dst_default_metrics.metrics, true);
        dst->expires = 0UL;
        dst->path = dst;
        dst->from = NULL;
#ifdef CONFIG_XFRM
        dst->xfrm = NULL;
#endif
        dst->input = dst_discard;
        dst->output = dst_discard_out;
        dst->error = 0;
        dst->obsolete = initial_obsolete;
        dst->header_len = 0;
        dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
        dst->tclassid = 0;
#endif
        dst->lwtstate = NULL;
        atomic_set(&dst->__refcnt, initial_ref);
        dst->__use = 0;
        dst->lastuse = jiffies;
        dst->flags = flags;
        dst->pending_confirm = 0;
        dst->next = NULL;
        if (!(flags & DST_NOCOUNT))
                dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
                int initial_ref, int initial_obsolete, unsigned short flags)
{
        struct dst_entry *dst;

        if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
                if (ops->gc(ops))
                        return NULL;
        }

        dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;

        dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

        return dst;
}
EXPORT_SYMBOL(dst_alloc);
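
/*
 * Typical use (illustrative sketch only, not part of this file): a protocol
 * embeds a struct dst_entry as the first member of its own route type and
 * lets dst_alloc() size the allocation from its dst_ops::kmem_cachep, e.g.
 *
 *	struct rtable *rt;
 *
 *	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, flags);
 *	if (!rt)
 *		return NULL;
 *	...
 *	dst_release(&rt->dst);
 *
 * ipv4_dst_ops and the exact flags are the caller's business; they appear
 * here only as an example.
 */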

static void ___dst_free(struct dst_entry *dst)
{
        /* The first case (dev == NULL) is required when the
         * protocol module is unloaded.
         */
        if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
                dst->input = dst_discard;
                dst->output = dst_discard_out;
        }
        dst->obsolete = DST_OBSOLETE_DEAD;
}

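/*
 * __dst_free() queues a dst for the garbage collector instead of destroying
 * it immediately: the entry may still be referenced, so it is only marked
 * dead here and handed to dst_gc_task() via dst_garbage.list.  The GC delay
 * is shortened if it had backed off, so the entry does not linger.
 */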
void __dst_free(struct dst_entry *dst)
{
        spin_lock_bh(&dst_garbage.lock);
        ___dst_free(dst);
        dst->next = dst_garbage.list;
        dst_garbage.list = dst;
        if (dst_garbage.timer_inc > DST_GC_INC) {
                dst_garbage.timer_inc = DST_GC_INC;
                dst_garbage.timer_expires = DST_GC_MIN;
                mod_delayed_work(system_wq, &dst_gc_work,
                                 dst_garbage.timer_expires);
        }
        spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

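/*
 * Tear down a dst and drop one reference on its child.  If the child is a
 * still-referenced NOHASH entry, it is returned to the caller, which must
 * queue it for garbage collection (see dst_gc_task() and dst_destroy_rcu()).
 */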
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
        struct dst_entry *child;

        smp_rmb();

again:
        child = dst->child;

        if (!(dst->flags & DST_NOCOUNT))
                dst_entries_add(dst->ops, -1);

        if (dst->ops->destroy)
                dst->ops->destroy(dst);
        if (dst->dev)
                dev_put(dst->dev);

        lwtstate_put(dst->lwtstate);

        if (dst->flags & DST_METADATA)
                metadata_dst_free((struct metadata_dst *)dst);
        else
                kmem_cache_free(dst->ops->kmem_cachep, dst);

        dst = child;
        if (dst) {
                int nohash = dst->flags & DST_NOHASH;

                if (atomic_dec_and_test(&dst->__refcnt)) {
                        /* We were real parent of this dst, so kill child. */
                        if (nohash)
                                goto again;
                } else {
                        /* Child is still referenced, return it for freeing. */
                        if (nohash)
                                return dst;
                        /* Child is still in its hash table */
                }
        }
        return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
        struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

        dst = dst_destroy(dst);
        if (dst)
                __dst_free(dst);
}

void dst_release(struct dst_entry *dst)
{
        if (dst) {
                int newrefcnt;
                unsigned short nocache = dst->flags & DST_NOCACHE;

                newrefcnt = atomic_dec_return(&dst->__refcnt);
                if (unlikely(newrefcnt < 0))
                        net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
                                             __func__, dst, newrefcnt);
                if (!newrefcnt && unlikely(nocache))
                        call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
}
EXPORT_SYMBOL(dst_release);
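
/*
 * Illustrative pairing (not part of this file): a caller that takes its own
 * reference with dst_hold() is expected to drop it with dst_release() when
 * done, e.g.
 *
 *	dst_hold(dst);
 *	...
 *	dst_release(dst);
 *
 * For DST_NOCACHE entries the final dst_release() defers the actual
 * destruction to an RCU callback, as seen above.
 */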

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
        struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

        if (p) {
                struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
                unsigned long prev, new;

                atomic_set(&p->refcnt, 1);
                memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

                new = (unsigned long) p;
                prev = cmpxchg(&dst->_metrics, old, new);

                if (prev != old) {
                        kfree(p);
                        p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
                        if (prev & DST_METRICS_READ_ONLY)
                                p = NULL;
                } else if (prev & DST_METRICS_REFCOUNTED) {
                        if (atomic_dec_and_test(&old_p->refcnt))
                                kfree(old_p);
                }
        }
        BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
        return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
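
/*
 * Copy-on-write note: the cmpxchg() above makes concurrent writers safe.  A
 * loser frees its private copy and returns the metrics the winner installed,
 * or NULL if those are read-only; the winner drops the reference it held on
 * the old refcounted block.
 */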

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
        unsigned long prev, new;

        new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
        prev = cmpxchg(&dst->_metrics, old, new);
        if (prev == old)
                kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

static struct dst_ops md_dst_ops = {
        .family = AF_UNSPEC,
};

static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        WARN_ONCE(1, "Attempting to call output on metadata dst\n");
        kfree_skb(skb);
        return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
        WARN_ONCE(1, "Attempting to call input on metadata dst\n");
        kfree_skb(skb);
        return 0;
}

static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
{
        struct dst_entry *dst;

        dst = &md_dst->dst;
        dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
                 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);

        dst->input = dst_md_discard;
        dst->output = dst_md_discard_out;

        memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
{
        struct metadata_dst *md_dst;

        md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
        if (!md_dst)
                return NULL;

        __metadata_dst_init(md_dst, optslen);

        return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);
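
/*
 * Illustrative use (not part of this file): a tunnel driver receiving in
 * collect-metadata mode can allocate one of these, fill in
 * md_dst->u.tun_info, and attach it to the skb:
 *
 *	struct metadata_dst *tun_dst;
 *
 *	tun_dst = metadata_dst_alloc(md_size, GFP_ATOMIC);
 *	if (!tun_dst)
 *		return NULL;
 *	skb_dst_set(skb, &tun_dst->dst);
 *
 * md_size here stands for the driver's option length and is only an
 * example name.
 */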

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
        dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
        kfree(md_dst);
}

struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
{
        int cpu;
        struct metadata_dst __percpu *md_dst;

        md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
                                    __alignof__(struct metadata_dst), flags);
        if (!md_dst)
                return NULL;

        for_each_possible_cpu(cpu)
                __metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);

        return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
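
/*
 * Illustrative use (not part of this file): callers that need a metadata dst
 * per CPU, e.g. for transmit paths that cannot sleep, typically do a one-time
 *
 *	md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
 *
 * at setup and then use this_cpu_ptr(md_dst) in the fast path; the per-cpu
 * allocation is released with free_percpu() when its user goes away.
 */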

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                       int unregister)
{
        if (dst->ops->ifdown)
                dst->ops->ifdown(dst, dev, unregister);

        if (dev != dst->dev)
                return;

        if (!unregister) {
                dst->input = dst_discard;
                dst->output = dst_discard_out;
        } else {
                dst->dev = dev_net(dst->dev)->loopback_dev;
                dev_hold(dst->dev);
                dev_put(dev);
        }
}

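/*
 * Netdevice notifier: on NETDEV_DOWN the affected entries merely have their
 * input/output hooks replaced with the discard stubs; on
 * NETDEV_UNREGISTER_FINAL they are additionally moved to the loopback device
 * so the outgoing device can really disappear.  Both the busy list and the
 * pending garbage list are walked.
 */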
static int dst_dev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct dst_entry *dst, *last = NULL;

        switch (event) {
        case NETDEV_UNREGISTER_FINAL:
        case NETDEV_DOWN:
                mutex_lock(&dst_gc_mutex);
                for (dst = dst_busy_list; dst; dst = dst->next) {
                        last = dst;
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                }

                spin_lock_bh(&dst_garbage.lock);
                dst = dst_garbage.list;
                dst_garbage.list = NULL;
                /* The code in dst_ifdown places a hold on the loopback device.
                 * If the gc entry processing is set to expire after a lengthy
                 * interval, this hold can cause netdev_wait_allrefs() to hang
                 * out and wait for a long time -- until the loopback
                 * interface is released.  If we're really unlucky, it'll emit
                 * pr_emerg messages to console too.  Reset the interval here,
                 * so dst cleanups occur in a more timely fashion.
                 */
                if (dst_garbage.timer_inc > DST_GC_INC) {
                        dst_garbage.timer_inc = DST_GC_INC;
                        dst_garbage.timer_expires = DST_GC_MIN;
                        mod_delayed_work(system_wq, &dst_gc_work,
                                         dst_garbage.timer_expires);
                }
                spin_unlock_bh(&dst_garbage.lock);

                if (last)
                        last->next = dst;
                else
                        dst_busy_list = dst;
                for (; dst; dst = dst->next)
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                mutex_unlock(&dst_gc_mutex);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
        .notifier_call = dst_dev_event,
        .priority = -10, /* must be called after other network notifiers */
};

void __init dst_subsys_init(void)
{
        register_netdevice_notifier(&dst_dev_notifier);
}