/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock held only briefly,
 *    we use a second list where long lived entries are
 *    stored; those entries are handled by the garbage
 *    collect work fired from a workqueue.
 * 3) This second list is guarded by a mutex,
 *    so that dst_gc_task() and dst_dev_event() can be synchronized.
 */

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry *dst_busy_list;

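/*
 * Garbage collect work: walk the busy list, free entries whose refcount
 * has dropped to zero, keep the still-referenced ones for the next pass,
 * and re-arm itself with a delay that backs off (up to DST_GC_MAX) when
 * little work is being done.
 */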
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}

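/* Default output handler for dead or not-yet-initialized entries: just
 * drop the packet.
 */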
int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_sk);

const u32 dst_default_metrics[RTAX_MAX + 1] = {
	/* This initializer is needed to force the linker to place this variable
	 * into the const section. Otherwise it might end up in the bss section.
	 * We really want to avoid false sharing on this variable, and catch
	 * any writes on it.
	 */
	[RTAX_MAX] = 0xdeadbeef,
};

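/*
 * Allocate and initialise a protocol private dst entry from ops->kmem_cachep,
 * taking a reference on @dev and accounting the entry against @ops unless
 * DST_NOCOUNT is set.  If the per-ops garbage collector reports pressure
 * (dst_entries_get_fast() above ops->gc_thresh and ops->gc() fails), the
 * allocation is refused.
 *
 * A minimal caller sketch (illustrative only, not taken from this file;
 * "my_dst_ops" and "flags" are placeholders):
 *
 *	rt = dst_alloc(&my_dst_ops, dev, 1, DST_OBSOLETE_NONE, flags);
 *	if (!rt)
 *		return NULL;
 */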
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_sk;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);

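/* Mark an entry obsolete (dead); if its device is gone or down, also make
 * it drop all traffic by rerouting input/output to the discard handlers.
 */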
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when
	 * a protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_sk;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}

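/* Queue an entry on the garbage list for deferred freeing and, if the GC
 * delay has backed off, pull the next dst_gc_task() run forward to DST_GC_MIN.
 */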
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

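/* Final teardown of an entry: undo the accounting, call the protocol
 * ->destroy() hook, release the device and free the object.  If a NOHASH
 * child is still referenced, it is returned to the caller instead of being
 * freed here.
 */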
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

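/* RCU callback used for DST_NOCACHE entries: finish destruction after a
 * grace period, falling back to the garbage list if a child survives.
 */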
static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}

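/* Drop one reference.  Cached entries simply stay in their table;
 * DST_NOCACHE entries are destroyed via RCU once the count reaches zero.
 */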
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;
		unsigned short nocache = dst->flags & DST_NOCACHE;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
		if (!newrefcnt && unlikely(nocache))
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

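/* Copy-on-write helper for the shared read-only metrics template: clone the
 * old array and install the copy with cmpxchg().  On a lost race the copy is
 * freed and the winner's array is returned (or NULL if it is read-only).
 */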
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false. */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

/**
 * __skb_dst_set_noref - sets skb dst, without a reference
 * @skb: buffer
 * @dst: dst entry
 * @force: if force is set, use noref version even for DST_NOCACHE entries
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * skb_dst_drop() should not dst_release() this dst.
 */
void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, bool force)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	/* If dst is not in cache, we must take a reference, because
	 * dst_release() will destroy dst as soon as its refcount becomes zero
	 */
	if (unlikely((dst->flags & DST_NOCACHE) && !force)) {
		dst_hold(dst);
		skb_dst_set(skb, dst);
	} else {
		skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
	}
}
EXPORT_SYMBOL(__skb_dst_set_noref);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
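/* Detach an entry from @dev: on NETDEV_DOWN just blackhole its traffic; on
 * unregister, reparent the entry onto the loopback device of its namespace
 * so the original device reference can be dropped.
 */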
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_sk;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

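/* Netdevice notifier: when a device goes down or finishes unregistering,
 * run dst_ifdown() on every entry on both the busy and garbage lists and
 * make sure the GC work runs again soon.
 */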
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		/* The code in dst_ifdown places a hold on the loopback device.
		 * If the gc entry processing is set to expire after a lengthy
		 * interval, this hold can cause netdev_wait_allrefs() to hang
		 * out and wait for a long time -- until the loopback
		 * interface is released.  If we're really unlucky, it'll emit
		 * pr_emerg messages to console too.  Reset the interval here,
		 * so dst cleanups occur in a more timely fashion.
		 */
		if (dst_garbage.timer_inc > DST_GC_INC) {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
			mod_delayed_work(system_wq, &dst_gc_work,
					 dst_garbage.timer_expires);
		}
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

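/* Called once during boot (__init) to hook the notifier above into the
 * netdevice notifier chain.
 */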
void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}