// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/xfrm.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section; otherwise it might end up in
	 * the bss section. We really want to avoid false sharing on this
	 * variable, and to catch any writes on it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL(dst_default_metrics);

void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc &&
	    !(flags & DST_NOCOUNT) &&
	    dst_entries_get_fast(ops) > ops->gc_thresh)
		ops->gc(ops);

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);
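
/* Usage sketch (not part of this file): a protocol typically embeds a
 * struct dst_entry at the start of its own route structure and obtains
 * it through dst_alloc() with its own dst_ops. The names my_route,
 * my_dst_ops and my_route_alloc below are hypothetical; my_dst_ops is
 * assumed to have a kmem_cachep sized for struct my_route.
 */
#if 0	/* illustration only */
struct my_route {
	struct dst_entry	dst;	/* must be the first member */
	u32			cookie;
};

static struct my_route *my_route_alloc(struct net_device *dev)
{
	struct my_route *rt;

	/* one initial reference, force ops->check() on next lookup */
	rt = dst_alloc(&my_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, 0);
	if (rt)
		rt->cookie = 0;
	return rt;
}
#endif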

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child = NULL;

	smp_rmb();

#ifdef CONFIG_XFRM
	if (dst->xfrm) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

		child = xdst->child;
	}
#endif
	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst)
		dst_release_immediate(dst);
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
}

/* Operations to mark dst as DEAD and clean up the net device referenced
 * by dst:
 * 1. put the dst under the blackhole interface and discard all tx/rx
 *    packets on this route.
 * 2. release the net_device.
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event, and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, true);
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->dev = blackhole_netdev;
	dev_hold(dst->dev);
	dev_put(dev);
}
EXPORT_SYMBOL(dst_dev_put);

static void dst_count_dec(struct dst_entry *dst)
{
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);
}

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt) {
			dst_count_dec(dst);
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
		}
	}
}
EXPORT_SYMBOL(dst_release);
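
/* Usage sketch (hypothetical caller): every dst_hold() must be paired
 * with a dst_release(). The final dst_release() defers destruction to
 * an RCU grace period via dst_destroy_rcu() above, so lockless readers
 * stay safe; dst_release_immediate() below skips the grace period and
 * is only for dsts that never became visible to such readers.
 */
#if 0	/* illustration only */
static void xmit_with_route(struct net *net, struct sock *sk,
			    struct sk_buff *skb, struct dst_entry *dst)
{
	dst_hold(dst);		/* take a reference for the skb */
	skb_dst_set(skb, dst);	/* the skb now owns that reference */
	dst_output(net, sk, skb);
	/* skb_dst_drop() or kfree_skb() releases the reference */
}
#endif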

void dst_release_immediate(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow"))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt) {
			dst_count_dec(dst);
			dst_destroy(dst);
		}
	}
}
EXPORT_SYMBOL(dst_release_immediate);

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
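
/* Illustration (hypothetical helper): writers do not usually call
 * dst_cow_metrics_generic() directly. A metric write such as
 * dst_metric_set() goes through dst_metrics_write_ptr(), which invokes
 * dst->ops->cow_metrics (commonly this helper) while the dst still
 * points at a shared read-only metrics block.
 */
#if 0	/* illustration only */
static void route_clamp_mtu(struct dst_entry *dst, u32 mtu)
{
	/* triggers the copy-on-write path above on the first write */
	dst_metric_set(dst, RTAX_MTU, mtu);
}
#endif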

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}

struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return NULL;
}

void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);

void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}
EXPORT_SYMBOL_GPL(dst_blackhole_redirect);

unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);

static struct dst_ops dst_blackhole_ops = {
	.family		= AF_UNSPEC,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};

static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);
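
/* Usage sketch (hypothetical receive path, modeled on tunnel drivers):
 * a per-packet metadata dst carries the tunnel key alongside the skb so
 * that upper layers can read it via skb_tunnel_info().
 */
#if 0	/* illustration only */
static int attach_tunnel_metadata(struct sk_buff *skb)
{
	struct metadata_dst *md;

	md = metadata_dst_alloc(0, METADATA_IP_TUNNEL, GFP_ATOMIC);
	if (!md)
		return -ENOMEM;

	/* fill md->u.tun_info.key from the outer headers here */

	skb_dst_set(skb, &md->dst);	/* the skb now owns the reference */
	return 0;
}
#endif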

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free);

struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst)
{
#ifdef CONFIG_DST_CACHE
	int cpu;

	for_each_possible_cpu(cpu) {
		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);

		if (one_md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&one_md_dst->u.tun_info.dst_cache);
	}
#endif
	free_percpu(md_dst);
}
EXPORT_SYMBOL_GPL(metadata_dst_free_percpu);
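
/* Usage sketch (hypothetical module): the per-CPU variant pairs
 * metadata_dst_alloc_percpu() with metadata_dst_free_percpu(), e.g. for
 * a device's per-CPU scratch tunnel metadata. The scratch_md names are
 * made up for this illustration.
 */
#if 0	/* illustration only */
static struct metadata_dst __percpu *scratch_md;

static int __init scratch_md_init(void)
{
	scratch_md = metadata_dst_alloc_percpu(0, METADATA_IP_TUNNEL,
					       GFP_KERNEL);
	return scratch_md ? 0 : -ENOMEM;
}

static void __exit scratch_md_exit(void)
{
	metadata_dst_free_percpu(scratch_md);
}
#endif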