/* SPDX-License-Identifier: GPL-2.0 */
/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/refcount.h>
#include <net/neighbour.h>
#include <asm/processor.h>

struct sk_buff;

struct dst_entry {
	struct net_device       *dev;
	struct  dst_ops	        *ops;
	unsigned long		_metrics;
	unsigned long           expires;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOCOUNT		0x0008
#define DST_FAKE_RTABLE		0x0010
#define DST_XFRM_TUNNEL		0x0020
#define DST_XFRM_QUEUE		0x0040
#define DST_METADATA		0x0080

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
#ifdef CONFIG_64BIT
	atomic_t		__refcnt;	/* 64-bit offset 64 */
#endif
	int			__use;
	unsigned long		lastuse;
	struct lwtunnel_state   *lwtstate;
	struct rcu_head		rcu_head;
	short			error;
	short			__pad;
	__u32			tclassid;
#ifndef CONFIG_64BIT
	atomic_t		__refcnt;	/* 32-bit offset 64 */
#endif
};

struct dst_metrics {
	u32		metrics[RTAX_MAX];
	refcount_t	refcnt;
} __aligned(4);		/* Low pointer bits contain DST_METRICS_FLAGS */
extern const struct dst_metrics dst_default_metrics;

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);

#define DST_METRICS_READ_ONLY		0x1UL
#define DST_METRICS_REFCOUNTED		0x2UL
#define DST_METRICS_FLAGS		0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)
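
/*
 * Illustrative sketch (not part of this header): dst->_metrics is a tagged
 * pointer.  Because struct dst_metrics is at least 4-byte aligned, the two
 * low address bits are free to carry DST_METRICS_READ_ONLY and
 * DST_METRICS_REFCOUNTED; __DST_METRICS_PTR() masks them off to recover the
 * real pointer.  The helper below is hypothetical and only shows the decode.
 */
#if 0
static inline u32 *example_decode_metrics(const struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	u32 *metrics = __DST_METRICS_PTR(val);	/* low flag bits masked off */

	if (val & DST_METRICS_READ_ONLY)
		return NULL;	/* hypothetical policy: no write pointer for shared metrics */
	return metrics;
}
#endif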

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}
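
/*
 * Example (sketch): a freshly allocated dst typically starts out pointing at
 * the shared, read-only default metrics, as net/core/dst.c does at init
 * time.  A later call to dst_metrics_write_ptr() then triggers the
 * copy-on-write path through dst->ops->cow_metrics().
 */
#if 0
static inline void example_init_dst_metrics(struct dst_entry *dst)
{
	/* read_only=true sets DST_METRICS_READ_ONLY in the tagged pointer */
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
}
#endif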

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

/* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA	(1U << 31)

#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}
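
/*
 * Example (sketch): RTT metrics cross the user ABI in milliseconds but are
 * consumed in-kernel as jiffies, so dst_metric_rtt() is the right accessor;
 * reading the raw value with dst_metric_raw() would mix up the units.
 */
#if 0
static inline unsigned long example_read_rtt(const struct dst_entry *dst)
{
	/* ms-based ABI value converted to jiffies by msecs_to_jiffies() */
	return dst_metric_rtt(dst, RTAX_RTT);
}
#endif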

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the placement of __refcnt in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	if (unlikely(time != dst->lastuse)) {
		dst->__use++;
		dst->lastuse = time;
	}
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		dst_hold(dst);
	return dst;
}
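
/*
 * Example (sketch): every reference taken with dst_hold()/dst_clone() must
 * be balanced by a dst_release().  The hypothetical helper below takes its
 * own reference for as long as it keeps the pointer around.
 */
#if 0
static void example_keep_dst(struct dst_entry *dst)
{
	dst = dst_clone(dst);	/* NULL-safe; takes a reference if non-NULL */

	/* ... use dst ... */

	dst_release(dst);	/* drop the reference taken above; NULL-safe */
}
#endif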

void dst_release(struct dst_entry *dst);

void dst_release_immediate(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	return atomic_inc_not_zero(&dst->__refcnt);
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted and not destroyed, grab a ref on it.
 * Returns true if dst is refcounted.
 */
static inline bool skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		WARN_ON(!rcu_read_lock_held());
		if (!dst_hold_safe(dst))
			dst = NULL;

		skb->_skb_refdst = (unsigned long)dst;
	}

	return skb->_skb_refdst != 0UL;
}
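
/*
 * Example (sketch): a noref dst is only valid inside the RCU read-side
 * section that installed it.  Code that queues an skb for later processing
 * outside that section must call skb_dst_force() first and cope with the
 * dst having died in the meantime.  Error handling here is hypothetical.
 */
#if 0
static int example_queue_skb(struct sk_buff *skb)
{
	if (!skb_dst_force(skb)) {
		/* dst was already being destroyed; drop the packet */
		kfree_skb(skb);
		return -EINVAL;
	}
	/* skb now holds a real reference and may leave the RCU section */
	return 0;
}
#endif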


/**
 *	__skb_tunnel_rx - prepare skb for rx reinsert
 *	@skb: buffer
 *	@dev: tunnel device
 *	@net: netns for packet i/o
 *
 *	After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 *	so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 *	skb_tunnel_rx - prepare skb for rx reinsert
 *	@skb: buffer
 *	@dev: tunnel device
 *	@net: netns for packet i/o
 *
 *	After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 *	so make some cleanups, and perform accounting.
 *	Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}

static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);

static inline void dst_confirm(struct dst_entry *dst)
{
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = NULL;

	/* The packets from tunnel devices (eg bareudp) may have only
	 * metadata in the dst pointer of skb. Hence a pointer check of
	 * neigh_lookup is needed.
	 */
	if (dst->ops->neigh_lookup)
		n = dst->ops->neigh_lookup(dst, skb, NULL);

	return IS_ERR(n) ? NULL : n;
}

static inline void dst_confirm_neigh(const struct dst_entry *dst,
				     const void *daddr)
{
	if (dst->ops->confirm_neigh)
		dst->ops->confirm_neigh(dst, daddr);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
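
/*
 * Example (sketch): the timeout is relative and given in jiffies, and
 * dst_set_expires() only ever moves an existing expiry *earlier*, never
 * later.  The helper name below is hypothetical.
 */
#if 0
static inline void example_expire_soon(struct dst_entry *dst)
{
	dst_set_expires(dst, 10 * HZ);	/* expire roughly 10 seconds from now */
}
#endif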

/* Output packet to network from transport.  */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(net, sk, skb);
}

/* Input packet from network to transport.  */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
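
/*
 * Example (sketch): a cached dst is revalidated with dst_check() before
 * reuse; a NULL return means the cache entry is stale and a fresh route
 * lookup is needed.  The cookie is protocol-specific (IPv6, for instance,
 * uses it to detect routing changes).  This caller is hypothetical.
 */
#if 0
static struct dst_entry *example_use_cached(struct dst_entry *cached, u32 cookie)
{
	struct dst_entry *dst = dst_check(cached, cookie);

	if (!dst)
		return NULL;	/* stale: caller must drop the cache and re-route */
	return dst;
}
#endif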

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *
xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
		      const struct flowi *fl, const struct sock *sk,
		      int flags, u32 if_id)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk, int flags,
					u32 if_id);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, const struct sock *sk,
				    int flags);

/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif

static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops->update_pmtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
}

/* update dst pmtu but do not trigger neighbour confirmation */
static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops->update_pmtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}

static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
					 struct dst_entry *encap_dst,
					 int headroom)
{
	u32 encap_mtu = dst_mtu(encap_dst);

	if (skb->len > encap_mtu - headroom)
		skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
}
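
/*
 * Example (sketch): a tunnel transmit path compares the inner packet
 * against the outer route's MTU minus its own encapsulation overhead and,
 * if needed, propagates the reduced PMTU to the inner dst before
 * encapsulating.  The function and the overhead figure are hypothetical.
 */
#if 0
static void example_tunnel_xmit(struct sk_buff *skb, struct dst_entry *outer_dst)
{
	int headroom = 20 + 8;	/* hypothetical outer IPv4 + UDP overhead */

	skb_tunnel_check_pmtu(skb, outer_dst, headroom);
	/* ... push the encapsulation headers and transmit ... */
}
#endif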

#endif /* _NET_DST_H */