/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan    :       Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)
#define IPSKB_NOPOLICY		BIT(8)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			protocol;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}
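
/*
 * Note: ipcm_init() marks the per-call TOS as "unset" with -1; helpers such
 * as get_rttos() further down fall back to the socket's inet->tos when the
 * cookie still carries that sentinel.
 */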

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
	ipcm->protocol = inet->inet_num;
}

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

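/*
 * IPCB() gives typed access to the IPv4 control block the stack keeps in
 * skb->cb while a packet traverses layer 3.  A minimal usage sketch, for
 * illustration only (max_frag is just a hypothetical local):
 *
 *	if (IPCB(skb)->flags & IPSKB_FORWARDED)
 *		max_frag = IPCB(skb)->frag_max_size;
 */
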
/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field and then processed like
   local ones, but only if someone wants them!  Otherwise a router that
   is not running rsvpd would kill RSVP.

   What user level does with them is a user level problem.
   I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough e.g. to forward mtrace requests
   sent to a multicast group to reach the destination designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
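
/*
 * The three flag bits and the 13-bit fragment offset share the 16-bit
 * frag_off field of the IP header, and the offset is carried in 8-byte
 * units.  A minimal sketch of pulling them apart (illustration only):
 *
 *	bool df = ip_hdr(skb)->frag_off & htons(IP_DF);
 *	unsigned int frag_off_bytes =
 *		(ntohs(ip_hdr(skb)->frag_off) & IP_OFFSET) * 8;
 */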

#define IP_FRAG_TIME	(30 * HZ)		/* fragment lifetime	*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}

struct ip_frag_state {
	bool		DF;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);
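
/*
 * A rough sketch of the corked-send pattern datagram protocols build on the
 * helpers above (illustrative only; real callers pass their own getfrag and
 * transport header length):
 *
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg, len, 0,
 *			     &ipc, &rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else
 *		err = ip_push_pending_frames(sk, &fl4);
 */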

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum 	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8  	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
					size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
						mib_statistic, \
						c, stats_list[i].entry); \
	} \
}
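
/*
 * These batch helpers expect an array of struct snmp_mib entries (see
 * <net/snmp.h>) terminated by a NULL name.  A hedged sketch of how the IPv4
 * proc code typically drives the 64-bit variant (table and MIB names as
 * used in net/ipv4/proc.c, shown here only as an example):
 *
 *	u64 buff64[IPSTATS_MIB_MAX] = { 0 };
 *
 *	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
 *				   net->mib.ip_statistics,
 *				   offsetof(struct ipstats_mib, syncp));
 */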

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0  && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing a wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}
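
/*
 * The decrement above patches the checksum incrementally rather than
 * recomputing it: TTL sits in the high byte of a 16-bit header word, so
 * taking one off the TTL is compensated by adding htons(0x0100) to the
 * one's-complement sum, with (check >= 0xFFFF) folding the end-around carry.
 */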

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return  pmtudisc == IP_PMTUDISC_DO ||
		(pmtudisc == IP_PMTUDISC_WANT &&
		 !ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (!mtu)
		mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	/* We had many attacks based on IPID, use the private
	 * generator as much as we can.
	 */
	if (sk && inet_sk(sk)->inet_daddr) {
		iph->id = htons(inet_sk(sk)->inet_id);
		inet_sk(sk)->inet_id += segs;
		return;
	}
	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		iph->id = 0;
	} else {
		/* Unfortunately we need the big hammer to get a suitable IPID */
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
			      sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr=ntohl(naddr);
	buf[0]=0x01;
	buf[1]=0x00;
	buf[2]=0x5e;
	buf[5]=addr&0xFF;
	addr>>=8;
	buf[4]=addr&0xFF;
	addr>>=8;
	buf[3]=addr&0x7F;
}
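
/*
 * Only the low 23 bits of the group address survive this RFC 1112 mapping,
 * so for example 224.1.2.3 becomes 01:00:5e:01:02:03 and distinct groups
 * can land on the same MAC address.
 */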

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];		/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr  >>= 8;
	buf[18] = addr & 0xff;
	addr  >>= 8;
	buf[17] = addr & 0xff;
	addr  >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}
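
/*
 * The __*_END markers above leave a USHRT_MAX-wide window per conntrack
 * user so a zone id can be folded into the value; e.g. defragmenting for
 * conntrack zone z on input would use IP_DEFRAG_CONNTRACK_IN + z, which
 * ip_defrag_user_in_between() still classifies as CONNTRACK_IN.
 */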

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */