/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the IP module.
 *
 * Version:     @(#)ip.h    1.0.2   05/07/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *              Mike McLagan    :   Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU   65535U          /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU    68              /* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;
struct inet_skb_parm {
        int                     iif;
        struct ip_options       opt;    /* Compiled IP options */
        u16                     flags;

#define IPSKB_FORWARDED         BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE  BIT(1)
#define IPSKB_XFRM_TRANSFORMED  BIT(2)
#define IPSKB_FRAG_COMPLETE     BIT(3)
#define IPSKB_REROUTED          BIT(4)
#define IPSKB_DOREDIRECT        BIT(5)
#define IPSKB_FRAG_PMTU         BIT(6)
#define IPSKB_L3SLAVE           BIT(7)
#define IPSKB_NOPOLICY          BIT(8)
#define IPSKB_MULTIPATH         BIT(9)

        u16                     frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
        return !!(flags & IPSKB_L3SLAVE);
}

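/* Length of the IPv4 header in bytes: the IHL field counts 32-bit words. */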
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
        return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
        struct sockcm_cookie    sockc;
        __be32                  addr;
        int                     oif;
        struct ip_options_rcu   *opt;
        __u8                    protocol;
        __u8                    ttl;
        __s16                   tos;
        char                    priority;
        __u16                   gso_size;
};

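/* Reset the cookie to defaults; tos == -1 means "not set", so the socket's
 * own TOS is used later (see get_rttos() below).
 */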
static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
        *ipcm = (struct ipcm_cookie) { .tos = -1 };
}

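/* Initialise the cookie from the socket's defaults: mark, timestamping
 * flags, bound device, source address, and inet_num for the protocol field.
 */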
static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
                                const struct inet_sock *inet)
{
        ipcm_init(ipcm);

        ipcm->sockc.mark = inet->sk.sk_mark;
        ipcm->sockc.tsflags = inet->sk.sk_tsflags;
        ipcm->oif = inet->sk.sk_bound_dev_if;
        ipcm->addr = inet->inet_saddr;
        ipcm->protocol = inet->inet_num;
}

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
                return IPCB(skb)->iif;
#endif
        return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field and then processed like
   local ones; but only if someone wants them! Otherwise, a router
   not running rsvpd will kill RSVP.

   What user level does with them is its own problem.
   I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough, e.g., to forward mtrace requests
   sent to a multicast group so that they reach the destination's designated
   router.
 */

struct ip_ra_chain {
        struct ip_ra_chain __rcu *next;
        struct sock             *sk;
        union {
                void            (*destructor)(struct sock *);
                struct sock     *saved_sk;
        };
        struct rcu_head         rcu;
};

/* IP flags. */
#define IP_CE           0x8000          /* Flag: "Congestion" */
#define IP_DF           0x4000          /* Flag: "Don't Fragment" */
#define IP_MF           0x2000          /* Flag: "More Fragments" */
#define IP_OFFSET       0x1FFF          /* "Fragment Offset" part */

#define IP_FRAG_TIME    (30 * HZ)       /* fragment lifetime */

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *      Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
                          __be32 saddr, __be32 daddr,
                          struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
           struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
                 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
        struct sk_buff  *frag;
        struct iphdr    *iph;
        int             offset;
        unsigned int    hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
                      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

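/* Unlink the current fragment from the frag list and return it, advancing
 * the iterator to the next one.
 */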
static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
        struct sk_buff *skb = iter->frag;

        iter->frag = skb->next;
        skb_mark_not_on_list(skb);

        return skb;
}

struct ip_frag_state {
        bool            DF;
        unsigned int    hlen;
        unsigned int    ll_rs;
        unsigned int    mtu;
        unsigned int    left;
        int             offset;
        int             ptr;
        __be16          not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
                  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
                             struct ip_frag_state *state);

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
                    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int len, int protolen,
                   struct ipcm_cookie *ipc,
                   struct rtable **rt,
                   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
                       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
                              struct sk_buff_head *queue,
                              struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
                            struct ipcm_cookie *ipc, struct rtable **rtp,
                            struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

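/* Collapse everything queued on the socket by ip_append_data() into a
 * single skb ready for transmission.
 */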
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
        return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

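/* In the two helpers below, a TOS supplied for this call (ipc->tos != -1)
 * overrides the socket's default TOS.
 */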
static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
        return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
{
        return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
        struct kvec iov[1];
        int         flags;
        __wsum      csum;
        int         csumoffset;         /* u16 offset of csum in iov[0].iov_base */
                                        /* -1 if not needed */
        int         bound_dev_if;
        u8          tos;
        kuid_t      uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
        return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
                           unsigned int len, u64 transmit_time);

#define IP_INC_STATS(net, field)        SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)      __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)   SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val)   SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)     __SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
                         size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
                                       size_t syncp_offset)
{
        return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
        return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
        int i, c; \
        for_each_possible_cpu(c) { \
                for (i = 0; stats_list[i].name; i++) \
                        buff64[i] += snmp_get_cpu_field64( \
                                        mib_statistic, \
                                        c, stats_list[i].entry, \
                                        offset); \
        } \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
        int i, c; \
        for_each_possible_cpu(c) { \
                for (i = 0; stats_list[i].name; i++) \
                        buff[i] += snmp_get_cpu_field( \
                                        mib_statistic, \
                                        c, stats_list[i].entry); \
        } \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
        if (!net->ipv4.sysctl_local_reserved_ports)
                return false;
        return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
        return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
        return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
        return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
        return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
        (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)

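/* True if this packet is a fragment: either MF is set or the fragment
 * offset is non-zero.
 */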
static inline bool ip_is_fragment(const struct iphdr *iph)
{
        return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The 2.2 version of this function was broken, producing a wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK (000625)
 */
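/* Decrement the TTL and patch the header checksum incrementally (one's
 * complement update with end-around carry) instead of recomputing it.
 */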
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
        u32 check = (__force u32)iph->check;

        check += (__force u32)htons(0x0100);
        iph->check = (__force __sum16)(check + (check >= 0xFFFF));
        return --iph->ttl;
}

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
        const struct rtable *rt = (const struct rtable *)dst;

        return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
        u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

        return pmtudisc == IP_PMTUDISC_DO ||
               (pmtudisc == IP_PMTUDISC_WANT &&
                !ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
        return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
               inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
               inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

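/* MTU towards @dst. When forwarding (and neither the ip_fwd_use_pmtu sysctl
 * nor a locked metric overrides it), prefer the route's MTU metric over the
 * device MTU, and subtract any lightweight-tunnel encapsulation headroom.
 */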
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
                                                    bool forwarding)
{
        struct net *net = dev_net(dst->dev);
        unsigned int mtu;

        if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
            ip_mtu_locked(dst) ||
            !forwarding)
                return dst_mtu(dst);

        /* 'forwarding = true' case should always honour route mtu */
        mtu = dst_metric_raw(dst, RTAX_MTU);
        if (!mtu)
                mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);

        return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
                                          const struct sk_buff *skb)
{
        unsigned int mtu;

        if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
                bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

                return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
        }

        mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
        return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
                                        int fc_mx_len,
                                        struct netlink_ext_ack *extack);

static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
        if (fib_metrics != &dst_default_metrics &&
            refcount_dec_and_test(&fib_metrics->refcnt))
                kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
        dst_init_metrics(dst, fib_metrics->metrics, true);

        if (fib_metrics != &dst_default_metrics) {
                dst->_metrics |= DST_METRICS_REFCOUNTED;
                refcount_inc(&fib_metrics->refcnt);
        }
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
        struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

        if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
                kfree(p);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

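/* Choose an IP ID covering @segs segments: connected sockets use the
 * per-socket inet_id counter, DF packets that will not be fragmented get a
 * constant ID of 0, and everything else falls back to the hashed generator
 * in __ip_select_ident().
 */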
static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
                                        struct sock *sk, int segs)
{
        struct iphdr *iph = ip_hdr(skb);

        /* We had many attacks based on IPID, use the private
         * generator as much as we can.
         */
        if (sk && inet_sk(sk)->inet_daddr) {
                iph->id = htons(inet_sk(sk)->inet_id);
                inet_sk(sk)->inet_id += segs;
                return;
        }
        if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
                iph->id = 0;
        } else {
                /* Unfortunately we need the big hammer to get a suitable IPID */
                __ip_select_ident(net, iph, segs);
        }
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
                                   struct sock *sk)
{
        ip_select_ident_segs(net, skb, sk, 1);
}

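/* Pseudo-header checksum (saddr, daddr, length, protocol) used when
 * computing or validating TCP/UDP checksums.
 */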
static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
        return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                                  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :      flow->v4addrs.src = iph->saddr;
 *                      flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
                                            const struct iphdr *iph)
{
        BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
                     offsetof(typeof(flow->addrs), v4addrs.src) +
                     sizeof(flow->addrs.v4addrs.src));
        memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
        const struct iphdr *iph = skb_gro_network_header(skb);

        return csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                  skb_gro_len(skb), proto, 0);
}

/*
 *      Map a multicast IP onto multicast MAC for type ethernet.
 */

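/* The low 23 bits of the group address are copied into the 01:00:5e MAC
 * prefix, e.g. 224.1.2.3 maps to 01:00:5e:01:02:03.
 */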
static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
        __u32 addr = ntohl(naddr);

        buf[0] = 0x01;
        buf[1] = 0x00;
        buf[2] = 0x5e;
        buf[5] = addr & 0xFF;
        addr >>= 8;
        buf[4] = addr & 0xFF;
        addr >>= 8;
        buf[3] = addr & 0x7F;
}

/*
 *      Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *      Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
        __u32 addr;
        unsigned char scope = broadcast[5] & 0xF;

        buf[0]  = 0;            /* Reserved */
        buf[1]  = 0xff;         /* Multicast QPN */
        buf[2]  = 0xff;
        buf[3]  = 0xff;
        addr    = ntohl(naddr);
        buf[4]  = 0xff;
        buf[5]  = 0x10 | scope; /* scope from broadcast address */
        buf[6]  = 0x40;         /* IPv4 signature */
        buf[7]  = 0x1b;
        buf[8]  = broadcast[8]; /* P_Key */
        buf[9]  = broadcast[9];
        buf[10] = 0;
        buf[11] = 0;
        buf[12] = 0;
        buf[13] = 0;
        buf[14] = 0;
        buf[15] = 0;
        buf[19] = addr & 0xff;
        addr  >>= 8;
        buf[18] = addr & 0xff;
        addr  >>= 8;
        buf[17] = addr & 0xff;
        addr  >>= 8;
        buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
        if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
                memcpy(buf, broadcast, 4);
        else
                memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
        inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == PF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);

                memset(&np->saddr, 0, sizeof(np->saddr));
                memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
        }
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
        return (__force unsigned int) ip;
}

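/* Per-netns hash over (address, port); callers typically use it to index
 * bound-socket hash tables.
 */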
static inline u32 ipv4_portaddr_hash(const struct net *net,
                                     __be32 saddr,
                                     unsigned int port)
{
        return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *      Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
        IP_DEFRAG_LOCAL_DELIVER,
        IP_DEFRAG_CALL_RA_CHAIN,
        IP_DEFRAG_CONNTRACK_IN,
        __IP_DEFRAG_CONNTRACK_IN_END    = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
        IP_DEFRAG_CONNTRACK_OUT,
        __IP_DEFRAG_CONNTRACK_OUT_END   = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
        IP_DEFRAG_CONNTRACK_BRIDGE_IN,
        __IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
        IP_DEFRAG_VS_IN,
        IP_DEFRAG_VS_OUT,
        IP_DEFRAG_VS_FWD,
        IP_DEFRAG_AF_PACKET,
        IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusive.
 */
static inline bool ip_defrag_user_in_between(u32 user,
                                             enum ip_defrag_users lower_bond,
                                             enum ip_defrag_users upper_bond)
{
        return user >= lower_bond && user <= upper_bond;
}

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
        return skb;
}
#endif

/*
 *      Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *      Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
                      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
                      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
                                  struct sk_buff *skb)
{
        return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
                         struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
                       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
                   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *      Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
                         struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
                 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
                  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
                  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
                  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
                    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
        ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
void icmp_global_consume(void);

extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
                                struct netlink_ext_ack *extack);

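/* An interface MTU below IPV4_MIN_MTU (68 bytes, RFC 791) cannot carry IPv4. */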
static inline bool inetdev_valid_mtu(unsigned int mtu)
{
        return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);

#endif  /* _IP_H */