1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * The IP to API glue.
8 *
9 * Authors: see ip.c
10 *
11 * Fixes:
12 * Many : Split from ip.c , see ip.c for history.
13 * Martin Mares : TOS setting fixed.
14 * Alan Cox : Fixed a couple of oopses in Martin's
15 * TOS tweaks.
16 * Mike McLagan : Routing by source
17 */
18
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/mm.h>
22 #include <linux/skbuff.h>
23 #include <linux/ip.h>
24 #include <linux/icmp.h>
25 #include <linux/inetdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/slab.h>
28 #include <net/sock.h>
29 #include <net/ip.h>
30 #include <net/icmp.h>
31 #include <net/tcp_states.h>
32 #include <linux/udp.h>
33 #include <linux/igmp.h>
34 #include <linux/netfilter.h>
35 #include <linux/route.h>
36 #include <linux/mroute.h>
37 #include <net/inet_ecn.h>
38 #include <net/route.h>
39 #include <net/xfrm.h>
40 #include <net/compat.h>
41 #include <net/checksum.h>
42 #if IS_ENABLED(CONFIG_IPV6)
43 #include <net/transp_v6.h>
44 #endif
45 #include <net/ip_fib.h>
46
47 #include <linux/errqueue.h>
48 #include <linux/uaccess.h>
49
50 #include <linux/bpfilter.h>
51
52 /*
53 * SOL_IP control messages.
54 */
55
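/*
 * The ip_cmsg_recv_* helpers below each append one piece of ancillary
 * data to a recvmsg() call when the matching IP_CMSG_* flag is set on
 * the socket.  As a rough userspace sketch (illustrative only, not part
 * of this file), a receiver would request and read IP_PKTINFO like so:
 *
 *	int on = 1;
 *	struct cmsghdr *cmsg;
 *	struct in_pktinfo *info;
 *
 *	setsockopt(fd, SOL_IP, IP_PKTINFO, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_IP && cmsg->cmsg_type == IP_PKTINFO)
 *			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
 */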
56 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
57 {
58 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
59
60 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
61
62 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
63 }
64
65 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
66 {
67 int ttl = ip_hdr(skb)->ttl;
68 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
69 }
70
71 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
72 {
73 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
74 }
75
76 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
77 {
78 if (IPCB(skb)->opt.optlen == 0)
79 return;
80
81 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
82 ip_hdr(skb) + 1);
83 }
84
85
86 static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
87 struct sk_buff *skb)
88 {
89 unsigned char optbuf[sizeof(struct ip_options) + 40];
90 struct ip_options *opt = (struct ip_options *)optbuf;
91
92 if (IPCB(skb)->opt.optlen == 0)
93 return;
94
95 if (ip_options_echo(net, opt, skb)) {
96 msg->msg_flags |= MSG_CTRUNC;
97 return;
98 }
99 ip_options_undo(opt);
100
101 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
102 }
103
104 static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
105 {
106 int val;
107
108 if (IPCB(skb)->frag_max_size == 0)
109 return;
110
111 val = IPCB(skb)->frag_max_size;
112 put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
113 }
114
115 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
116 int tlen, int offset)
117 {
118 __wsum csum = skb->csum;
119
120 if (skb->ip_summed != CHECKSUM_COMPLETE)
121 return;
122
123 if (offset != 0) {
124 int tend_off = skb_transport_offset(skb) + tlen;
125 csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
126 }
127
128 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
129 }
130
131 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
132 {
133 char *secdata;
134 u32 seclen, secid;
135 int err;
136
137 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
138 if (err)
139 return;
140
141 err = security_secid_to_secctx(secid, &secdata, &seclen);
142 if (err)
143 return;
144
145 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
146 security_release_secctx(secdata, seclen);
147 }
148
149 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150 {
151 __be16 _ports[2], *ports;
152 struct sockaddr_in sin;
153
154 /* All current transport protocols have the port numbers in the
155 * first four bytes of the transport header and this function is
156 * written with this assumption in mind.
157 */
158 ports = skb_header_pointer(skb, skb_transport_offset(skb),
159 sizeof(_ports), &_ports);
160 if (!ports)
161 return;
162
163 sin.sin_family = AF_INET;
164 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
165 sin.sin_port = ports[1];
166 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
167
168 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
169 }
170
171 void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
172 struct sk_buff *skb, int tlen, int offset)
173 {
174 struct inet_sock *inet = inet_sk(sk);
175 unsigned int flags = inet->cmsg_flags;
176
177 /* Ordered by supposed usage frequency */
178 if (flags & IP_CMSG_PKTINFO) {
179 ip_cmsg_recv_pktinfo(msg, skb);
180
181 flags &= ~IP_CMSG_PKTINFO;
182 if (!flags)
183 return;
184 }
185
186 if (flags & IP_CMSG_TTL) {
187 ip_cmsg_recv_ttl(msg, skb);
188
189 flags &= ~IP_CMSG_TTL;
190 if (!flags)
191 return;
192 }
193
194 if (flags & IP_CMSG_TOS) {
195 ip_cmsg_recv_tos(msg, skb);
196
197 flags &= ~IP_CMSG_TOS;
198 if (!flags)
199 return;
200 }
201
202 if (flags & IP_CMSG_RECVOPTS) {
203 ip_cmsg_recv_opts(msg, skb);
204
205 flags &= ~IP_CMSG_RECVOPTS;
206 if (!flags)
207 return;
208 }
209
210 if (flags & IP_CMSG_RETOPTS) {
211 ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
212
213 flags &= ~IP_CMSG_RETOPTS;
214 if (!flags)
215 return;
216 }
217
218 if (flags & IP_CMSG_PASSSEC) {
219 ip_cmsg_recv_security(msg, skb);
220
221 flags &= ~IP_CMSG_PASSSEC;
222 if (!flags)
223 return;
224 }
225
226 if (flags & IP_CMSG_ORIGDSTADDR) {
227 ip_cmsg_recv_dstaddr(msg, skb);
228
229 flags &= ~IP_CMSG_ORIGDSTADDR;
230 if (!flags)
231 return;
232 }
233
234 if (flags & IP_CMSG_CHECKSUM)
235 ip_cmsg_recv_checksum(msg, skb, tlen, offset);
236
237 if (flags & IP_CMSG_RECVFRAGSIZE)
238 ip_cmsg_recv_fragsize(msg, skb);
239 }
240 EXPORT_SYMBOL(ip_cmsg_recv_offset);
241
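/*
 * ip_cmsg_send() is the send-side counterpart: it walks the control
 * messages attached to sendmsg() and fills the ipcm_cookie (outgoing
 * interface, source address, options, TTL, TOS, protocol).  A rough
 * userspace sketch (illustrative only, not part of this file) that sets
 * the TOS of a single datagram:
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct cmsghdr *cmsg;
 *	int tos = 0x10;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_IP;
 *	cmsg->cmsg_type = IP_TOS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(tos));
 *	memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos));
 *	sendmsg(fd, &msg, 0);
 */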
242 int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
243 bool allow_ipv6)
244 {
245 int err, val;
246 struct cmsghdr *cmsg;
247 struct net *net = sock_net(sk);
248
249 for_each_cmsghdr(cmsg, msg) {
250 if (!CMSG_OK(msg, cmsg))
251 return -EINVAL;
252 #if IS_ENABLED(CONFIG_IPV6)
253 if (allow_ipv6 &&
254 cmsg->cmsg_level == SOL_IPV6 &&
255 cmsg->cmsg_type == IPV6_PKTINFO) {
256 struct in6_pktinfo *src_info;
257
258 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
259 return -EINVAL;
260 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
261 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
262 return -EINVAL;
263 if (src_info->ipi6_ifindex)
264 ipc->oif = src_info->ipi6_ifindex;
265 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
266 continue;
267 }
268 #endif
269 if (cmsg->cmsg_level == SOL_SOCKET) {
270 err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
271 if (err)
272 return err;
273 continue;
274 }
275
276 if (cmsg->cmsg_level != SOL_IP)
277 continue;
278 switch (cmsg->cmsg_type) {
279 case IP_RETOPTS:
280 err = cmsg->cmsg_len - sizeof(struct cmsghdr);
281
282 /* Our caller is responsible for freeing ipc->opt */
283 err = ip_options_get(net, &ipc->opt,
284 KERNEL_SOCKPTR(CMSG_DATA(cmsg)),
285 err < 40 ? err : 40);
286 if (err)
287 return err;
288 break;
289 case IP_PKTINFO:
290 {
291 struct in_pktinfo *info;
292 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
293 return -EINVAL;
294 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
295 if (info->ipi_ifindex)
296 ipc->oif = info->ipi_ifindex;
297 ipc->addr = info->ipi_spec_dst.s_addr;
298 break;
299 }
300 case IP_TTL:
301 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
302 return -EINVAL;
303 val = *(int *)CMSG_DATA(cmsg);
304 if (val < 1 || val > 255)
305 return -EINVAL;
306 ipc->ttl = val;
307 break;
308 case IP_TOS:
309 if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
310 val = *(int *)CMSG_DATA(cmsg);
311 else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
312 val = *(u8 *)CMSG_DATA(cmsg);
313 else
314 return -EINVAL;
315 if (val < 0 || val > 255)
316 return -EINVAL;
317 ipc->tos = val;
318 ipc->priority = rt_tos2priority(ipc->tos);
319 break;
320 case IP_PROTOCOL:
321 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
322 return -EINVAL;
323 val = *(int *)CMSG_DATA(cmsg);
324 if (val < 1 || val > 255)
325 return -EINVAL;
326 ipc->protocol = val;
327 break;
328 default:
329 return -EINVAL;
330 }
331 }
332 return 0;
333 }
334
335 static void ip_ra_destroy_rcu(struct rcu_head *head)
336 {
337 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
338
339 sock_put(ra->saved_sk);
340 kfree(ra);
341 }
342
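/*
 * ip_ra_control() adds a raw socket to, or removes it from, the per-netns
 * router alert chain used for IP_ROUTER_ALERT.  Insertions and removals
 * are serialized by net->ipv4.ra_mutex while readers walk the chain under
 * RCU, which is why removal defers the final sock_put()/kfree() to an RCU
 * callback (ip_ra_destroy_rcu() above).
 */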
343 int ip_ra_control(struct sock *sk, unsigned char on,
344 void (*destructor)(struct sock *))
345 {
346 struct ip_ra_chain *ra, *new_ra;
347 struct ip_ra_chain __rcu **rap;
348 struct net *net = sock_net(sk);
349
350 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
351 return -EINVAL;
352
353 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
354 if (on && !new_ra)
355 return -ENOMEM;
356
357 mutex_lock(&net->ipv4.ra_mutex);
358 for (rap = &net->ipv4.ra_chain;
359 (ra = rcu_dereference_protected(*rap,
360 lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
361 rap = &ra->next) {
362 if (ra->sk == sk) {
363 if (on) {
364 mutex_unlock(&net->ipv4.ra_mutex);
365 kfree(new_ra);
366 return -EADDRINUSE;
367 }
368 /* don't let ip_call_ra_chain() use sk again */
369 ra->sk = NULL;
370 RCU_INIT_POINTER(*rap, ra->next);
371 mutex_unlock(&net->ipv4.ra_mutex);
372
373 if (ra->destructor)
374 ra->destructor(sk);
375 /*
376 * Delay sock_put(sk) and kfree(ra) until after one RCU grace
377 * period. This guarantees ip_call_ra_chain() doesn't need
378 * to mess with socket refcounts.
379 */
380 ra->saved_sk = sk;
381 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
382 return 0;
383 }
384 }
385 if (!new_ra) {
386 mutex_unlock(&net->ipv4.ra_mutex);
387 return -ENOBUFS;
388 }
389 new_ra->sk = sk;
390 new_ra->destructor = destructor;
391
392 RCU_INIT_POINTER(new_ra->next, ra);
393 rcu_assign_pointer(*rap, new_ra);
394 sock_hold(sk);
395 mutex_unlock(&net->ipv4.ra_mutex);
396
397 return 0;
398 }
399
400 static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb,
401 struct sock_ee_data_rfc4884 *out)
402 {
403 switch (icmp_hdr(skb)->type) {
404 case ICMP_DEST_UNREACH:
405 case ICMP_TIME_EXCEEDED:
406 case ICMP_PARAMETERPROB:
407 ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr),
408 icmp_hdr(skb)->un.reserved[1] * 4);
409 }
410 }
411
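/*
 * Clone the skb carrying a received ICMP error and queue it on the
 * socket's error queue, recording origin, type, code and the offset of
 * the offending destination address, so ip_recv_error() below can hand
 * it to userspace via MSG_ERRQUEUE.  Callers normally invoke this only
 * for sockets that enabled IP_RECVERR.
 */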
412 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
413 __be16 port, u32 info, u8 *payload)
414 {
415 struct sock_exterr_skb *serr;
416
417 skb = skb_clone(skb, GFP_ATOMIC);
418 if (!skb)
419 return;
420
421 serr = SKB_EXT_ERR(skb);
422 serr->ee.ee_errno = err;
423 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
424 serr->ee.ee_type = icmp_hdr(skb)->type;
425 serr->ee.ee_code = icmp_hdr(skb)->code;
426 serr->ee.ee_pad = 0;
427 serr->ee.ee_info = info;
428 serr->ee.ee_data = 0;
429 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
430 skb_network_header(skb);
431 serr->port = port;
432
433 if (skb_pull(skb, payload - skb->data)) {
434 if (inet_sk(sk)->recverr_rfc4884)
435 ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
436
437 skb_reset_transport_header(skb);
438 if (sock_queue_err_skb(sk, skb) == 0)
439 return;
440 }
441 kfree_skb(skb);
442 }
443
444 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
445 {
446 struct inet_sock *inet = inet_sk(sk);
447 struct sock_exterr_skb *serr;
448 struct iphdr *iph;
449 struct sk_buff *skb;
450
451 if (!inet->recverr)
452 return;
453
454 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
455 if (!skb)
456 return;
457
458 skb_put(skb, sizeof(struct iphdr));
459 skb_reset_network_header(skb);
460 iph = ip_hdr(skb);
461 iph->daddr = daddr;
462
463 serr = SKB_EXT_ERR(skb);
464 serr->ee.ee_errno = err;
465 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
466 serr->ee.ee_type = 0;
467 serr->ee.ee_code = 0;
468 serr->ee.ee_pad = 0;
469 serr->ee.ee_info = info;
470 serr->ee.ee_data = 0;
471 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
472 serr->port = port;
473
474 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
475 skb_reset_transport_header(skb);
476
477 if (sock_queue_err_skb(sk, skb))
478 kfree_skb(skb);
479 }
480
481 /* For some errors we have valid addr_offset even with zero payload and
482 * zero port. Also, addr_offset should be supported if port is set.
483 */
484 static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
485 {
486 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
487 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
488 }
489
490 /* IPv4 supports cmsg on all icmp errors and some timestamps
491 *
492 * Timestamp code paths do not initialize the fields expected by cmsg:
493 * the PKTINFO fields in skb->cb[]. Fill those in here.
494 */
495 static bool ipv4_datagram_support_cmsg(const struct sock *sk,
496 struct sk_buff *skb,
497 int ee_origin)
498 {
499 struct in_pktinfo *info;
500
501 if (ee_origin == SO_EE_ORIGIN_ICMP)
502 return true;
503
504 if (ee_origin == SO_EE_ORIGIN_LOCAL)
505 return false;
506
507 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
508 * timestamp with egress dev. Not possible for packets without iif
509 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
510 */
511 info = PKTINFO_SKB_CB(skb);
512 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
513 !info->ipi_ifindex)
514 return false;
515
516 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
517 return true;
518 }
519
520 /*
521 * Handle MSG_ERRQUEUE
522 */
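/*
 * Rough userspace sketch (illustrative only, not part of this file):
 * once IP_RECVERR is enabled, queued errors are drained with the
 * MSG_ERRQUEUE flag and arrive as a SOL_IP/IP_RECVERR control message
 * carrying a struct sock_extended_err:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 */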
523 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
524 {
525 struct sock_exterr_skb *serr;
526 struct sk_buff *skb;
527 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
528 struct {
529 struct sock_extended_err ee;
530 struct sockaddr_in offender;
531 } errhdr;
532 int err;
533 int copied;
534
535 err = -EAGAIN;
536 skb = sock_dequeue_err_skb(sk);
537 if (!skb)
538 goto out;
539
540 copied = skb->len;
541 if (copied > len) {
542 msg->msg_flags |= MSG_TRUNC;
543 copied = len;
544 }
545 err = skb_copy_datagram_msg(skb, 0, msg, copied);
546 if (unlikely(err)) {
547 kfree_skb(skb);
548 return err;
549 }
550 sock_recv_timestamp(msg, sk, skb);
551
552 serr = SKB_EXT_ERR(skb);
553
554 if (sin && ipv4_datagram_support_addr(serr)) {
555 sin->sin_family = AF_INET;
556 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
557 serr->addr_offset);
558 sin->sin_port = serr->port;
559 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
560 *addr_len = sizeof(*sin);
561 }
562
563 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
564 sin = &errhdr.offender;
565 memset(sin, 0, sizeof(*sin));
566
567 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
568 sin->sin_family = AF_INET;
569 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
570 if (inet_sk(sk)->cmsg_flags)
571 ip_cmsg_recv(msg, skb);
572 }
573
574 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
575
576 /* Now we could try to dump the offending packet's options */
577
578 msg->msg_flags |= MSG_ERRQUEUE;
579 err = copied;
580
581 consume_skb(skb);
582 out:
583 return err;
584 }
585
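/*
 * On SOCK_STREAM sockets the two ECN bits of the TOS byte belong to the
 * TCP ECN machinery, so __ip_sock_set_tos() preserves them and only lets
 * the caller change the remaining (DSCP/precedence) bits.
 */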
586 static void __ip_sock_set_tos(struct sock *sk, int val)
587 {
588 if (sk->sk_type == SOCK_STREAM) {
589 val &= ~INET_ECN_MASK;
590 val |= inet_sk(sk)->tos & INET_ECN_MASK;
591 }
592 if (inet_sk(sk)->tos != val) {
593 inet_sk(sk)->tos = val;
594 sk->sk_priority = rt_tos2priority(val);
595 sk_dst_reset(sk);
596 }
597 }
598
599 void ip_sock_set_tos(struct sock *sk, int val)
600 {
601 lock_sock(sk);
602 __ip_sock_set_tos(sk, val);
603 release_sock(sk);
604 }
605 EXPORT_SYMBOL(ip_sock_set_tos);
606
607 void ip_sock_set_freebind(struct sock *sk)
608 {
609 lock_sock(sk);
610 inet_sk(sk)->freebind = true;
611 release_sock(sk);
612 }
613 EXPORT_SYMBOL(ip_sock_set_freebind);
614
615 void ip_sock_set_recverr(struct sock *sk)
616 {
617 lock_sock(sk);
618 inet_sk(sk)->recverr = true;
619 release_sock(sk);
620 }
621 EXPORT_SYMBOL(ip_sock_set_recverr);
622
623 int ip_sock_set_mtu_discover(struct sock *sk, int val)
624 {
625 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
626 return -EINVAL;
627 lock_sock(sk);
628 inet_sk(sk)->pmtudisc = val;
629 release_sock(sk);
630 return 0;
631 }
632 EXPORT_SYMBOL(ip_sock_set_mtu_discover);
633
634 void ip_sock_set_pktinfo(struct sock *sk)
635 {
636 lock_sock(sk);
637 inet_sk(sk)->cmsg_flags |= IP_CMSG_PKTINFO;
638 release_sock(sk);
639 }
640 EXPORT_SYMBOL(ip_sock_set_pktinfo);
641
642 /*
643 * Socket option code for IP. This is the end of the line after any
644 * TCP, UDP, etc. options on an IP socket.
645 */
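/*
 * The multicast membership options listed in setsockopt_needs_rtnl()
 * take the RTNL lock in addition to the socket lock, since joining or
 * leaving a group can end up modifying per-device multicast state.
 */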
646 static bool setsockopt_needs_rtnl(int optname)
647 {
648 switch (optname) {
649 case IP_ADD_MEMBERSHIP:
650 case IP_ADD_SOURCE_MEMBERSHIP:
651 case IP_BLOCK_SOURCE:
652 case IP_DROP_MEMBERSHIP:
653 case IP_DROP_SOURCE_MEMBERSHIP:
654 case IP_MSFILTER:
655 case IP_UNBLOCK_SOURCE:
656 case MCAST_BLOCK_SOURCE:
657 case MCAST_MSFILTER:
658 case MCAST_JOIN_GROUP:
659 case MCAST_JOIN_SOURCE_GROUP:
660 case MCAST_LEAVE_GROUP:
661 case MCAST_LEAVE_SOURCE_GROUP:
662 case MCAST_UNBLOCK_SOURCE:
663 return true;
664 }
665 return false;
666 }
667
668 static int set_mcast_msfilter(struct sock *sk, int ifindex,
669 int numsrc, int fmode,
670 struct sockaddr_storage *group,
671 struct sockaddr_storage *list)
672 {
673 struct ip_msfilter *msf;
674 struct sockaddr_in *psin;
675 int err, i;
676
677 msf = kmalloc(IP_MSFILTER_SIZE(numsrc), GFP_KERNEL);
678 if (!msf)
679 return -ENOBUFS;
680
681 psin = (struct sockaddr_in *)group;
682 if (psin->sin_family != AF_INET)
683 goto Eaddrnotavail;
684 msf->imsf_multiaddr = psin->sin_addr.s_addr;
685 msf->imsf_interface = 0;
686 msf->imsf_fmode = fmode;
687 msf->imsf_numsrc = numsrc;
688 for (i = 0; i < numsrc; ++i) {
689 psin = (struct sockaddr_in *)&list[i];
690
691 if (psin->sin_family != AF_INET)
692 goto Eaddrnotavail;
693 msf->imsf_slist_flex[i] = psin->sin_addr.s_addr;
694 }
695 err = ip_mc_msfilter(sk, msf, ifindex);
696 kfree(msf);
697 return err;
698
699 Eaddrnotavail:
700 kfree(msf);
701 return -EADDRNOTAVAIL;
702 }
703
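/*
 * Copy a struct group_source_req from userspace, accepting either the
 * native layout or the 32-bit compat layout when the caller is a compat
 * task (in_compat_syscall()).
 */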
704 static int copy_group_source_from_sockptr(struct group_source_req *greqs,
705 sockptr_t optval, int optlen)
706 {
707 if (in_compat_syscall()) {
708 struct compat_group_source_req gr32;
709
710 if (optlen != sizeof(gr32))
711 return -EINVAL;
712 if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
713 return -EFAULT;
714 greqs->gsr_interface = gr32.gsr_interface;
715 greqs->gsr_group = gr32.gsr_group;
716 greqs->gsr_source = gr32.gsr_source;
717 } else {
718 if (optlen != sizeof(*greqs))
719 return -EINVAL;
720 if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
721 return -EFAULT;
722 }
723
724 return 0;
725 }
726
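/*
 * Handle the protocol-independent source filtering options
 * (MCAST_BLOCK_SOURCE, MCAST_UNBLOCK_SOURCE, MCAST_JOIN_SOURCE_GROUP,
 * MCAST_LEAVE_SOURCE_GROUP) by translating them into an ip_mreq_source
 * plus a filter mode and an add/drop flag for ip_mc_source().
 */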
727 static int do_mcast_group_source(struct sock *sk, int optname,
728 sockptr_t optval, int optlen)
729 {
730 struct group_source_req greqs;
731 struct ip_mreq_source mreqs;
732 struct sockaddr_in *psin;
733 int omode, add, err;
734
735 err = copy_group_source_from_sockptr(&greqs, optval, optlen);
736 if (err)
737 return err;
738
739 if (greqs.gsr_group.ss_family != AF_INET ||
740 greqs.gsr_source.ss_family != AF_INET)
741 return -EADDRNOTAVAIL;
742
743 psin = (struct sockaddr_in *)&greqs.gsr_group;
744 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
745 psin = (struct sockaddr_in *)&greqs.gsr_source;
746 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
747 mreqs.imr_interface = 0; /* use index for mc_source */
748
749 if (optname == MCAST_BLOCK_SOURCE) {
750 omode = MCAST_EXCLUDE;
751 add = 1;
752 } else if (optname == MCAST_UNBLOCK_SOURCE) {
753 omode = MCAST_EXCLUDE;
754 add = 0;
755 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
756 struct ip_mreqn mreq;
757
758 psin = (struct sockaddr_in *)&greqs.gsr_group;
759 mreq.imr_multiaddr = psin->sin_addr;
760 mreq.imr_address.s_addr = 0;
761 mreq.imr_ifindex = greqs.gsr_interface;
762 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
763 if (err && err != -EADDRINUSE)
764 return err;
765 greqs.gsr_interface = mreq.imr_ifindex;
766 omode = MCAST_INCLUDE;
767 add = 1;
768 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
769 omode = MCAST_INCLUDE;
770 add = 0;
771 }
772 return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);
773 }
774
775 static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
776 {
777 struct group_filter *gsf = NULL;
778 int err;
779
780 if (optlen < GROUP_FILTER_SIZE(0))
781 return -EINVAL;
782 if (optlen > READ_ONCE(sysctl_optmem_max))
783 return -ENOBUFS;
784
785 gsf = memdup_sockptr(optval, optlen);
786 if (IS_ERR(gsf))
787 return PTR_ERR(gsf);
788
789 /* numsrc >= (4G-140)/128 overflow in 32 bits */
790 err = -ENOBUFS;
791 if (gsf->gf_numsrc >= 0x1ffffff ||
792 gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
793 goto out_free_gsf;
794
795 err = -EINVAL;
796 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
797 goto out_free_gsf;
798
799 err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc,
800 gsf->gf_fmode, &gsf->gf_group,
801 gsf->gf_slist_flex);
802 out_free_gsf:
803 kfree(gsf);
804 return err;
805 }
806
807 static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
808 int optlen)
809 {
810 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
811 struct compat_group_filter *gf32;
812 unsigned int n;
813 void *p;
814 int err;
815
816 if (optlen < size0)
817 return -EINVAL;
818 if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
819 return -ENOBUFS;
820
821 p = kmalloc(optlen + 4, GFP_KERNEL);
822 if (!p)
823 return -ENOMEM;
824 gf32 = p + 4; /* we want ->gf_group and ->gf_slist_flex aligned */
825
826 err = -EFAULT;
827 if (copy_from_sockptr(gf32, optval, optlen))
828 goto out_free_gsf;
829
830 /* numsrc >= (4G-140)/128 overflow in 32 bits */
831 n = gf32->gf_numsrc;
832 err = -ENOBUFS;
833 if (n >= 0x1ffffff)
834 goto out_free_gsf;
835
836 err = -EINVAL;
837 if (offsetof(struct compat_group_filter, gf_slist_flex[n]) > optlen)
838 goto out_free_gsf;
839
840 /* numsrc >= (4G-140)/128 overflow in 32 bits */
841 err = -ENOBUFS;
842 if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
843 goto out_free_gsf;
844 err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
845 &gf32->gf_group, gf32->gf_slist_flex);
846 out_free_gsf:
847 kfree(p);
848 return err;
849 }
850
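/*
 * MCAST_JOIN_GROUP/MCAST_LEAVE_GROUP take a struct group_req.  Rough
 * userspace sketch (illustrative only, not part of this file):
 *
 *	struct group_req gr = { .gr_interface = ifindex };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&gr.gr_group;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("239.1.2.3");
 *	setsockopt(fd, SOL_IP, MCAST_JOIN_GROUP, &gr, sizeof(gr));
 */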
851 static int ip_mcast_join_leave(struct sock *sk, int optname,
852 sockptr_t optval, int optlen)
853 {
854 struct ip_mreqn mreq = { };
855 struct sockaddr_in *psin;
856 struct group_req greq;
857
858 if (optlen < sizeof(struct group_req))
859 return -EINVAL;
860 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
861 return -EFAULT;
862
863 psin = (struct sockaddr_in *)&greq.gr_group;
864 if (psin->sin_family != AF_INET)
865 return -EINVAL;
866 mreq.imr_multiaddr = psin->sin_addr;
867 mreq.imr_ifindex = greq.gr_interface;
868 if (optname == MCAST_JOIN_GROUP)
869 return ip_mc_join_group(sk, &mreq);
870 return ip_mc_leave_group(sk, &mreq);
871 }
872
873 static int compat_ip_mcast_join_leave(struct sock *sk, int optname,
874 sockptr_t optval, int optlen)
875 {
876 struct compat_group_req greq;
877 struct ip_mreqn mreq = { };
878 struct sockaddr_in *psin;
879
880 if (optlen < sizeof(struct compat_group_req))
881 return -EINVAL;
882 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
883 return -EFAULT;
884
885 psin = (struct sockaddr_in *)&greq.gr_group;
886 if (psin->sin_family != AF_INET)
887 return -EINVAL;
888 mreq.imr_multiaddr = psin->sin_addr;
889 mreq.imr_ifindex = greq.gr_interface;
890
891 if (optname == MCAST_JOIN_GROUP)
892 return ip_mc_join_group(sk, &mreq);
893 return ip_mc_leave_group(sk, &mreq);
894 }
895
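/*
 * do_ip_setsockopt() first copies a plain int (or char) argument for the
 * simple options, dispatches IP_ROUTER_ALERT and the multicast-routing
 * options early, and then takes the socket lock (plus RTNL where
 * setsockopt_needs_rtnl() says so) for everything else.
 */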
896 static int do_ip_setsockopt(struct sock *sk, int level, int optname,
897 sockptr_t optval, unsigned int optlen)
898 {
899 struct inet_sock *inet = inet_sk(sk);
900 struct net *net = sock_net(sk);
901 int val = 0, err;
902 bool needs_rtnl = setsockopt_needs_rtnl(optname);
903
904 switch (optname) {
905 case IP_PKTINFO:
906 case IP_RECVTTL:
907 case IP_RECVOPTS:
908 case IP_RECVTOS:
909 case IP_RETOPTS:
910 case IP_TOS:
911 case IP_TTL:
912 case IP_HDRINCL:
913 case IP_MTU_DISCOVER:
914 case IP_RECVERR:
915 case IP_ROUTER_ALERT:
916 case IP_FREEBIND:
917 case IP_PASSSEC:
918 case IP_TRANSPARENT:
919 case IP_MINTTL:
920 case IP_NODEFRAG:
921 case IP_BIND_ADDRESS_NO_PORT:
922 case IP_UNICAST_IF:
923 case IP_MULTICAST_TTL:
924 case IP_MULTICAST_ALL:
925 case IP_MULTICAST_LOOP:
926 case IP_RECVORIGDSTADDR:
927 case IP_CHECKSUM:
928 case IP_RECVFRAGSIZE:
929 case IP_RECVERR_RFC4884:
930 if (optlen >= sizeof(int)) {
931 if (copy_from_sockptr(&val, optval, sizeof(val)))
932 return -EFAULT;
933 } else if (optlen >= sizeof(char)) {
934 unsigned char ucval;
935
936 if (copy_from_sockptr(&ucval, optval, sizeof(ucval)))
937 return -EFAULT;
938 val = (int) ucval;
939 }
940 }
941
942 /* If optlen==0, it is equivalent to val == 0 */
943
944 if (optname == IP_ROUTER_ALERT)
945 return ip_ra_control(sk, val ? 1 : 0, NULL);
946 if (ip_mroute_opt(optname))
947 return ip_mroute_setsockopt(sk, optname, optval, optlen);
948
949 err = 0;
950 if (needs_rtnl)
951 rtnl_lock();
952 lock_sock(sk);
953
954 switch (optname) {
955 case IP_OPTIONS:
956 {
957 struct ip_options_rcu *old, *opt = NULL;
958
959 if (optlen > 40)
960 goto e_inval;
961 err = ip_options_get(sock_net(sk), &opt, optval, optlen);
962 if (err)
963 break;
964 old = rcu_dereference_protected(inet->inet_opt,
965 lockdep_sock_is_held(sk));
966 if (inet->is_icsk) {
967 struct inet_connection_sock *icsk = inet_csk(sk);
968 #if IS_ENABLED(CONFIG_IPV6)
969 if (sk->sk_family == PF_INET ||
970 (!((1 << sk->sk_state) &
971 (TCPF_LISTEN | TCPF_CLOSE)) &&
972 inet->inet_daddr != LOOPBACK4_IPV6)) {
973 #endif
974 if (old)
975 icsk->icsk_ext_hdr_len -= old->opt.optlen;
976 if (opt)
977 icsk->icsk_ext_hdr_len += opt->opt.optlen;
978 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
979 #if IS_ENABLED(CONFIG_IPV6)
980 }
981 #endif
982 }
983 rcu_assign_pointer(inet->inet_opt, opt);
984 if (old)
985 kfree_rcu(old, rcu);
986 break;
987 }
988 case IP_PKTINFO:
989 if (val)
990 inet->cmsg_flags |= IP_CMSG_PKTINFO;
991 else
992 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
993 break;
994 case IP_RECVTTL:
995 if (val)
996 inet->cmsg_flags |= IP_CMSG_TTL;
997 else
998 inet->cmsg_flags &= ~IP_CMSG_TTL;
999 break;
1000 case IP_RECVTOS:
1001 if (val)
1002 inet->cmsg_flags |= IP_CMSG_TOS;
1003 else
1004 inet->cmsg_flags &= ~IP_CMSG_TOS;
1005 break;
1006 case IP_RECVOPTS:
1007 if (val)
1008 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
1009 else
1010 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
1011 break;
1012 case IP_RETOPTS:
1013 if (val)
1014 inet->cmsg_flags |= IP_CMSG_RETOPTS;
1015 else
1016 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
1017 break;
1018 case IP_PASSSEC:
1019 if (val)
1020 inet->cmsg_flags |= IP_CMSG_PASSSEC;
1021 else
1022 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
1023 break;
1024 case IP_RECVORIGDSTADDR:
1025 if (val)
1026 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
1027 else
1028 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
1029 break;
1030 case IP_CHECKSUM:
1031 if (val) {
1032 if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
1033 inet_inc_convert_csum(sk);
1034 inet->cmsg_flags |= IP_CMSG_CHECKSUM;
1035 }
1036 } else {
1037 if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
1038 inet_dec_convert_csum(sk);
1039 inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
1040 }
1041 }
1042 break;
1043 case IP_RECVFRAGSIZE:
1044 if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
1045 goto e_inval;
1046 if (val)
1047 inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE;
1048 else
1049 inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
1050 break;
1051 case IP_TOS: /* This sets both TOS and Precedence */
1052 __ip_sock_set_tos(sk, val);
1053 break;
1054 case IP_TTL:
1055 if (optlen < 1)
1056 goto e_inval;
1057 if (val != -1 && (val < 1 || val > 255))
1058 goto e_inval;
1059 inet->uc_ttl = val;
1060 break;
1061 case IP_HDRINCL:
1062 if (sk->sk_type != SOCK_RAW) {
1063 err = -ENOPROTOOPT;
1064 break;
1065 }
1066 inet->hdrincl = val ? 1 : 0;
1067 break;
1068 case IP_NODEFRAG:
1069 if (sk->sk_type != SOCK_RAW) {
1070 err = -ENOPROTOOPT;
1071 break;
1072 }
1073 inet->nodefrag = val ? 1 : 0;
1074 break;
1075 case IP_BIND_ADDRESS_NO_PORT:
1076 inet->bind_address_no_port = val ? 1 : 0;
1077 break;
1078 case IP_MTU_DISCOVER:
1079 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
1080 goto e_inval;
1081 inet->pmtudisc = val;
1082 break;
1083 case IP_RECVERR:
1084 inet->recverr = !!val;
1085 if (!val)
1086 skb_queue_purge(&sk->sk_error_queue);
1087 break;
1088 case IP_RECVERR_RFC4884:
1089 if (val < 0 || val > 1)
1090 goto e_inval;
1091 inet->recverr_rfc4884 = !!val;
1092 break;
1093 case IP_MULTICAST_TTL:
1094 if (sk->sk_type == SOCK_STREAM)
1095 goto e_inval;
1096 if (optlen < 1)
1097 goto e_inval;
1098 if (val == -1)
1099 val = 1;
1100 if (val < 0 || val > 255)
1101 goto e_inval;
1102 inet->mc_ttl = val;
1103 break;
1104 case IP_MULTICAST_LOOP:
1105 if (optlen < 1)
1106 goto e_inval;
1107 inet->mc_loop = !!val;
1108 break;
1109 case IP_UNICAST_IF:
1110 {
1111 struct net_device *dev = NULL;
1112 int ifindex;
1113 int midx;
1114
1115 if (optlen != sizeof(int))
1116 goto e_inval;
1117
1118 ifindex = (__force int)ntohl((__force __be32)val);
1119 if (ifindex == 0) {
1120 inet->uc_index = 0;
1121 err = 0;
1122 break;
1123 }
1124
1125 dev = dev_get_by_index(sock_net(sk), ifindex);
1126 err = -EADDRNOTAVAIL;
1127 if (!dev)
1128 break;
1129
1130 midx = l3mdev_master_ifindex(dev);
1131 dev_put(dev);
1132
1133 err = -EINVAL;
1134 if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
1135 break;
1136
1137 inet->uc_index = ifindex;
1138 err = 0;
1139 break;
1140 }
1141 case IP_MULTICAST_IF:
1142 {
1143 struct ip_mreqn mreq;
1144 struct net_device *dev = NULL;
1145 int midx;
1146
1147 if (sk->sk_type == SOCK_STREAM)
1148 goto e_inval;
1149 /*
1150 * Check the arguments are allowable
1151 */
1152
1153 if (optlen < sizeof(struct in_addr))
1154 goto e_inval;
1155
1156 err = -EFAULT;
1157 if (optlen >= sizeof(struct ip_mreqn)) {
1158 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1159 break;
1160 } else {
1161 memset(&mreq, 0, sizeof(mreq));
1162 if (optlen >= sizeof(struct ip_mreq)) {
1163 if (copy_from_sockptr(&mreq, optval,
1164 sizeof(struct ip_mreq)))
1165 break;
1166 } else if (optlen >= sizeof(struct in_addr)) {
1167 if (copy_from_sockptr(&mreq.imr_address, optval,
1168 sizeof(struct in_addr)))
1169 break;
1170 }
1171 }
1172
1173 if (!mreq.imr_ifindex) {
1174 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
1175 inet->mc_index = 0;
1176 inet->mc_addr = 0;
1177 err = 0;
1178 break;
1179 }
1180 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
1181 if (dev)
1182 mreq.imr_ifindex = dev->ifindex;
1183 } else
1184 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
1185
1186
1187 err = -EADDRNOTAVAIL;
1188 if (!dev)
1189 break;
1190
1191 midx = l3mdev_master_ifindex(dev);
1192
1193 dev_put(dev);
1194
1195 err = -EINVAL;
1196 if (sk->sk_bound_dev_if &&
1197 mreq.imr_ifindex != sk->sk_bound_dev_if &&
1198 midx != sk->sk_bound_dev_if)
1199 break;
1200
1201 inet->mc_index = mreq.imr_ifindex;
1202 inet->mc_addr = mreq.imr_address.s_addr;
1203 err = 0;
1204 break;
1205 }
1206
1207 case IP_ADD_MEMBERSHIP:
1208 case IP_DROP_MEMBERSHIP:
1209 {
1210 struct ip_mreqn mreq;
1211
1212 err = -EPROTO;
1213 if (inet_sk(sk)->is_icsk)
1214 break;
1215
1216 if (optlen < sizeof(struct ip_mreq))
1217 goto e_inval;
1218 err = -EFAULT;
1219 if (optlen >= sizeof(struct ip_mreqn)) {
1220 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1221 break;
1222 } else {
1223 memset(&mreq, 0, sizeof(mreq));
1224 if (copy_from_sockptr(&mreq, optval,
1225 sizeof(struct ip_mreq)))
1226 break;
1227 }
1228
1229 if (optname == IP_ADD_MEMBERSHIP)
1230 err = ip_mc_join_group(sk, &mreq);
1231 else
1232 err = ip_mc_leave_group(sk, &mreq);
1233 break;
1234 }
1235 case IP_MSFILTER:
1236 {
1237 struct ip_msfilter *msf;
1238
1239 if (optlen < IP_MSFILTER_SIZE(0))
1240 goto e_inval;
1241 if (optlen > READ_ONCE(sysctl_optmem_max)) {
1242 err = -ENOBUFS;
1243 break;
1244 }
1245 msf = memdup_sockptr(optval, optlen);
1246 if (IS_ERR(msf)) {
1247 err = PTR_ERR(msf);
1248 break;
1249 }
1250 /* numsrc >= (1G-4) overflow in 32 bits */
1251 if (msf->imsf_numsrc >= 0x3ffffffcU ||
1252 msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
1253 kfree(msf);
1254 err = -ENOBUFS;
1255 break;
1256 }
1257 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
1258 kfree(msf);
1259 err = -EINVAL;
1260 break;
1261 }
1262 err = ip_mc_msfilter(sk, msf, 0);
1263 kfree(msf);
1264 break;
1265 }
1266 case IP_BLOCK_SOURCE:
1267 case IP_UNBLOCK_SOURCE:
1268 case IP_ADD_SOURCE_MEMBERSHIP:
1269 case IP_DROP_SOURCE_MEMBERSHIP:
1270 {
1271 struct ip_mreq_source mreqs;
1272 int omode, add;
1273
1274 if (optlen != sizeof(struct ip_mreq_source))
1275 goto e_inval;
1276 if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) {
1277 err = -EFAULT;
1278 break;
1279 }
1280 if (optname == IP_BLOCK_SOURCE) {
1281 omode = MCAST_EXCLUDE;
1282 add = 1;
1283 } else if (optname == IP_UNBLOCK_SOURCE) {
1284 omode = MCAST_EXCLUDE;
1285 add = 0;
1286 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
1287 struct ip_mreqn mreq;
1288
1289 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
1290 mreq.imr_address.s_addr = mreqs.imr_interface;
1291 mreq.imr_ifindex = 0;
1292 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
1293 if (err && err != -EADDRINUSE)
1294 break;
1295 omode = MCAST_INCLUDE;
1296 add = 1;
1297 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
1298 omode = MCAST_INCLUDE;
1299 add = 0;
1300 }
1301 err = ip_mc_source(add, omode, sk, &mreqs, 0);
1302 break;
1303 }
1304 case MCAST_JOIN_GROUP:
1305 case MCAST_LEAVE_GROUP:
1306 if (in_compat_syscall())
1307 err = compat_ip_mcast_join_leave(sk, optname, optval,
1308 optlen);
1309 else
1310 err = ip_mcast_join_leave(sk, optname, optval, optlen);
1311 break;
1312 case MCAST_JOIN_SOURCE_GROUP:
1313 case MCAST_LEAVE_SOURCE_GROUP:
1314 case MCAST_BLOCK_SOURCE:
1315 case MCAST_UNBLOCK_SOURCE:
1316 err = do_mcast_group_source(sk, optname, optval, optlen);
1317 break;
1318 case MCAST_MSFILTER:
1319 if (in_compat_syscall())
1320 err = compat_ip_set_mcast_msfilter(sk, optval, optlen);
1321 else
1322 err = ip_set_mcast_msfilter(sk, optval, optlen);
1323 break;
1324 case IP_MULTICAST_ALL:
1325 if (optlen < 1)
1326 goto e_inval;
1327 if (val != 0 && val != 1)
1328 goto e_inval;
1329 inet->mc_all = val;
1330 break;
1331
1332 case IP_FREEBIND:
1333 if (optlen < 1)
1334 goto e_inval;
1335 inet->freebind = !!val;
1336 break;
1337
1338 case IP_IPSEC_POLICY:
1339 case IP_XFRM_POLICY:
1340 err = -EPERM;
1341 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1342 break;
1343 err = xfrm_user_policy(sk, optname, optval, optlen);
1344 break;
1345
1346 case IP_TRANSPARENT:
1347 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1348 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1349 err = -EPERM;
1350 break;
1351 }
1352 if (optlen < 1)
1353 goto e_inval;
1354 inet->transparent = !!val;
1355 break;
1356
1357 case IP_MINTTL:
1358 if (optlen < 1)
1359 goto e_inval;
1360 if (val < 0 || val > 255)
1361 goto e_inval;
1362 inet->min_ttl = val;
1363 break;
1364
1365 default:
1366 err = -ENOPROTOOPT;
1367 break;
1368 }
1369 release_sock(sk);
1370 if (needs_rtnl)
1371 rtnl_unlock();
1372 return err;
1373
1374 e_inval:
1375 release_sock(sk);
1376 if (needs_rtnl)
1377 rtnl_unlock();
1378 return -EINVAL;
1379 }
1380
1381 /**
1382 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1383 * @sk: socket
1384 * @skb: buffer
1385 *
1386 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
1387 * destination in skb->cb[] before the dst is dropped.
1388 * This way the receiver doesn't take cache line misses to read the rtable.
1389 */
1390 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1391 {
1392 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1393 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1394 ipv6_sk_rxinfo(sk);
1395
1396 if (prepare && skb_rtable(skb)) {
1397 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1398 * which has interface index (iif) as the first member of the
1399 * underlying inet{6}_skb_parm struct. This code then overlays
1400 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1401 * element so the iif is picked up from the prior IPCB. If iif
1402 * is the loopback interface, then return the sending interface
1403 * (e.g., process binds socket to eth0 for Tx which is
1404 * redirected to loopback in the rtable/dst).
1405 */
1406 struct rtable *rt = skb_rtable(skb);
1407 bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
1408
1409 if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
1410 pktinfo->ipi_ifindex = inet_iif(skb);
1411 else if (l3slave && rt && rt->rt_iif)
1412 pktinfo->ipi_ifindex = rt->rt_iif;
1413
1414 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1415 } else {
1416 pktinfo->ipi_ifindex = 0;
1417 pktinfo->ipi_spec_dst.s_addr = 0;
1418 }
1419 skb_dst_drop(skb);
1420 }
1421
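/*
 * SOL_IP setsockopt entry point.  Options this file does not handle are
 * passed on to bpfilter (when enabled) and to netfilter via
 * nf_setsockopt(), so e.g. iptables socket options keep working through
 * the same path.
 */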
1422 int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1423 unsigned int optlen)
1424 {
1425 int err;
1426
1427 if (level != SOL_IP)
1428 return -ENOPROTOOPT;
1429
1430 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1431 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
1432 if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
1433 optname < BPFILTER_IPT_SET_MAX)
1434 err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
1435 #endif
1436 #ifdef CONFIG_NETFILTER
1437 /* we need to exclude all possible ENOPROTOOPTs except default case */
1438 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1439 optname != IP_IPSEC_POLICY &&
1440 optname != IP_XFRM_POLICY &&
1441 !ip_mroute_opt(optname))
1442 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1443 #endif
1444 return err;
1445 }
1446 EXPORT_SYMBOL(ip_setsockopt);
1447
1448 /*
1449 * Get the options. Note for future reference: the GET of IP options gets
1450 * the _received_ ones, the SET sets the _sent_ ones.
1451 */
1452
1453 static bool getsockopt_needs_rtnl(int optname)
1454 {
1455 switch (optname) {
1456 case IP_MSFILTER:
1457 case MCAST_MSFILTER:
1458 return true;
1459 }
1460 return false;
1461 }
1462
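/*
 * Return the current MCAST_MSFILTER state.  The result is written back
 * into the caller's buffer in place: ip_mc_gsfget() fills the flexible
 * source list directly and the header is copied back with the (possibly
 * clamped) source count.
 */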
1463 static int ip_get_mcast_msfilter(struct sock *sk, void __user *optval,
1464 int __user *optlen, int len)
1465 {
1466 const int size0 = offsetof(struct group_filter, gf_slist_flex);
1467 struct group_filter __user *p = optval;
1468 struct group_filter gsf;
1469 int num;
1470 int err;
1471
1472 if (len < size0)
1473 return -EINVAL;
1474 if (copy_from_user(&gsf, p, size0))
1475 return -EFAULT;
1476
1477 num = gsf.gf_numsrc;
1478 err = ip_mc_gsfget(sk, &gsf, p->gf_slist_flex);
1479 if (err)
1480 return err;
1481 if (gsf.gf_numsrc < num)
1482 num = gsf.gf_numsrc;
1483 if (put_user(GROUP_FILTER_SIZE(num), optlen) ||
1484 copy_to_user(p, &gsf, size0))
1485 return -EFAULT;
1486 return 0;
1487 }
1488
1489 static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval,
1490 int __user *optlen, int len)
1491 {
1492 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
1493 struct compat_group_filter __user *p = optval;
1494 struct compat_group_filter gf32;
1495 struct group_filter gf;
1496 int num;
1497 int err;
1498
1499 if (len < size0)
1500 return -EINVAL;
1501 if (copy_from_user(&gf32, p, size0))
1502 return -EFAULT;
1503
1504 gf.gf_interface = gf32.gf_interface;
1505 gf.gf_fmode = gf32.gf_fmode;
1506 num = gf.gf_numsrc = gf32.gf_numsrc;
1507 gf.gf_group = gf32.gf_group;
1508
1509 err = ip_mc_gsfget(sk, &gf, p->gf_slist_flex);
1510 if (err)
1511 return err;
1512 if (gf.gf_numsrc < num)
1513 num = gf.gf_numsrc;
1514 len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
1515 if (put_user(len, optlen) ||
1516 put_user(gf.gf_fmode, &p->gf_fmode) ||
1517 put_user(gf.gf_numsrc, &p->gf_numsrc))
1518 return -EFAULT;
1519 return 0;
1520 }
1521
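/*
 * Rough userspace sketch (illustrative only, not part of this file):
 * most of the options below read back as a plain int, e.g.
 *
 *	int ttl;
 *	socklen_t len = sizeof(ttl);
 *	getsockopt(fd, SOL_IP, IP_TTL, &ttl, &len);
 */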
1522 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1523 char __user *optval, int __user *optlen)
1524 {
1525 struct inet_sock *inet = inet_sk(sk);
1526 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1527 int val, err = 0;
1528 int len;
1529
1530 if (level != SOL_IP)
1531 return -EOPNOTSUPP;
1532
1533 if (ip_mroute_opt(optname))
1534 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1535
1536 if (get_user(len, optlen))
1537 return -EFAULT;
1538 if (len < 0)
1539 return -EINVAL;
1540
1541 if (needs_rtnl)
1542 rtnl_lock();
1543 lock_sock(sk);
1544
1545 switch (optname) {
1546 case IP_OPTIONS:
1547 {
1548 unsigned char optbuf[sizeof(struct ip_options)+40];
1549 struct ip_options *opt = (struct ip_options *)optbuf;
1550 struct ip_options_rcu *inet_opt;
1551
1552 inet_opt = rcu_dereference_protected(inet->inet_opt,
1553 lockdep_sock_is_held(sk));
1554 opt->optlen = 0;
1555 if (inet_opt)
1556 memcpy(optbuf, &inet_opt->opt,
1557 sizeof(struct ip_options) +
1558 inet_opt->opt.optlen);
1559 release_sock(sk);
1560
1561 if (opt->optlen == 0)
1562 return put_user(0, optlen);
1563
1564 ip_options_undo(opt);
1565
1566 len = min_t(unsigned int, len, opt->optlen);
1567 if (put_user(len, optlen))
1568 return -EFAULT;
1569 if (copy_to_user(optval, opt->__data, len))
1570 return -EFAULT;
1571 return 0;
1572 }
1573 case IP_PKTINFO:
1574 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1575 break;
1576 case IP_RECVTTL:
1577 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1578 break;
1579 case IP_RECVTOS:
1580 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1581 break;
1582 case IP_RECVOPTS:
1583 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1584 break;
1585 case IP_RETOPTS:
1586 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1587 break;
1588 case IP_PASSSEC:
1589 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1590 break;
1591 case IP_RECVORIGDSTADDR:
1592 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1593 break;
1594 case IP_CHECKSUM:
1595 val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
1596 break;
1597 case IP_RECVFRAGSIZE:
1598 val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0;
1599 break;
1600 case IP_TOS:
1601 val = inet->tos;
1602 break;
1603 case IP_TTL:
1604 {
1605 struct net *net = sock_net(sk);
1606 val = (inet->uc_ttl == -1 ?
1607 READ_ONCE(net->ipv4.sysctl_ip_default_ttl) :
1608 inet->uc_ttl);
1609 break;
1610 }
1611 case IP_HDRINCL:
1612 val = inet->hdrincl;
1613 break;
1614 case IP_NODEFRAG:
1615 val = inet->nodefrag;
1616 break;
1617 case IP_BIND_ADDRESS_NO_PORT:
1618 val = inet->bind_address_no_port;
1619 break;
1620 case IP_MTU_DISCOVER:
1621 val = inet->pmtudisc;
1622 break;
1623 case IP_MTU:
1624 {
1625 struct dst_entry *dst;
1626 val = 0;
1627 dst = sk_dst_get(sk);
1628 if (dst) {
1629 val = dst_mtu(dst);
1630 dst_release(dst);
1631 }
1632 if (!val) {
1633 release_sock(sk);
1634 return -ENOTCONN;
1635 }
1636 break;
1637 }
1638 case IP_RECVERR:
1639 val = inet->recverr;
1640 break;
1641 case IP_RECVERR_RFC4884:
1642 val = inet->recverr_rfc4884;
1643 break;
1644 case IP_MULTICAST_TTL:
1645 val = inet->mc_ttl;
1646 break;
1647 case IP_MULTICAST_LOOP:
1648 val = inet->mc_loop;
1649 break;
1650 case IP_UNICAST_IF:
1651 val = (__force int)htonl((__u32) inet->uc_index);
1652 break;
1653 case IP_MULTICAST_IF:
1654 {
1655 struct in_addr addr;
1656 len = min_t(unsigned int, len, sizeof(struct in_addr));
1657 addr.s_addr = inet->mc_addr;
1658 release_sock(sk);
1659
1660 if (put_user(len, optlen))
1661 return -EFAULT;
1662 if (copy_to_user(optval, &addr, len))
1663 return -EFAULT;
1664 return 0;
1665 }
1666 case IP_MSFILTER:
1667 {
1668 struct ip_msfilter msf;
1669
1670 if (len < IP_MSFILTER_SIZE(0)) {
1671 err = -EINVAL;
1672 goto out;
1673 }
1674 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
1675 err = -EFAULT;
1676 goto out;
1677 }
1678 err = ip_mc_msfget(sk, &msf,
1679 (struct ip_msfilter __user *)optval, optlen);
1680 goto out;
1681 }
1682 case MCAST_MSFILTER:
1683 if (in_compat_syscall())
1684 err = compat_ip_get_mcast_msfilter(sk, optval, optlen,
1685 len);
1686 else
1687 err = ip_get_mcast_msfilter(sk, optval, optlen, len);
1688 goto out;
1689 case IP_MULTICAST_ALL:
1690 val = inet->mc_all;
1691 break;
1692 case IP_PKTOPTIONS:
1693 {
1694 struct msghdr msg;
1695
1696 release_sock(sk);
1697
1698 if (sk->sk_type != SOCK_STREAM)
1699 return -ENOPROTOOPT;
1700
1701 msg.msg_control_is_user = true;
1702 msg.msg_control_user = optval;
1703 msg.msg_controllen = len;
1704 msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
1705
1706 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1707 struct in_pktinfo info;
1708
1709 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1710 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1711 info.ipi_ifindex = inet->mc_index;
1712 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1713 }
1714 if (inet->cmsg_flags & IP_CMSG_TTL) {
1715 int hlim = inet->mc_ttl;
1716 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1717 }
1718 if (inet->cmsg_flags & IP_CMSG_TOS) {
1719 int tos = inet->rcv_tos;
1720 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1721 }
1722 len -= msg.msg_controllen;
1723 return put_user(len, optlen);
1724 }
1725 case IP_FREEBIND:
1726 val = inet->freebind;
1727 break;
1728 case IP_TRANSPARENT:
1729 val = inet->transparent;
1730 break;
1731 case IP_MINTTL:
1732 val = inet->min_ttl;
1733 break;
1734 case IP_PROTOCOL:
1735 val = inet_sk(sk)->inet_num;
1736 break;
1737 default:
1738 release_sock(sk);
1739 return -ENOPROTOOPT;
1740 }
1741 release_sock(sk);
1742
1743 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1744 unsigned char ucval = (unsigned char)val;
1745 len = 1;
1746 if (put_user(len, optlen))
1747 return -EFAULT;
1748 if (copy_to_user(optval, &ucval, 1))
1749 return -EFAULT;
1750 } else {
1751 len = min_t(unsigned int, sizeof(int), len);
1752 if (put_user(len, optlen))
1753 return -EFAULT;
1754 if (copy_to_user(optval, &val, len))
1755 return -EFAULT;
1756 }
1757 return 0;
1758
1759 out:
1760 release_sock(sk);
1761 if (needs_rtnl)
1762 rtnl_unlock();
1763 return err;
1764 }
1765
1766 int ip_getsockopt(struct sock *sk, int level,
1767 int optname, char __user *optval, int __user *optlen)
1768 {
1769 int err;
1770
1771 err = do_ip_getsockopt(sk, level, optname, optval, optlen);
1772
1773 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
1774 if (optname >= BPFILTER_IPT_SO_GET_INFO &&
1775 optname < BPFILTER_IPT_GET_MAX)
1776 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
1777 #endif
1778 #ifdef CONFIG_NETFILTER
1779 /* we need to exclude all possible ENOPROTOOPTs except default case */
1780 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1781 !ip_mroute_opt(optname)) {
1782 int len;
1783
1784 if (get_user(len, optlen))
1785 return -EFAULT;
1786
1787 err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
1788 if (err >= 0)
1789 err = put_user(len, optlen);
1790 return err;
1791 }
1792 #endif
1793 return err;
1794 }
1795 EXPORT_SYMBOL(ip_getsockopt);
1796