// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

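/* Compute the 4-tuple connection hash used for UDPv6 socket lookup. Only
 * the last 32 bits of the local address feed the hash; both hash secrets
 * are initialized lazily, once, on first use.
 */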
static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

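/* Recompute the port/address hash after the socket's bound local address
 * changed and move the socket to the matching hash chain.
 */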
void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

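/* Score how well @sk matches the incoming packet: -1 means no match at
 * all, while higher scores mean a more specific (connected and/or
 * device-bound) match. Used to pick the best candidate in a hash chain.
 */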
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

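/* If @sk belongs to a SO_REUSEPORT group, let the reuseport logic (hash
 * distribution or an attached BPF program) pick the group member that
 * should receive this packet.
 */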
static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     const struct in6_addr *saddr,
				     __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}

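/* Give a BPF sk_lookup program the chance to select the receiving socket
 * before the regular hash table lookup is consulted.
 */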
static inline struct sock *udp6_lookup_run_bpf(struct net *net,
					       struct udp_table *udptable,
					       struct sk_buff *skb,
					       const struct in6_addr *saddr,
					       __be16 sport,
					       const struct in6_addr *daddr,
					       u16 hnum)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
					    saddr, sport, daddr, hnum, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp6_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						(struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

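/* Final enqueue step: save the rx hash and NAPI id for connected sockets,
 * then charge the skb to the socket receive queue; on failure, account
 * the drop in the MIB counters and free the skb.
 */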
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

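/* Deliver one skb to a UDPv6 socket: run the XFRM policy check and the
 * optional encap_rcv() hook, apply the UDP-Lite coverage tests, verify
 * the checksum when required, then queue the packet on the socket.
 */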
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

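/* Queueing entry point: split unexpected GSO packets into individual
 * segments and deliver each one separately.
 */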
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

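/* Check whether @sk is a multicast listener matching the packet's
 * source/destination addresses, ports and incoming device.
 */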
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

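/* Cache the rx dst on the socket together with its route cookie so that
 * early demux can validate it later.
 */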
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

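/* Main UDPv6 receive routine: validate length and checksum, then deliver
 * to an early-demuxed, multicast or looked-up unicast socket; send an
 * ICMPv6 port unreachable if no socket matches.
 */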
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb, &refcounted);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

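/* Early-demux helper: find an established (connected) UDPv6 socket for
 * the incoming 4-tuple; only the first socket in the chain is examined.
 */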
static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

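/* Early demux: attach a connected socket and its validated cached dst to
 * the skb before full protocol processing runs.
 */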
void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing - handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

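/* Collapse the corked data into one skb and transmit it; the pending
 * state is reset whether or not the transmission succeeds.
 */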
static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}

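/* UDPv6 sendmsg: handles the v4-mapped fallback to udp_sendmsg(), cork
 * management, flow label and cmsg processing, the route lookup, and a
 * lockless fast path for non-corked sends.
 */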
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = sk->sk_mark;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	 * better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.flowi6_mark = ipc6.sockc.mark;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things). We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

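/* Tear down a UDPv6 socket: flush corked data, run the encap destroy
 * hook if one is set, and drop the encap static key reference.
 */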
void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}

	inet6_destroy_sock(sk);
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

static const struct inet6_protocol udpv6_protocol = {
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}