// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <trace/events/udp.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
#include <net/gro.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

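/* Connection hash over the 4-tuple: the last 32 bits of the local address
 * are mixed with a jhash of the remote address and both ports, keyed by
 * boot-time random secrets so the hash is unpredictable to remote peers.
 */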
INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

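/* Two secondary (port+address) hashes are derived here: one for the
 * wildcard address, used by udp_lib_get_port() for conflict checks, and a
 * partial one for the bound address computed with port 0, completed later
 * once the final port is known.
 */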
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

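/* Score a candidate socket against the packet's 4-tuple: -1 means "does not
 * match at all"; otherwise one point is added for each attribute the socket
 * has pinned down (connected peer port, connected peer address, bound
 * device, matching incoming CPU), so the most specific socket wins.
 */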
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

/**
 * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
 * @net:	Network namespace
 * @saddr:	Source address, network order
 * @sport:	Source port, network order
 * @daddr:	Destination address, network order
 * @hnum:	Destination port, host order
 * @dif:	Destination interface index
 * @sdif:	Destination bridge port index, if relevant
 * @udptable:	Set of UDP hash tables
 *
 * Simplified lookup to be used as fallback if no sockets are found due to a
 * potential race between (receive) address change, and lookup happening before
 * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
 * result sockets, because if we have one, we don't need the fallback at all.
 *
 * Called under rcu_read_lock().
 *
 * Return: socket with highest matching score if any, NULL if none
 */
static struct sock *udp6_lib_lookup1(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     const struct udp_table *udptable)
{
	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot = &udptable->hash[slot];
	struct sock *sk, *result = NULL;
	int score, badness = 0;

	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, (struct net *)net,
				      (struct in6_addr *)saddr, sport,
				      (struct in6_addr *)daddr, hnum,
				      dif, sdif);
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}

	return result;
}

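/* Secondary-hash lookup: walk the (port, address) chain, score each socket,
 * and for SO_REUSEPORT groups let inet6_lookup_reuseport() pick a member by
 * 4-tuple hash instead of simply taking the first group member found.
 */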
/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	bool need_rescore;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		need_rescore = false;
rescore:
		score = compute_score(need_rescore ? result : sk, net, saddr,
				      sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (need_rescore)
				continue;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			/* compute_score is too long of a function to be
			 * inlined, and calling it again here yields
			 * measurable overhead for some workloads. Work
			 * around it by jumping backwards to rescore
			 * 'result'.
			 */
			need_rescore = true;
			goto rescore;
		}
	}
	return result;
}

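/* Full receive lookup, in order of preference: exact/connected match in the
 * (port, daddr) secondary hash, then a BPF sk_lookup redirect if one is
 * installed, then wildcard-address sockets, and finally the primary-hash
 * fallback that covers the address-change/rehash race.
 */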
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result))
		goto done;

	/* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
	result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
				  udptable);

done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet_cmsg_flags(inet))
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

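/* ICMPv6 error demux: look up the socket for the embedded packet; if there
 * is none (or it is an encapsulation socket), try the tunnel error handlers
 * before giving up. PMTU and redirect messages update routing state here;
 * other errors are queued to the application only if IPV6_RECVERR is set.
 */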
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh+1));
		goto out;
	}

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb_reason(skb, drop_reason);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		if (pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb_reason(skb, drop_reason);
	return -1;
}

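/* GSO packets the socket cannot consume as-is are resegmented here and each
 * segment is queued individually; a positive return from the per-segment
 * handler is a protocol number, and the segment is resubmitted to protocol
 * dispatch under that protocol.
 */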
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

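/* Multicast delivery clones the skb for every matching socket except the
 * first one found, which receives the original skb at the end; that saves
 * one clone in the common single-listener case.
 */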
/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_get_no_check6_rx(sk))
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst))
		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			      &refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_get_no_check6_rx(sk)) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_get_no_check6_rx(sk))
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}
no_sk:
	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb_reason(skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb_reason(skb, reason);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

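/* Early demux runs before the routing decision: it only considers connected
 * (TCP_ESTABLISHED) sockets for packets addressed to this host, and caches
 * both the socket and its rx dst on the skb so the regular lookup and route
 * decision can be skipped later in the receive path.
 */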
void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

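	/* UDP GSO sanity checks: the headers plus one segment must fit within
	 * the corked fragment size, the payload may not exceed
	 * UDP_MAX_SEGMENTS segments, and GSO is incompatible with
	 * zero-checksum transmit, UDP-Lite, and xfrm-transformed routes.
	 */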
	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
			kfree_skb(skb);
			return -EMSGSIZE;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_get_no_check6_tx(sk)) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!READ_ONCE(up->pending)) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

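	/* IPv4-mapped destinations are handed to the IPv4 sendmsg path below,
	 * unless the socket is restricted to IPv6 only.
	 */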
	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;

			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (READ_ONCE(up->pending)) {
		if (READ_ONCE(up->pending) == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0) {
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
			connected = false;
		}
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &addr_len,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   dst_rt6_info(dst),
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

1647
1648 lock_sock(sk);
1649 if (unlikely(up->pending)) {
1650 /* The socket is already corked while preparing it. */
1651 /* ... which is an evident application bug. --ANK */
1652 release_sock(sk);
1653
1654 net_dbg_ratelimited("udp cork app bug 2\n");
1655 err = -EINVAL;
1656 goto out;
1657 }
1658
1659 WRITE_ONCE(up->pending, AF_INET6);
1660
1661 do_append_data:
1662 if (ipc6.dontfrag < 0)
1663 ipc6.dontfrag = np->dontfrag;
1664 up->len += ulen;
1665 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1666 &ipc6, fl6, dst_rt6_info(dst),
1667 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1668 if (err)
1669 udp_v6_flush_pending_frames(sk);
1670 else if (!corkreq)
1671 err = udp_v6_push_pending_frames(sk);
1672 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1673 WRITE_ONCE(up->pending, 0);
1674
1675 if (err > 0)
1676 err = np->recverr ? net_xmit_errno(err) : 0;
1677 release_sock(sk);
1678
1679 out:
1680 dst_release(dst);
1681 out_no_dst:
1682 fl6_sock_release(flowlabel);
1683 txopt_put(opt_to_free);
1684 if (!err)
1685 return len;
1686 /*
1687 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
1688 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1689 * we don't have a good statistic (IpOutDiscards but it can be too many
1690 * things). We could add another new stat but at least for now that
1691 * seems like overkill.
1692 */
1693 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1694 UDP6_INC_STATS(sock_net(sk),
1695 UDP_MIB_SNDBUFERRORS, is_udplite);
1696 }
1697 return err;
1698
1699 do_confirm:
1700 if (msg->msg_flags & MSG_PROBE)
1701 dst_confirm_neigh(dst, &fl6->daddr);
1702 if (!(msg->msg_flags&MSG_PROBE) || len)
1703 goto back_from_confirm;
1704 err = 0;
1705 goto out;
1706 }
1707 EXPORT_SYMBOL(udpv6_sendmsg);
1708
static void udpv6_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_v6_push_pending_frames(sk);
	release_sock(sk);
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);

			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk)) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

static const struct inet6_protocol udpv6_protocol = {
	.handler	= udpv6_rcv,
	.err_handler	= udpv6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		const struct inet_sock *inet = inet_sk((const struct sock *)v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);

		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= NULL,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.splice_eof		= udpv6_splice_eof,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.ipv6_pinfo_offset	= offsetof(struct udp6_sock, inet6),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}