1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * UDP over IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 *
9 * Based on linux/ipv4/udp.c
10 *
11 * Fixes:
12 * Hideaki YOSHIFUJI : sin6_scope_id support
13 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
14 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
15 * a single port at the same time.
16 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
17 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
18 */
19
20 #include <linux/bpf-cgroup.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/in6.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/ipv6.h>
30 #include <linux/icmpv6.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
33 #include <linux/skbuff.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/indirect_call_wrapper.h>
37
38 #include <net/addrconf.h>
39 #include <net/ndisc.h>
40 #include <net/protocol.h>
41 #include <net/transp_v6.h>
42 #include <net/ip6_route.h>
43 #include <net/raw.h>
44 #include <net/seg6.h>
45 #include <net/tcp_states.h>
46 #include <net/ip6_checksum.h>
47 #include <net/ip6_tunnel.h>
48 #include <trace/events/udp.h>
49 #include <net/xfrm.h>
50 #include <net/inet_hashtables.h>
51 #include <net/inet6_hashtables.h>
52 #include <net/busy_poll.h>
53 #include <net/sock_reuseport.h>
54 #include <net/gro.h>
55
56 #include <linux/proc_fs.h>
57 #include <linux/seq_file.h>
58 #include <trace/events/skb.h>
59 #include "udp_impl.h"
60 #include <trace/hooks/net.h>
61
62 static void udpv6_destruct_sock(struct sock *sk)
63 {
64 udp_destruct_common(sk);
65 inet6_sock_destruct(sk);
66 }
67
68 int udpv6_init_sock(struct sock *sk)
69 {
70 udp_lib_init_sock(sk);
71 sk->sk_destruct = udpv6_destruct_sock;
72 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
73 return 0;
74 }
75
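/* udp6_ehashfn() computes the 4-tuple hash used for connected-socket demux
 * and for SO_REUSEPORT socket selection.  Only the low 32 bits of the local
 * address (laddr->s6_addr32[3]) feed the hash directly; the full foreign
 * address is mixed in via __ipv6_addr_jhash() with a boot-time random
 * secret, and net_hash_mix() keeps hashes distinct across netns.
 */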
76 INDIRECT_CALLABLE_SCOPE
77 u32 udp6_ehashfn(const struct net *net,
78 const struct in6_addr *laddr,
79 const u16 lport,
80 const struct in6_addr *faddr,
81 const __be16 fport)
82 {
83 static u32 udp6_ehash_secret __read_mostly;
84 static u32 udp_ipv6_hash_secret __read_mostly;
85
86 u32 lhash, fhash;
87
88 net_get_random_once(&udp6_ehash_secret,
89 sizeof(udp6_ehash_secret));
90 net_get_random_once(&udp_ipv6_hash_secret,
91 sizeof(udp_ipv6_hash_secret));
92
93 lhash = (__force u32)laddr->s6_addr32[3];
94 fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
95
96 return __inet6_ehashfn(lhash, lport, fhash, fport,
97 udp6_ehash_secret + net_hash_mix(net));
98 }
99
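/* Bind-time hashing: udp_lib_get_port() is given the wildcard hash for
 * (in6addr_any, port), while a partial hash of the bound address with a
 * zero port is cached in udp_portaddr_hash so the chosen port can later be
 * folded in without re-hashing the address.
 */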
100 int udp_v6_get_port(struct sock *sk, unsigned short snum)
101 {
102 unsigned int hash2_nulladdr =
103 ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
104 unsigned int hash2_partial =
105 ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
106
107 /* precompute partial secondary hash */
108 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
109 return udp_lib_get_port(sk, snum, hash2_nulladdr);
110 }
111
112 void udp_v6_rehash(struct sock *sk)
113 {
114 u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
115 &sk->sk_v6_rcv_saddr,
116 inet_sk(sk)->inet_num);
117
118 udp_lib_rehash(sk, new_hash);
119 }
120
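/* Scoring used by the lookups below.  A candidate must be in the right
 * netns, hashed to the requested port, be a PF_INET6 socket and have an
 * rcv_saddr equal to the address of the current pass (the packet's daddr,
 * or in6addr_any on the wildcard pass), otherwise it is rejected with -1.
 * It then earns one point per matching connected attribute (peer port,
 * peer address, bound device) and one more if it was last handled on this
 * CPU, so more specific sockets win.
 */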
121 static int compute_score(struct sock *sk, struct net *net,
122 const struct in6_addr *saddr, __be16 sport,
123 const struct in6_addr *daddr, unsigned short hnum,
124 int dif, int sdif)
125 {
126 int bound_dev_if, score;
127 struct inet_sock *inet;
128 bool dev_match;
129
130 if (!net_eq(sock_net(sk), net) ||
131 udp_sk(sk)->udp_port_hash != hnum ||
132 sk->sk_family != PF_INET6)
133 return -1;
134
135 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
136 return -1;
137
138 score = 0;
139 inet = inet_sk(sk);
140
141 if (inet->inet_dport) {
142 if (inet->inet_dport != sport)
143 return -1;
144 score++;
145 }
146
147 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
148 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
149 return -1;
150 score++;
151 }
152
153 bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
154 dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
155 if (!dev_match)
156 return -1;
157 if (bound_dev_if)
158 score++;
159
160 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
161 score++;
162
163 return score;
164 }
165
166 /* called with rcu_read_lock() */
167 static struct sock *udp6_lib_lookup2(struct net *net,
168 const struct in6_addr *saddr, __be16 sport,
169 const struct in6_addr *daddr, unsigned int hnum,
170 int dif, int sdif, struct udp_hslot *hslot2,
171 struct sk_buff *skb)
172 {
173 struct sock *sk, *result;
174 int score, badness;
175 bool need_rescore;
176
177 result = NULL;
178 badness = -1;
179 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
180 need_rescore = false;
181 rescore:
182 score = compute_score(need_rescore ? result : sk, net, saddr,
183 sport, daddr, hnum, dif, sdif);
184 if (score > badness) {
185 badness = score;
186
187 if (need_rescore)
188 continue;
189
190 if (sk->sk_state == TCP_ESTABLISHED) {
191 result = sk;
192 continue;
193 }
194
195 result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
196 saddr, sport, daddr, hnum, udp6_ehashfn);
197 if (!result) {
198 result = sk;
199 continue;
200 }
201
202 /* Fall back to scoring if group has connections */
203 if (!reuseport_has_conns(sk))
204 return result;
205
206 /* Reuseport logic returned an error, keep original score. */
207 if (IS_ERR(result))
208 continue;
209
210 /* compute_score is too long of a function to be
211 * inlined, and calling it again here yields
212 * measurable overhead for some
213 * workloads. Work around it by jumping
214 * backwards to rescore 'result'.
215 */
216 need_rescore = true;
217 goto rescore;
218 }
219 }
220 return result;
221 }
222
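/* Main lookup: probe the hash2 slot keyed on (daddr, port) first, which
 * covers connected and address-bound sockets.  If that does not produce an
 * established match, give a BPF sk_lookup program a chance to steer the
 * packet, then fall back to the (in6addr_any, port) slot for wildcard
 * sockets.  Errors from reuseport selection are reported to callers as
 * "no socket".
 */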
223 /* rcu_read_lock() must be held */
224 struct sock *__udp6_lib_lookup(struct net *net,
225 const struct in6_addr *saddr, __be16 sport,
226 const struct in6_addr *daddr, __be16 dport,
227 int dif, int sdif, struct udp_table *udptable,
228 struct sk_buff *skb)
229 {
230 unsigned short hnum = ntohs(dport);
231 unsigned int hash2, slot2;
232 struct udp_hslot *hslot2;
233 struct sock *result, *sk;
234
235 hash2 = ipv6_portaddr_hash(net, daddr, hnum);
236 slot2 = hash2 & udptable->mask;
237 hslot2 = &udptable->hash2[slot2];
238
239 /* Lookup connected or non-wildcard sockets */
240 result = udp6_lib_lookup2(net, saddr, sport,
241 daddr, hnum, dif, sdif,
242 hslot2, skb);
243 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
244 goto done;
245
246 /* Lookup redirect from BPF */
247 if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
248 udptable == net->ipv4.udp_table) {
249 sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
250 saddr, sport, daddr, hnum, dif,
251 udp6_ehashfn);
252 if (sk) {
253 result = sk;
254 goto done;
255 }
256 }
257
258 /* Got non-wildcard socket or error on first lookup */
259 if (result)
260 goto done;
261
262 /* Lookup wildcard sockets */
263 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
264 slot2 = hash2 & udptable->mask;
265 hslot2 = &udptable->hash2[slot2];
266
267 result = udp6_lib_lookup2(net, saddr, sport,
268 &in6addr_any, hnum, dif, sdif,
269 hslot2, skb);
270 done:
271 if (IS_ERR(result))
272 return NULL;
273 return result;
274 }
275 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
276
277 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
278 __be16 sport, __be16 dport,
279 struct udp_table *udptable)
280 {
281 const struct ipv6hdr *iph = ipv6_hdr(skb);
282
283 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
284 &iph->daddr, dport, inet6_iif(skb),
285 inet6_sdif(skb), udptable, skb);
286 }
287
288 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
289 __be16 sport, __be16 dport)
290 {
291 const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
292 const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
293 struct net *net = dev_net(skb->dev);
294 int iif, sdif;
295
296 inet6_get_iif_sdif(skb, &iif, &sdif);
297
298 return __udp6_lib_lookup(net, &iph->saddr, sport,
299 &iph->daddr, dport, iif,
300 sdif, net->ipv4.udp_table, NULL);
301 }
302
303 /* Must be called under rcu_read_lock().
304 * Does increment socket refcount.
305 */
306 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
307 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
308 const struct in6_addr *daddr, __be16 dport, int dif)
309 {
310 struct sock *sk;
311
312 sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
313 dif, 0, net->ipv4.udp_table, NULL);
314 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
315 sk = NULL;
316 return sk;
317 }
318 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
319 #endif
320
321 /* do not use the scratch area len for jumbograms: their length exceeds the
322 * scratch area space; note that the IP6CB flags field is still in the first
323 * cacheline, so checking for jumbograms is cheap
324 */
325 static int udp6_skb_len(struct sk_buff *skb)
326 {
327 return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
328 }
329
330 /*
331 * This should be easy: if there is something there, we
332 * return it; otherwise we block.
333 */
334
335 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
336 int flags, int *addr_len)
337 {
338 struct ipv6_pinfo *np = inet6_sk(sk);
339 struct inet_sock *inet = inet_sk(sk);
340 struct sk_buff *skb;
341 unsigned int ulen, copied;
342 int off, err, peeking = flags & MSG_PEEK;
343 int is_udplite = IS_UDPLITE(sk);
344 struct udp_mib __percpu *mib;
345 bool checksum_valid = false;
346 int is_udp4;
347
348 if (flags & MSG_ERRQUEUE)
349 return ipv6_recv_error(sk, msg, len, addr_len);
350
351 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
352 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
353
354 try_again:
355 off = sk_peek_offset(sk, flags);
356 skb = __skb_recv_udp(sk, flags, &off, &err);
357 if (!skb)
358 return err;
359
360 ulen = udp6_skb_len(skb);
361 copied = len;
362 if (copied > ulen - off)
363 copied = ulen - off;
364 else if (copied < ulen)
365 msg->msg_flags |= MSG_TRUNC;
366
367 is_udp4 = (skb->protocol == htons(ETH_P_IP));
368 mib = __UDPX_MIB(sk, is_udp4);
369
370 /*
371 * If checksum is needed at all, try to do it while copying the
372 * data. If the data is truncated, or if we only want a partial
373 * coverage checksum (UDP-Lite), do it before the copy.
374 */
375
376 if (copied < ulen || peeking ||
377 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
378 checksum_valid = udp_skb_csum_unnecessary(skb) ||
379 !__udp_lib_checksum_complete(skb);
380 if (!checksum_valid)
381 goto csum_copy_err;
382 }
383
384 if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
385 if (udp_skb_is_linear(skb))
386 err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
387 else
388 err = skb_copy_datagram_msg(skb, off, msg, copied);
389 } else {
390 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
391 if (err == -EINVAL)
392 goto csum_copy_err;
393 }
394 if (unlikely(err)) {
395 if (!peeking) {
396 atomic_inc(&sk->sk_drops);
397 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
398 }
399 kfree_skb(skb);
400 return err;
401 }
402 if (!peeking)
403 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
404
405 sock_recv_cmsgs(msg, sk, skb);
406
407 /* Copy the address. */
408 if (msg->msg_name) {
409 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
410 sin6->sin6_family = AF_INET6;
411 sin6->sin6_port = udp_hdr(skb)->source;
412 sin6->sin6_flowinfo = 0;
413
414 if (is_udp4) {
415 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
416 &sin6->sin6_addr);
417 sin6->sin6_scope_id = 0;
418 } else {
419 sin6->sin6_addr = ipv6_hdr(skb)->saddr;
420 sin6->sin6_scope_id =
421 ipv6_iface_scope_id(&sin6->sin6_addr,
422 inet6_iif(skb));
423 }
424 *addr_len = sizeof(*sin6);
425
426 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
427 (struct sockaddr *)sin6,
428 addr_len);
429 }
430
431 if (udp_test_bit(GRO_ENABLED, sk))
432 udp_cmsg_recv(msg, sk, skb);
433
434 if (np->rxopt.all)
435 ip6_datagram_recv_common_ctl(sk, msg, skb);
436
437 if (is_udp4) {
438 if (inet_cmsg_flags(inet))
439 ip_cmsg_recv_offset(msg, sk, skb,
440 sizeof(struct udphdr), off);
441 } else {
442 if (np->rxopt.all)
443 ip6_datagram_recv_specific_ctl(sk, msg, skb);
444 }
445
446 err = copied;
447 if (flags & MSG_TRUNC)
448 err = ulen;
449
450 trace_android_rvh_udpv6_recvmsg(sk, msg, len, flags, addr_len);
451
452 skb_consume_udp(sk, skb, peeking ? -err : err);
453 return err;
454
455 csum_copy_err:
456 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
457 udp_skb_destructor)) {
458 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
459 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
460 }
461 kfree_skb(skb);
462
463 /* starting over for a new packet, but check if we need to yield */
464 cond_resched();
465 msg->msg_flags &= ~MSG_TRUNC;
466 goto try_again;
467 }
468
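/* Static branch guarding the encapsulation hooks: it stays disabled until
 * the first encapsulating socket (e.g. an L2TP or UDP tunnel socket)
 * enables it, so the common non-tunnel receive path pays nothing for them.
 */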
469 DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
470 void udpv6_encap_enable(void)
471 {
472 static_branch_inc(&udpv6_encap_needed_key);
473 }
474 EXPORT_SYMBOL(udpv6_encap_enable);
475
476 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
477 * through error handlers in encapsulations looking for a match.
478 */
479 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
480 struct inet6_skb_parm *opt,
481 u8 type, u8 code, int offset, __be32 info)
482 {
483 int i;
484
485 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
486 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
487 u8 type, u8 code, int offset, __be32 info);
488 const struct ip6_tnl_encap_ops *encap;
489
490 encap = rcu_dereference(ip6tun_encaps[i]);
491 if (!encap)
492 continue;
493 handler = encap->err_handler;
494 if (handler && !handler(skb, opt, type, code, offset, info))
495 return 0;
496 }
497
498 return -ENOENT;
499 }
500
501 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
502 * reversing source and destination port: this will match tunnels that force the
503 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
504 * lwtunnels might actually break this assumption by being configured with
505 * different destination ports on endpoints, in this case we won't be able to
506 * trace ICMP messages back to them.
507 *
508 * If this doesn't match any socket, probe tunnels with arbitrary destination
509 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
510 * we've sent packets to won't necessarily match the local destination port.
511 *
512 * Then ask the tunnel implementation to match the error against a valid
513 * association.
514 *
515 * Return an error if we can't find a match, the socket if we need further
516 * processing, zero otherwise.
517 */
518 static struct sock *__udp6_lib_err_encap(struct net *net,
519 const struct ipv6hdr *hdr, int offset,
520 struct udphdr *uh,
521 struct udp_table *udptable,
522 struct sock *sk,
523 struct sk_buff *skb,
524 struct inet6_skb_parm *opt,
525 u8 type, u8 code, __be32 info)
526 {
527 int (*lookup)(struct sock *sk, struct sk_buff *skb);
528 int network_offset, transport_offset;
529 struct udp_sock *up;
530
531 network_offset = skb_network_offset(skb);
532 transport_offset = skb_transport_offset(skb);
533
534 /* Network header needs to point to the outer IPv6 header inside ICMP */
535 skb_reset_network_header(skb);
536
537 /* Transport header needs to point to the UDP header */
538 skb_set_transport_header(skb, offset);
539
540 if (sk) {
541 up = udp_sk(sk);
542
543 lookup = READ_ONCE(up->encap_err_lookup);
544 if (lookup && lookup(sk, skb))
545 sk = NULL;
546
547 goto out;
548 }
549
550 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
551 &hdr->saddr, uh->dest,
552 inet6_iif(skb), 0, udptable, skb);
553 if (sk) {
554 up = udp_sk(sk);
555
556 lookup = READ_ONCE(up->encap_err_lookup);
557 if (!lookup || lookup(sk, skb))
558 sk = NULL;
559 }
560
561 out:
562 if (!sk) {
563 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
564 offset, info));
565 }
566
567 skb_set_transport_header(skb, transport_offset);
568 skb_set_network_header(skb, network_offset);
569
570 return sk;
571 }
572
573 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
574 u8 type, u8 code, int offset, __be32 info,
575 struct udp_table *udptable)
576 {
577 struct ipv6_pinfo *np;
578 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
579 const struct in6_addr *saddr = &hdr->saddr;
580 const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
581 struct udphdr *uh = (struct udphdr *)(skb->data+offset);
582 bool tunnel = false;
583 struct sock *sk;
584 int harderr;
585 int err;
586 struct net *net = dev_net(skb->dev);
587
588 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
589 inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
590
591 if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
592 /* No socket for error: try tunnels before discarding */
593 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
594 sk = __udp6_lib_err_encap(net, hdr, offset, uh,
595 udptable, sk, skb,
596 opt, type, code, info);
597 if (!sk)
598 return 0;
599 } else
600 sk = ERR_PTR(-ENOENT);
601
602 if (IS_ERR(sk)) {
603 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
604 ICMP6_MIB_INERRORS);
605 return PTR_ERR(sk);
606 }
607
608 tunnel = true;
609 }
610
611 harderr = icmpv6_err_convert(type, code, &err);
612 np = inet6_sk(sk);
613
614 if (type == ICMPV6_PKT_TOOBIG) {
615 if (!ip6_sk_accept_pmtu(sk))
616 goto out;
617 ip6_sk_update_pmtu(skb, sk, info);
618 if (np->pmtudisc != IPV6_PMTUDISC_DONT)
619 harderr = 1;
620 }
621 if (type == NDISC_REDIRECT) {
622 if (tunnel) {
623 ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
624 READ_ONCE(sk->sk_mark), sk->sk_uid);
625 } else {
626 ip6_sk_redirect(skb, sk);
627 }
628 goto out;
629 }
630
631 /* Tunnels don't have an application socket: don't pass errors back */
632 if (tunnel) {
633 if (udp_sk(sk)->encap_err_rcv)
634 udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
635 ntohl(info), (u8 *)(uh+1));
636 goto out;
637 }
638
639 if (!np->recverr) {
640 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
641 goto out;
642 } else {
643 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
644 }
645
646 sk->sk_err = err;
647 sk_error_report(sk);
648 out:
649 return 0;
650 }
651
652 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
653 {
654 int rc;
655
656 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
657 sock_rps_save_rxhash(sk, skb);
658 sk_mark_napi_id(sk, skb);
659 sk_incoming_cpu_update(sk);
660 } else {
661 sk_mark_napi_id_once(sk, skb);
662 }
663
664 rc = __udp_enqueue_schedule_skb(sk, skb);
665 if (rc < 0) {
666 int is_udplite = IS_UDPLITE(sk);
667 enum skb_drop_reason drop_reason;
668
669 /* Note that an ENOMEM error is charged twice */
670 if (rc == -ENOMEM) {
671 UDP6_INC_STATS(sock_net(sk),
672 UDP_MIB_RCVBUFERRORS, is_udplite);
673 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
674 } else {
675 UDP6_INC_STATS(sock_net(sk),
676 UDP_MIB_MEMERRORS, is_udplite);
677 drop_reason = SKB_DROP_REASON_PROTO_MEM;
678 }
679 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
680 kfree_skb_reason(skb, drop_reason);
681 trace_udp_fail_queue_rcv_skb(rc, sk);
682 return -1;
683 }
684
685 return 0;
686 }
687
688 static __inline__ int udpv6_err(struct sk_buff *skb,
689 struct inet6_skb_parm *opt, u8 type,
690 u8 code, int offset, __be32 info)
691 {
692 return __udp6_lib_err(skb, opt, type, code, offset, info,
693 dev_net(skb->dev)->ipv4.udp_table);
694 }
695
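/* Per-socket receive processing for one datagram: XFRM policy check,
 * optional hand-off to the socket's encap_rcv() hook, UDP-Lite coverage
 * checks, checksum verification when a socket filter is attached, socket
 * filtering, and finally enqueueing via __udpv6_queue_rcv_skb().
 */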
696 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
697 {
698 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
699 struct udp_sock *up = udp_sk(sk);
700 int is_udplite = IS_UDPLITE(sk);
701
702 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
703 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
704 goto drop;
705 }
706 nf_reset_ct(skb);
707
708 if (static_branch_unlikely(&udpv6_encap_needed_key) &&
709 READ_ONCE(up->encap_type)) {
710 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
711
712 /*
713 * This is an encapsulation socket so pass the skb to
714 * the socket's udp_encap_rcv() hook. Otherwise, just
715 * fall through and pass this up the UDP socket.
716 * up->encap_rcv() returns the following value:
717 * =0 if skb was successfully passed to the encap
718 * handler or was discarded by it.
719 * >0 if skb should be passed on to UDP.
720 * <0 if skb should be resubmitted as proto -N
721 */
722
723 /* if we're overly short, let UDP handle it */
724 encap_rcv = READ_ONCE(up->encap_rcv);
725 if (encap_rcv) {
726 int ret;
727
728 /* Verify checksum before giving to encap */
729 if (udp_lib_checksum_complete(skb))
730 goto csum_error;
731
732 ret = encap_rcv(sk, skb);
733 if (ret <= 0) {
734 __UDP6_INC_STATS(sock_net(sk),
735 UDP_MIB_INDATAGRAMS,
736 is_udplite);
737 return -ret;
738 }
739 }
740
741 /* FALLTHROUGH -- it's a UDP packet */
742 }
743
744 /*
745 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
746 */
747 if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
748 u16 pcrlen = READ_ONCE(up->pcrlen);
749
750 if (pcrlen == 0) { /* full coverage was set */
751 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
752 UDP_SKB_CB(skb)->cscov, skb->len);
753 goto drop;
754 }
755 if (UDP_SKB_CB(skb)->cscov < pcrlen) {
756 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
757 UDP_SKB_CB(skb)->cscov, pcrlen);
758 goto drop;
759 }
760 }
761
762 prefetch(&sk->sk_rmem_alloc);
763 if (rcu_access_pointer(sk->sk_filter) &&
764 udp_lib_checksum_complete(skb))
765 goto csum_error;
766
767 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
768 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
769 goto drop;
770 }
771
772 udp_csum_pull_header(skb);
773
774 skb_dst_drop(skb);
775
776 return __udpv6_queue_rcv_skb(sk, skb);
777
778 csum_error:
779 drop_reason = SKB_DROP_REASON_UDP_CSUM;
780 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
781 drop:
782 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
783 atomic_inc(&sk->sk_drops);
784 kfree_skb_reason(skb, drop_reason);
785 return -1;
786 }
787
788 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
789 {
790 struct sk_buff *next, *segs;
791 int ret;
792
793 if (likely(!udp_unexpected_gso(sk, skb)))
794 return udpv6_queue_rcv_one_skb(sk, skb);
795
796 __skb_push(skb, -skb_mac_offset(skb));
797 segs = udp_rcv_segment(sk, skb, false);
798 skb_list_walk_safe(segs, skb, next) {
799 __skb_pull(skb, skb_transport_offset(skb));
800
801 udp_post_segment_fix_csum(skb);
802 ret = udpv6_queue_rcv_one_skb(sk, skb);
803 if (ret > 0)
804 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
805 true);
806 }
807 return 0;
808 }
809
810 static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
811 __be16 loc_port, const struct in6_addr *loc_addr,
812 __be16 rmt_port, const struct in6_addr *rmt_addr,
813 int dif, int sdif, unsigned short hnum)
814 {
815 const struct inet_sock *inet = inet_sk(sk);
816
817 if (!net_eq(sock_net(sk), net))
818 return false;
819
820 if (udp_sk(sk)->udp_port_hash != hnum ||
821 sk->sk_family != PF_INET6 ||
822 (inet->inet_dport && inet->inet_dport != rmt_port) ||
823 (!ipv6_addr_any(&sk->sk_v6_daddr) &&
824 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
825 !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
826 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
827 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
828 return false;
829 if (!inet6_mc_check(sk, loc_addr, rmt_addr))
830 return false;
831 return true;
832 }
833
834 static void udp6_csum_zero_error(struct sk_buff *skb)
835 {
836 /* RFC 2460 section 8.1 says that we SHOULD log
837 * this error. Well, it is reasonable.
838 */
839 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
840 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
841 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
842 }
843
844 /*
845 * Note: called only from the BH handler context,
846 * so we don't need to lock the hashes.
847 */
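/* Delivery strategy: every matching socket except the first gets a clone of
 * the skb; the original is queued on the first match at the end (or freed
 * and counted as ignored multicast if nothing matched).  When the primary
 * slot chain is long (> 10 entries), the secondary portaddr hash narrows
 * the walk.
 */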
848 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
849 const struct in6_addr *saddr, const struct in6_addr *daddr,
850 struct udp_table *udptable, int proto)
851 {
852 struct sock *sk, *first = NULL;
853 const struct udphdr *uh = udp_hdr(skb);
854 unsigned short hnum = ntohs(uh->dest);
855 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
856 unsigned int offset = offsetof(typeof(*sk), sk_node);
857 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
858 int dif = inet6_iif(skb);
859 int sdif = inet6_sdif(skb);
860 struct hlist_node *node;
861 struct sk_buff *nskb;
862
863 if (use_hash2) {
864 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
865 udptable->mask;
866 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
867 start_lookup:
868 hslot = &udptable->hash2[hash2];
869 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
870 }
871
872 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
873 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
874 uh->source, saddr, dif, sdif,
875 hnum))
876 continue;
877 /* If zero checksum and no_check is not on for
878 * the socket then skip it.
879 */
880 if (!uh->check && !udp_get_no_check6_rx(sk))
881 continue;
882 if (!first) {
883 first = sk;
884 continue;
885 }
886 nskb = skb_clone(skb, GFP_ATOMIC);
887 if (unlikely(!nskb)) {
888 atomic_inc(&sk->sk_drops);
889 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
890 IS_UDPLITE(sk));
891 __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
892 IS_UDPLITE(sk));
893 continue;
894 }
895
896 if (udpv6_queue_rcv_skb(sk, nskb) > 0)
897 consume_skb(nskb);
898 }
899
900 /* Also lookup *:port if we are using hash2 and haven't done so yet. */
901 if (use_hash2 && hash2 != hash2_any) {
902 hash2 = hash2_any;
903 goto start_lookup;
904 }
905
906 if (first) {
907 if (udpv6_queue_rcv_skb(first, skb) > 0)
908 consume_skb(skb);
909 } else {
910 kfree_skb(skb);
911 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
912 proto == IPPROTO_UDPLITE);
913 }
914 return 0;
915 }
916
917 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
918 {
919 if (udp_sk_rx_dst_set(sk, dst)) {
920 const struct rt6_info *rt = (const struct rt6_info *)dst;
921
922 sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
923 }
924 }
925
926 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
927 * return code conversion for ip layer consumption
928 */
929 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
930 struct udphdr *uh)
931 {
932 int ret;
933
934 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
935 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
936
937 ret = udpv6_queue_rcv_skb(sk, skb);
938
939 /* a return value > 0 means to resubmit the input */
940 if (ret > 0)
941 return ret;
942 return 0;
943 }
944
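/* Common receive entry for UDPv6 and UDP-Litev6: validate and trim the
 * datagram to uh->len, initialise checksum state, then deliver via (in this
 * order) an already-attached/stolen socket, the multicast path, or a full
 * socket lookup.  Packets with no matching socket get an ICMPv6 port
 * unreachable unless they first fail the checksum or XFRM policy checks.
 */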
945 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
946 int proto)
947 {
948 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
949 const struct in6_addr *saddr, *daddr;
950 struct net *net = dev_net(skb->dev);
951 struct udphdr *uh;
952 struct sock *sk;
953 bool refcounted;
954 u32 ulen = 0;
955
956 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
957 goto discard;
958
959 saddr = &ipv6_hdr(skb)->saddr;
960 daddr = &ipv6_hdr(skb)->daddr;
961 uh = udp_hdr(skb);
962
963 ulen = ntohs(uh->len);
964 if (ulen > skb->len)
965 goto short_packet;
966
967 if (proto == IPPROTO_UDP) {
968 /* UDP validates ulen. */
969
970 /* Check for jumbo payload */
971 if (ulen == 0)
972 ulen = skb->len;
973
974 if (ulen < sizeof(*uh))
975 goto short_packet;
976
977 if (ulen < skb->len) {
978 if (pskb_trim_rcsum(skb, ulen))
979 goto short_packet;
980 saddr = &ipv6_hdr(skb)->saddr;
981 daddr = &ipv6_hdr(skb)->daddr;
982 uh = udp_hdr(skb);
983 }
984 }
985
986 if (udp6_csum_init(skb, uh, proto))
987 goto csum_error;
988
989 /* Check if the socket is already available, e.g. due to early demux */
990 sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
991 &refcounted, udp6_ehashfn);
992 if (IS_ERR(sk))
993 goto no_sk;
994
995 if (sk) {
996 struct dst_entry *dst = skb_dst(skb);
997 int ret;
998
999 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
1000 udp6_sk_rx_dst_set(sk, dst);
1001
1002 if (!uh->check && !udp_get_no_check6_rx(sk)) {
1003 if (refcounted)
1004 sock_put(sk);
1005 goto report_csum_error;
1006 }
1007
1008 ret = udp6_unicast_rcv_skb(sk, skb, uh);
1009 if (refcounted)
1010 sock_put(sk);
1011 return ret;
1012 }
1013
1014 /*
1015 * Multicast receive code
1016 */
1017 if (ipv6_addr_is_multicast(daddr))
1018 return __udp6_lib_mcast_deliver(net, skb,
1019 saddr, daddr, udptable, proto);
1020
1021 /* Unicast */
1022 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1023 if (sk) {
1024 if (!uh->check && !udp_get_no_check6_rx(sk))
1025 goto report_csum_error;
1026 return udp6_unicast_rcv_skb(sk, skb, uh);
1027 }
1028 no_sk:
1029 reason = SKB_DROP_REASON_NO_SOCKET;
1030
1031 if (!uh->check)
1032 goto report_csum_error;
1033
1034 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1035 goto discard;
1036 nf_reset_ct(skb);
1037
1038 if (udp_lib_checksum_complete(skb))
1039 goto csum_error;
1040
1041 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1042 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1043
1044 kfree_skb_reason(skb, reason);
1045 return 0;
1046
1047 short_packet:
1048 if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1049 reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1050 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1051 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1052 saddr, ntohs(uh->source),
1053 ulen, skb->len,
1054 daddr, ntohs(uh->dest));
1055 goto discard;
1056
1057 report_csum_error:
1058 udp6_csum_zero_error(skb);
1059 csum_error:
1060 if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1061 reason = SKB_DROP_REASON_UDP_CSUM;
1062 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1063 discard:
1064 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1065 kfree_skb_reason(skb, reason);
1066 return 0;
1067 }
1068
1069
1070 static struct sock *__udp6_lib_demux_lookup(struct net *net,
1071 __be16 loc_port, const struct in6_addr *loc_addr,
1072 __be16 rmt_port, const struct in6_addr *rmt_addr,
1073 int dif, int sdif)
1074 {
1075 struct udp_table *udptable = net->ipv4.udp_table;
1076 unsigned short hnum = ntohs(loc_port);
1077 unsigned int hash2, slot2;
1078 struct udp_hslot *hslot2;
1079 __portpair ports;
1080 struct sock *sk;
1081
1082 hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1083 slot2 = hash2 & udptable->mask;
1084 hslot2 = &udptable->hash2[slot2];
1085 ports = INET_COMBINED_PORTS(rmt_port, hnum);
1086
1087 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1088 if (sk->sk_state == TCP_ESTABLISHED &&
1089 inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1090 return sk;
1091 /* Only check first socket in chain */
1092 break;
1093 }
1094 return NULL;
1095 }
1096
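/* Early demux runs before routing: for unicast packets matching an
 * established (connected) UDP socket it attaches the socket and its cached
 * rx dst to the skb, letting the normal input path skip both the route
 * lookup and the socket lookup.
 */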
1097 void udp_v6_early_demux(struct sk_buff *skb)
1098 {
1099 struct net *net = dev_net(skb->dev);
1100 const struct udphdr *uh;
1101 struct sock *sk;
1102 struct dst_entry *dst;
1103 int dif = skb->dev->ifindex;
1104 int sdif = inet6_sdif(skb);
1105
1106 if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1107 sizeof(struct udphdr)))
1108 return;
1109
1110 uh = udp_hdr(skb);
1111
1112 if (skb->pkt_type == PACKET_HOST)
1113 sk = __udp6_lib_demux_lookup(net, uh->dest,
1114 &ipv6_hdr(skb)->daddr,
1115 uh->source, &ipv6_hdr(skb)->saddr,
1116 dif, sdif);
1117 else
1118 return;
1119
1120 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1121 return;
1122
1123 skb->sk = sk;
1124 skb->destructor = sock_efree;
1125 dst = rcu_dereference(sk->sk_rx_dst);
1126
1127 if (dst)
1128 dst = dst_check(dst, sk->sk_rx_dst_cookie);
1129 if (dst) {
1130 /* set noref for now.
1131 * any place which wants to hold dst has to call
1132 * dst_hold_safe()
1133 */
1134 skb_dst_set_noref(skb, dst);
1135 }
1136 }
1137
1138 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1139 {
1140 return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
1141 }
1142
1143 /*
1144 * Throw away all pending data and cancel the corking. Socket is locked.
1145 */
1146 static void udp_v6_flush_pending_frames(struct sock *sk)
1147 {
1148 struct udp_sock *up = udp_sk(sk);
1149
1150 if (up->pending == AF_INET)
1151 udp_flush_pending_frames(sk);
1152 else if (up->pending) {
1153 up->len = 0;
1154 WRITE_ONCE(up->pending, 0);
1155 ip6_flush_pending_frames(sk);
1156 }
1157 }
1158
1159 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1160 int addr_len)
1161 {
1162 if (addr_len < offsetofend(struct sockaddr, sa_family))
1163 return -EINVAL;
1164 /* The following checks are replicated from __ip6_datagram_connect()
1165 * and intended to prevent BPF program called below from accessing
1166 * bytes that are out of the bound specified by user in addr_len.
1167 */
1168 if (uaddr->sa_family == AF_INET) {
1169 if (ipv6_only_sock(sk))
1170 return -EAFNOSUPPORT;
1171 return udp_pre_connect(sk, uaddr, addr_len);
1172 }
1173
1174 if (addr_len < SIN6_LEN_RFC2133)
1175 return -EINVAL;
1176
1177 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
1178 }
1179
1180 /**
1181 * udp6_hwcsum_outgoing - handle outgoing HW checksumming
1182 * @sk: socket we are sending on
1183 * @skb: sk_buff containing the filled-in UDP header
1184 * (checksum field must be zeroed out)
1185 * @saddr: source address
1186 * @daddr: destination address
1187 * @len: length of packet
1188 */
1189 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1190 const struct in6_addr *saddr,
1191 const struct in6_addr *daddr, int len)
1192 {
1193 unsigned int offset;
1194 struct udphdr *uh = udp_hdr(skb);
1195 struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1196 __wsum csum = 0;
1197
1198 if (!frags) {
1199 /* Only one fragment on the socket. */
1200 skb->csum_start = skb_transport_header(skb) - skb->head;
1201 skb->csum_offset = offsetof(struct udphdr, check);
1202 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1203 } else {
1204 /*
1205 * HW checksumming won't work here: with two or more fragments
1206 * queued on the socket, the checksums of all sk_buffs have to be
1207 * combined into a single value by software
1208 */
1209 offset = skb_transport_offset(skb);
1210 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1211 csum = skb->csum;
1212
1213 skb->ip_summed = CHECKSUM_NONE;
1214
1215 do {
1216 csum = csum_add(csum, frags->csum);
1217 } while ((frags = frags->next));
1218
1219 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1220 csum);
1221 if (uh->check == 0)
1222 uh->check = CSUM_MANGLED_0;
1223 }
1224 }
1225
1226 /*
1227 * Sending
1228 */
1229
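/* Transmit path for a fully built skb.  For UDP_SEGMENT (cork->gso_size)
 * the segment size plus headers must fit the cached fragsize, the payload
 * is capped at UDP_MAX_SEGMENTS segments, checksum offload
 * (CHECKSUM_PARTIAL) is required, and UDP-Lite, disabled tx checksums and
 * xfrm-transformed routes are rejected; gso_size/gso_segs are then set so
 * the skb can be split later by the stack or the NIC.
 */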
1230 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1231 struct inet_cork *cork)
1232 {
1233 struct sock *sk = skb->sk;
1234 struct udphdr *uh;
1235 int err = 0;
1236 int is_udplite = IS_UDPLITE(sk);
1237 __wsum csum = 0;
1238 int offset = skb_transport_offset(skb);
1239 int len = skb->len - offset;
1240 int datalen = len - sizeof(*uh);
1241
1242 /*
1243 * Create a UDP header
1244 */
1245 uh = udp_hdr(skb);
1246 uh->source = fl6->fl6_sport;
1247 uh->dest = fl6->fl6_dport;
1248 uh->len = htons(len);
1249 uh->check = 0;
1250
1251 if (cork->gso_size) {
1252 const int hlen = skb_network_header_len(skb) +
1253 sizeof(struct udphdr);
1254
1255 if (hlen + cork->gso_size > cork->fragsize) {
1256 kfree_skb(skb);
1257 return -EINVAL;
1258 }
1259 if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1260 kfree_skb(skb);
1261 return -EINVAL;
1262 }
1263 if (udp_get_no_check6_tx(sk)) {
1264 kfree_skb(skb);
1265 return -EINVAL;
1266 }
1267 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1268 dst_xfrm(skb_dst(skb))) {
1269 kfree_skb(skb);
1270 return -EIO;
1271 }
1272
1273 if (datalen > cork->gso_size) {
1274 skb_shinfo(skb)->gso_size = cork->gso_size;
1275 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1276 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1277 cork->gso_size);
1278 }
1279 goto csum_partial;
1280 }
1281
1282 if (is_udplite)
1283 csum = udplite_csum(skb);
1284 else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */
1285 skb->ip_summed = CHECKSUM_NONE;
1286 goto send;
1287 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1288 csum_partial:
1289 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1290 goto send;
1291 } else
1292 csum = udp_csum(skb);
1293
1294 /* add protocol-dependent pseudo-header */
1295 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1296 len, fl6->flowi6_proto, csum);
1297 if (uh->check == 0)
1298 uh->check = CSUM_MANGLED_0;
1299
1300 send:
1301 err = ip6_send_skb(skb);
1302 if (err) {
1303 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1304 UDP6_INC_STATS(sock_net(sk),
1305 UDP_MIB_SNDBUFERRORS, is_udplite);
1306 err = 0;
1307 }
1308 } else {
1309 UDP6_INC_STATS(sock_net(sk),
1310 UDP_MIB_OUTDATAGRAMS, is_udplite);
1311 }
1312 return err;
1313 }
1314
1315 static int udp_v6_push_pending_frames(struct sock *sk)
1316 {
1317 struct sk_buff *skb;
1318 struct udp_sock *up = udp_sk(sk);
1319 int err = 0;
1320
1321 if (up->pending == AF_INET)
1322 return udp_push_pending_frames(sk);
1323
1324 skb = ip6_finish_skb(sk);
1325 if (!skb)
1326 goto out;
1327
1328 err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1329 &inet_sk(sk)->cork.base);
1330 out:
1331 up->len = 0;
1332 WRITE_ONCE(up->pending, 0);
1333 return err;
1334 }
1335
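/* udpv6_sendmsg(): parses the destination (falling back to udp_sendmsg()
 * for AF_INET or v4-mapped addresses), handles corked sends, cmsg and
 * flow-label options and the BPF sendmsg hook, performs the route lookup,
 * and then takes either the lockless single-skb fast path (ip6_make_skb()
 * plus udp_v6_send_skb()) or the corked ip6_append_data() path.
 */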
1336 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1337 {
1338 struct ipv6_txoptions opt_space;
1339 struct udp_sock *up = udp_sk(sk);
1340 struct inet_sock *inet = inet_sk(sk);
1341 struct ipv6_pinfo *np = inet6_sk(sk);
1342 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1343 struct in6_addr *daddr, *final_p, final;
1344 struct ipv6_txoptions *opt = NULL;
1345 struct ipv6_txoptions *opt_to_free = NULL;
1346 struct ip6_flowlabel *flowlabel = NULL;
1347 struct inet_cork_full cork;
1348 struct flowi6 *fl6 = &cork.fl.u.ip6;
1349 struct dst_entry *dst;
1350 struct ipcm6_cookie ipc6;
1351 int addr_len = msg->msg_namelen;
1352 bool connected = false;
1353 int ulen = len;
1354 int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
1355 int err;
1356 int is_udplite = IS_UDPLITE(sk);
1357 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1358
1359 trace_android_rvh_udpv6_sendmsg(sk, msg, len);
1360
1361 ipcm6_init(&ipc6);
1362 ipc6.gso_size = READ_ONCE(up->gso_size);
1363 ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
1364 ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
1365
1366 /* destination address check */
1367 if (sin6) {
1368 if (addr_len < offsetof(struct sockaddr, sa_data))
1369 return -EINVAL;
1370
1371 switch (sin6->sin6_family) {
1372 case AF_INET6:
1373 if (addr_len < SIN6_LEN_RFC2133)
1374 return -EINVAL;
1375 daddr = &sin6->sin6_addr;
1376 if (ipv6_addr_any(daddr) &&
1377 ipv6_addr_v4mapped(&np->saddr))
1378 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1379 daddr);
1380 break;
1381 case AF_INET:
1382 goto do_udp_sendmsg;
1383 case AF_UNSPEC:
1384 msg->msg_name = sin6 = NULL;
1385 msg->msg_namelen = addr_len = 0;
1386 daddr = NULL;
1387 break;
1388 default:
1389 return -EINVAL;
1390 }
1391 } else if (!READ_ONCE(up->pending)) {
1392 if (sk->sk_state != TCP_ESTABLISHED)
1393 return -EDESTADDRREQ;
1394 daddr = &sk->sk_v6_daddr;
1395 } else
1396 daddr = NULL;
1397
1398 if (daddr) {
1399 if (ipv6_addr_v4mapped(daddr)) {
1400 struct sockaddr_in sin;
1401 sin.sin_family = AF_INET;
1402 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1403 sin.sin_addr.s_addr = daddr->s6_addr32[3];
1404 msg->msg_name = &sin;
1405 msg->msg_namelen = sizeof(sin);
1406 do_udp_sendmsg:
1407 err = ipv6_only_sock(sk) ?
1408 -ENETUNREACH : udp_sendmsg(sk, msg, len);
1409 msg->msg_name = sin6;
1410 msg->msg_namelen = addr_len;
1411 return err;
1412 }
1413 }
1414
1415 /* Rough check on arithmetic overflow,
1416 better check is made in ip6_append_data().
1417 */
1418 if (len > INT_MAX - sizeof(struct udphdr))
1419 return -EMSGSIZE;
1420
1421 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1422 if (READ_ONCE(up->pending)) {
1423 if (READ_ONCE(up->pending) == AF_INET)
1424 return udp_sendmsg(sk, msg, len);
1425 /*
1426 * There are pending frames.
1427 * The socket lock must be held while it's corked.
1428 */
1429 lock_sock(sk);
1430 if (likely(up->pending)) {
1431 if (unlikely(up->pending != AF_INET6)) {
1432 release_sock(sk);
1433 return -EAFNOSUPPORT;
1434 }
1435 dst = NULL;
1436 goto do_append_data;
1437 }
1438 release_sock(sk);
1439 }
1440 ulen += sizeof(struct udphdr);
1441
1442 memset(fl6, 0, sizeof(*fl6));
1443
1444 if (sin6) {
1445 if (sin6->sin6_port == 0)
1446 return -EINVAL;
1447
1448 fl6->fl6_dport = sin6->sin6_port;
1449 daddr = &sin6->sin6_addr;
1450
1451 if (np->sndflow) {
1452 fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1453 if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1454 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1455 if (IS_ERR(flowlabel))
1456 return -EINVAL;
1457 }
1458 }
1459
1460 /*
1461 * Otherwise it will be difficult to maintain
1462 * sk->sk_dst_cache.
1463 */
1464 if (sk->sk_state == TCP_ESTABLISHED &&
1465 ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1466 daddr = &sk->sk_v6_daddr;
1467
1468 if (addr_len >= sizeof(struct sockaddr_in6) &&
1469 sin6->sin6_scope_id &&
1470 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1471 fl6->flowi6_oif = sin6->sin6_scope_id;
1472 } else {
1473 if (sk->sk_state != TCP_ESTABLISHED)
1474 return -EDESTADDRREQ;
1475
1476 fl6->fl6_dport = inet->inet_dport;
1477 daddr = &sk->sk_v6_daddr;
1478 fl6->flowlabel = np->flow_label;
1479 connected = true;
1480 }
1481
1482 trace_android_vh_udp_v6_connect(sk, sin6);
1483
1484 if (!fl6->flowi6_oif)
1485 fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1486
1487 if (!fl6->flowi6_oif)
1488 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1489
1490 fl6->flowi6_uid = sk->sk_uid;
1491
1492 if (msg->msg_controllen) {
1493 opt = &opt_space;
1494 memset(opt, 0, sizeof(struct ipv6_txoptions));
1495 opt->tot_len = sizeof(*opt);
1496 ipc6.opt = opt;
1497
1498 err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1499 if (err > 0) {
1500 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1501 &ipc6);
1502 connected = false;
1503 }
1504 if (err < 0) {
1505 fl6_sock_release(flowlabel);
1506 return err;
1507 }
1508 if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1509 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1510 if (IS_ERR(flowlabel))
1511 return -EINVAL;
1512 }
1513 if (!(opt->opt_nflen|opt->opt_flen))
1514 opt = NULL;
1515 }
1516 if (!opt) {
1517 opt = txopt_get(np);
1518 opt_to_free = opt;
1519 }
1520 if (flowlabel)
1521 opt = fl6_merge_options(&opt_space, flowlabel, opt);
1522 opt = ipv6_fixup_options(&opt_space, opt);
1523 ipc6.opt = opt;
1524
1525 fl6->flowi6_proto = sk->sk_protocol;
1526 fl6->flowi6_mark = ipc6.sockc.mark;
1527 fl6->daddr = *daddr;
1528 if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1529 fl6->saddr = np->saddr;
1530 fl6->fl6_sport = inet->inet_sport;
1531
1532 if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1533 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1534 (struct sockaddr *)sin6,
1535 &addr_len,
1536 &fl6->saddr);
1537 if (err)
1538 goto out_no_dst;
1539 if (sin6) {
1540 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1541 /* The BPF program rewrote the destination to an IPv4-mapped
1542 * IPv6 address, which is currently unsupported here.
1543 */
1544 err = -ENOTSUPP;
1545 goto out_no_dst;
1546 }
1547 if (sin6->sin6_port == 0) {
1548 /* BPF program set invalid port. Reject it. */
1549 err = -EINVAL;
1550 goto out_no_dst;
1551 }
1552 fl6->fl6_dport = sin6->sin6_port;
1553 fl6->daddr = sin6->sin6_addr;
1554 }
1555 }
1556
1557 if (ipv6_addr_any(&fl6->daddr))
1558 fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1559
1560 final_p = fl6_update_dst(fl6, opt, &final);
1561 if (final_p)
1562 connected = false;
1563
1564 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1565 fl6->flowi6_oif = np->mcast_oif;
1566 connected = false;
1567 } else if (!fl6->flowi6_oif)
1568 fl6->flowi6_oif = np->ucast_oif;
1569
1570 security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1571
1572 if (ipc6.tclass < 0)
1573 ipc6.tclass = np->tclass;
1574
1575 fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1576
1577 dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1578 if (IS_ERR(dst)) {
1579 err = PTR_ERR(dst);
1580 dst = NULL;
1581 goto out;
1582 }
1583
1584 if (ipc6.hlimit < 0)
1585 ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1586
1587 if (msg->msg_flags&MSG_CONFIRM)
1588 goto do_confirm;
1589 back_from_confirm:
1590
1591 /* Lockless fast path for the non-corking case */
1592 if (!corkreq) {
1593 struct sk_buff *skb;
1594
1595 skb = ip6_make_skb(sk, getfrag, msg, ulen,
1596 sizeof(struct udphdr), &ipc6,
1597 (struct rt6_info *)dst,
1598 msg->msg_flags, &cork);
1599 err = PTR_ERR(skb);
1600 if (!IS_ERR_OR_NULL(skb))
1601 err = udp_v6_send_skb(skb, fl6, &cork.base);
1602 /* ip6_make_skb steals dst reference */
1603 goto out_no_dst;
1604 }
1605
1606 lock_sock(sk);
1607 if (unlikely(up->pending)) {
1608 /* The socket is already corked while preparing it. */
1609 /* ... which is an evident application bug. --ANK */
1610 release_sock(sk);
1611
1612 net_dbg_ratelimited("udp cork app bug 2\n");
1613 err = -EINVAL;
1614 goto out;
1615 }
1616
1617 WRITE_ONCE(up->pending, AF_INET6);
1618
1619 do_append_data:
1620 if (ipc6.dontfrag < 0)
1621 ipc6.dontfrag = np->dontfrag;
1622 up->len += ulen;
1623 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1624 &ipc6, fl6, (struct rt6_info *)dst,
1625 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1626 if (err)
1627 udp_v6_flush_pending_frames(sk);
1628 else if (!corkreq)
1629 err = udp_v6_push_pending_frames(sk);
1630 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1631 WRITE_ONCE(up->pending, 0);
1632
1633 if (err > 0)
1634 err = np->recverr ? net_xmit_errno(err) : 0;
1635 release_sock(sk);
1636
1637 out:
1638 dst_release(dst);
1639 out_no_dst:
1640 fl6_sock_release(flowlabel);
1641 txopt_put(opt_to_free);
1642 if (!err)
1643 return len;
1644 /*
1645 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
1646 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1647 * we don't have a good statistic (IpOutDiscards but it can be too many
1648 * things). We could add another new stat but at least for now that
1649 * seems like overkill.
1650 */
1651 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1652 UDP6_INC_STATS(sock_net(sk),
1653 UDP_MIB_SNDBUFERRORS, is_udplite);
1654 }
1655 return err;
1656
1657 do_confirm:
1658 if (msg->msg_flags & MSG_PROBE)
1659 dst_confirm_neigh(dst, &fl6->daddr);
1660 if (!(msg->msg_flags&MSG_PROBE) || len)
1661 goto back_from_confirm;
1662 err = 0;
1663 goto out;
1664 }
1665 EXPORT_SYMBOL(udpv6_sendmsg);
1666
1667 static void udpv6_splice_eof(struct socket *sock)
1668 {
1669 struct sock *sk = sock->sk;
1670 struct udp_sock *up = udp_sk(sk);
1671
1672 if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
1673 return;
1674
1675 lock_sock(sk);
1676 if (up->pending && !udp_test_bit(CORK, sk))
1677 udp_v6_push_pending_frames(sk);
1678 release_sock(sk);
1679 }
1680
1681 void udpv6_destroy_sock(struct sock *sk)
1682 {
1683 struct udp_sock *up = udp_sk(sk);
1684 lock_sock(sk);
1685
1686 /* protects from races with udp_abort() */
1687 sock_set_flag(sk, SOCK_DEAD);
1688 udp_v6_flush_pending_frames(sk);
1689 release_sock(sk);
1690
1691 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1692 if (up->encap_type) {
1693 void (*encap_destroy)(struct sock *sk);
1694 encap_destroy = READ_ONCE(up->encap_destroy);
1695 if (encap_destroy)
1696 encap_destroy(sk);
1697 }
1698 if (udp_test_bit(ENCAP_ENABLED, sk)) {
1699 static_branch_dec(&udpv6_encap_needed_key);
1700 udp_encap_disable();
1701 }
1702 }
1703 }
1704
1705 /*
1706 * Socket option code for UDP
1707 */
1708 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1709 unsigned int optlen)
1710 {
1711 if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
1712 return udp_lib_setsockopt(sk, level, optname,
1713 optval, optlen,
1714 udp_v6_push_pending_frames);
1715 return ipv6_setsockopt(sk, level, optname, optval, optlen);
1716 }
1717
1718 int udpv6_getsockopt(struct sock *sk, int level, int optname,
1719 char __user *optval, int __user *optlen)
1720 {
1721 if (level == SOL_UDP || level == SOL_UDPLITE)
1722 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1723 return ipv6_getsockopt(sk, level, optname, optval, optlen);
1724 }
1725
1726 static const struct inet6_protocol udpv6_protocol = {
1727 .handler = udpv6_rcv,
1728 .err_handler = udpv6_err,
1729 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1730 };
1731
1732 /* ------------------------------------------------------------------------ */
1733 #ifdef CONFIG_PROC_FS
1734 int udp6_seq_show(struct seq_file *seq, void *v)
1735 {
1736 if (v == SEQ_START_TOKEN) {
1737 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1738 } else {
1739 int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1740 const struct inet_sock *inet = inet_sk((const struct sock *)v);
1741 __u16 srcp = ntohs(inet->inet_sport);
1742 __u16 destp = ntohs(inet->inet_dport);
1743 __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1744 udp_rqueue_get(v), bucket);
1745 }
1746 return 0;
1747 }
1748
1749 const struct seq_operations udp6_seq_ops = {
1750 .start = udp_seq_start,
1751 .next = udp_seq_next,
1752 .stop = udp_seq_stop,
1753 .show = udp6_seq_show,
1754 };
1755 EXPORT_SYMBOL(udp6_seq_ops);
1756
1757 static struct udp_seq_afinfo udp6_seq_afinfo = {
1758 .family = AF_INET6,
1759 .udp_table = NULL,
1760 };
1761
1762 int __net_init udp6_proc_init(struct net *net)
1763 {
1764 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1765 sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1766 return -ENOMEM;
1767 return 0;
1768 }
1769
1770 void udp6_proc_exit(struct net *net)
1771 {
1772 remove_proc_entry("udp6", net->proc_net);
1773 }
1774 #endif /* CONFIG_PROC_FS */
1775
1776 /* ------------------------------------------------------------------------ */
1777
1778 struct proto udpv6_prot = {
1779 .name = "UDPv6",
1780 .owner = THIS_MODULE,
1781 .close = udp_lib_close,
1782 .pre_connect = udpv6_pre_connect,
1783 .connect = ip6_datagram_connect,
1784 .disconnect = udp_disconnect,
1785 .ioctl = udp_ioctl,
1786 .init = udpv6_init_sock,
1787 .destroy = udpv6_destroy_sock,
1788 .setsockopt = udpv6_setsockopt,
1789 .getsockopt = udpv6_getsockopt,
1790 .sendmsg = udpv6_sendmsg,
1791 .recvmsg = udpv6_recvmsg,
1792 .splice_eof = udpv6_splice_eof,
1793 .release_cb = ip6_datagram_release_cb,
1794 .hash = udp_lib_hash,
1795 .unhash = udp_lib_unhash,
1796 .rehash = udp_v6_rehash,
1797 .get_port = udp_v6_get_port,
1798 .put_port = udp_lib_unhash,
1799 #ifdef CONFIG_BPF_SYSCALL
1800 .psock_update_sk_prot = udp_bpf_update_proto,
1801 #endif
1802
1803 .memory_allocated = &udp_memory_allocated,
1804 .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
1805
1806 .sysctl_mem = sysctl_udp_mem,
1807 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1808 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1809 .obj_size = sizeof(struct udp6_sock),
1810 .ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
1811 .h.udp_table = NULL,
1812 .diag_destroy = udp_abort,
1813 };
1814
1815 static struct inet_protosw udpv6_protosw = {
1816 .type = SOCK_DGRAM,
1817 .protocol = IPPROTO_UDP,
1818 .prot = &udpv6_prot,
1819 .ops = &inet6_dgram_ops,
1820 .flags = INET_PROTOSW_PERMANENT,
1821 };
1822
1823 int __init udpv6_init(void)
1824 {
1825 int ret;
1826
1827 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1828 if (ret)
1829 goto out;
1830
1831 ret = inet6_register_protosw(&udpv6_protosw);
1832 if (ret)
1833 goto out_udpv6_protocol;
1834 out:
1835 return ret;
1836
1837 out_udpv6_protocol:
1838 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1839 goto out;
1840 }
1841
1842 void udpv6_exit(void)
1843 {
1844 inet6_unregister_protosw(&udpv6_protosw);
1845 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1846 }
1847