Lines matching refs: sk — cross-reference listing of every use of `sk` in the kernel's IPv4 UDP implementation (net/ipv4/udp.c). Each entry gives the source line number, the matching line, and the enclosing function; `argument` and `local` mark the lines where `sk` is declared as a parameter or local variable.
131 struct sock *sk, unsigned int log) in udp_lib_lport_inuse() argument
134 kuid_t uid = sock_i_uid(sk); in udp_lib_lport_inuse()
138 sk2 != sk && in udp_lib_lport_inuse()
140 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse()
141 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse()
142 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse()
143 inet_rcv_saddr_equal(sk, sk2, true)) { in udp_lib_lport_inuse()
144 if (sk2->sk_reuseport && sk->sk_reuseport && in udp_lib_lport_inuse()
145 !rcu_access_pointer(sk->sk_reuseport_cb) && in udp_lib_lport_inuse()
166 struct sock *sk) in udp_lib_lport_inuse2() argument
169 kuid_t uid = sock_i_uid(sk); in udp_lib_lport_inuse2()
175 sk2 != sk && in udp_lib_lport_inuse2()
177 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse2()
178 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse2()
179 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse2()
180 inet_rcv_saddr_equal(sk, sk2, true)) { in udp_lib_lport_inuse2()
181 if (sk2->sk_reuseport && sk->sk_reuseport && in udp_lib_lport_inuse2()
182 !rcu_access_pointer(sk->sk_reuseport_cb) && in udp_lib_lport_inuse2()
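These two helpers (source lines 131-182) implement the bind-time conflict scan: a second socket may take an in-use port only if both sockets set SO_REUSEADDR, are bound to different devices, or both opted into SO_REUSEPORT under the same owner. A minimal userspace demonstration of the resulting policy, assuming nothing else is bound to the arbitrary demo port 5599:

```c
/* Two UDP sockets sharing one port via SO_REUSEPORT: a userspace view
 * of the policy udp_lib_lport_inuse() enforces. Port 5599 is an
 * arbitrary choice for the demo. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

static int bound_udp_socket(int reuseport, unsigned short port)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_port = htons(port),
				 .sin_addr.s_addr = htonl(INADDR_ANY) };

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuseport, sizeof(reuseport));
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0) {
		perror("bind");	/* EADDRINUSE when the conflict scan says no */
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int a = bound_udp_socket(1, 5599);
	int b = bound_udp_socket(1, 5599);	/* succeeds: same UID, both reuseport */
	int c = bound_udp_socket(0, 5599);	/* fails: no SO_REUSEPORT on this one */

	printf("a=%d b=%d c=%d\n", a, b, c);
	return 0;
}
```

The third bind fails with EADDRINUSE because `sk->sk_reuseport` is false on the new socket, so the reuseport escape hatch on lines 144-145 never applies.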
195 static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot) in udp_reuseport_add_sock() argument
197 struct net *net = sock_net(sk); in udp_reuseport_add_sock()
198 kuid_t uid = sock_i_uid(sk); in udp_reuseport_add_sock()
203 sk2 != sk && in udp_reuseport_add_sock()
204 sk2->sk_family == sk->sk_family && in udp_reuseport_add_sock()
205 ipv6_only_sock(sk2) == ipv6_only_sock(sk) && in udp_reuseport_add_sock()
206 (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) && in udp_reuseport_add_sock()
207 (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_reuseport_add_sock()
209 inet_rcv_saddr_equal(sk, sk2, false)) { in udp_reuseport_add_sock()
210 return reuseport_add_sock(sk, sk2, in udp_reuseport_add_sock()
211 inet_rcv_saddr_any(sk)); in udp_reuseport_add_sock()
215 return reuseport_alloc(sk, inet_rcv_saddr_any(sk)); in udp_reuseport_add_sock()
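Source lines 203-209 are the group-membership test: a new SO_REUSEPORT socket joins an existing reuseport group only when family, IPV6_V6ONLY, port hash, bound device, owning UID, and receive address all line up; otherwise reuseport_alloc() starts a fresh group. A userspace model of that predicate, using simplified stand-in types rather than kernel structures:

```c
/* Illustrative model of the group-match test in udp_reuseport_add_sock().
 * struct demo_sock and its fields are simplified stand-ins, not kernel types. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

struct demo_sock {
	int      family;	/* AF_INET / AF_INET6 */
	bool     ipv6_only;	/* IPV6_V6ONLY */
	uint16_t port_hash;	/* udp_sk(sk)->udp_port_hash */
	int      bound_dev_if;	/* SO_BINDTODEVICE ifindex, 0 = none */
	uint32_t uid;		/* socket owner */
	uint32_t rcv_saddr;	/* bound local address */
};

static bool rcv_saddr_equal(const struct demo_sock *a, const struct demo_sock *b)
{
	/* match_wildcard=false here: wildcard vs. specific does not match */
	return a->rcv_saddr == b->rcv_saddr;
}

static bool joins_same_group(const struct demo_sock *sk, const struct demo_sock *sk2)
{
	return sk2 != sk &&
	       sk2->family == sk->family &&
	       sk2->ipv6_only == sk->ipv6_only &&
	       sk2->port_hash == sk->port_hash &&
	       sk2->bound_dev_if == sk->bound_dev_if &&
	       sk2->uid == sk->uid &&
	       rcv_saddr_equal(sk, sk2);
}

int main(void)
{
	struct demo_sock a = { AF_INET, false, 5599, 0, 1000, 0 };
	struct demo_sock b = a;			/* identical binding */
	struct demo_sock c = a; c.uid = 1001;	/* different owner */

	printf("b joins a's group: %d\n", joins_same_group(&b, &a));	/* 1 */
	printf("c joins a's group: %d\n", joins_same_group(&c, &a));	/* 0 */
	return 0;
}
```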
226 int udp_lib_get_port(struct sock *sk, unsigned short snum, in udp_lib_get_port() argument
230 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_get_port()
232 struct net *net = sock_net(sk); in udp_lib_get_port()
254 udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, in udp_lib_get_port()
279 unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; in udp_lib_get_port()
288 exist = udp_lib_lport_inuse2(net, snum, hslot2, sk); in udp_lib_get_port()
292 sk); in udp_lib_get_port()
300 if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0)) in udp_lib_get_port()
304 inet_sk(sk)->inet_num = snum; in udp_lib_get_port()
305 udp_sk(sk)->udp_port_hash = snum; in udp_lib_get_port()
306 udp_sk(sk)->udp_portaddr_hash ^= snum; in udp_lib_get_port()
307 if (sk_unhashed(sk)) { in udp_lib_get_port()
308 if (sk->sk_reuseport && in udp_lib_get_port()
309 udp_reuseport_add_sock(sk, hslot)) { in udp_lib_get_port()
310 inet_sk(sk)->inet_num = 0; in udp_lib_get_port()
311 udp_sk(sk)->udp_port_hash = 0; in udp_lib_get_port()
312 udp_sk(sk)->udp_portaddr_hash ^= snum; in udp_lib_get_port()
316 sk_add_node_rcu(sk, &hslot->head); in udp_lib_get_port()
318 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in udp_lib_get_port()
320 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_get_port()
322 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && in udp_lib_get_port()
323 sk->sk_family == AF_INET6) in udp_lib_get_port()
324 hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_get_port()
327 hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_get_port()
332 sock_set_flag(sk, SOCK_RCU_FREE); in udp_lib_get_port()
341 int udp_v4_get_port(struct sock *sk, unsigned short snum) in udp_v4_get_port() argument
344 ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); in udp_v4_get_port()
346 ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); in udp_v4_get_port()
349 udp_sk(sk)->udp_portaddr_hash = hash2_partial; in udp_v4_get_port()
350 return udp_lib_get_port(sk, snum, hash2_nulladdr); in udp_v4_get_port()
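udp_v4_get_port() precomputes two secondary hashes: hash2_nulladdr for (INADDR_ANY, port) and hash2_partial for (bound address, port 0). This works because ipv4_portaddr_hash() folds the port in with a plain XOR, so udp_lib_get_port() can apply the chosen port later with `udp_portaddr_hash ^= snum` (line 306) and undo it the same way on the error path (line 312). A small demonstration of the property, with demo_mix() as a placeholder for the kernel's jhash-based address mix:

```c
/* The xor-in-the-port trick behind hash2_partial. demo_mix() is a
 * placeholder for jhash over (addr, per-netns seed); the property
 * shown relies only on the port entering the hash by XOR. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_mix(uint32_t addr)		/* stand-in for jhash_1word() */
{
	addr ^= addr >> 16;
	addr *= 0x45d9f3b;
	addr ^= addr >> 16;
	return addr;
}

static uint32_t portaddr_hash(uint32_t addr, uint16_t port)
{
	return demo_mix(addr) ^ port;		/* shape of ipv4_portaddr_hash() */
}

int main(void)
{
	uint32_t addr = 0xc0a80001;		/* 192.168.0.1 */
	uint16_t port = 4242;

	uint32_t partial = portaddr_hash(addr, 0);	/* hash2_partial: port unknown */
	uint32_t full    = partial ^ port;		/* xor port in once chosen */

	assert(full == portaddr_hash(addr, port));
	assert((full ^ port) == partial);	/* xor again undoes it (line 312) */
	printf("ok: %08x\n", full);
	return 0;
}
```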
353 static int compute_score(struct sock *sk, struct net *net, in compute_score() argument
362 if (!net_eq(sock_net(sk), net) || in compute_score()
363 udp_sk(sk)->udp_port_hash != hnum || in compute_score()
364 ipv6_only_sock(sk)) in compute_score()
367 if (sk->sk_rcv_saddr != daddr) in compute_score()
370 score = (sk->sk_family == PF_INET) ? 2 : 1; in compute_score()
372 inet = inet_sk(sk); in compute_score()
385 dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, in compute_score()
391 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) in compute_score()
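compute_score() turns socket selection into a ranking problem: any bound or connected attribute that contradicts the packet disqualifies the socket outright (-1), and each exact match adds points, so the most specific socket wins the lookup. A simplified model (the increments below are illustrative, not the kernel's exact weights):

```c
/* Simplified model of compute_score() (source lines 353-391): a socket
 * that contradicts the packet scores -1; otherwise more specific
 * bindings score higher, with a tie-breaking bonus for a socket last
 * served on the current CPU (line 391). */
#include <stdint.h>
#include <stdio.h>

struct demo_pkt  { uint32_t saddr, daddr; uint16_t sport, hnum; int dif, cpu; };
struct demo_sock {
	uint16_t port_hash;	/* bound local port */
	uint32_t rcv_saddr;	/* must equal pkt daddr in this lookup */
	uint32_t daddr;		/* connected peer addr, 0 = unconnected */
	uint16_t dport;		/* connected peer port */
	int      bound_dev_if;	/* 0 = any device */
	int      incoming_cpu;
	int      is_inet;	/* native PF_INET vs. mapped v6 socket */
};

static int demo_compute_score(const struct demo_sock *sk, const struct demo_pkt *p)
{
	int score;

	if (sk->port_hash != p->hnum || sk->rcv_saddr != p->daddr)
		return -1;
	score = sk->is_inet ? 2 : 1;		/* prefer native v4 sockets */
	if (sk->daddr) {			/* connected socket */
		if (sk->daddr != p->saddr || sk->dport != p->sport)
			return -1;		/* connected elsewhere */
		score += 4;
	}
	if (sk->bound_dev_if) {
		if (sk->bound_dev_if != p->dif)
			return -1;
		score += 4;
	}
	if (sk->incoming_cpu == p->cpu)
		score++;			/* cache-locality tie-breaker */
	return score;
}

int main(void)
{
	struct demo_pkt p = { .saddr = 0x0a000001, .daddr = 0x0a000002,
			      .sport = 9999, .hnum = 53, .dif = 2, .cpu = 1 };
	struct demo_sock any  = { .port_hash = 53, .rcv_saddr = 0x0a000002, .is_inet = 1 };
	struct demo_sock conn = any;

	conn.daddr = 0x0a000001; conn.dport = 9999;	/* connected to the sender */
	printf("unconnected: %d, connected: %d\n",
	       demo_compute_score(&any, &p), demo_compute_score(&conn, &p));	/* 2, 6 */
	return 0;
}
```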
416 struct sock *sk, *result; in udp4_lib_lookup2() local
422 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { in udp4_lib_lookup2()
423 score = compute_score(sk, net, saddr, sport, in udp4_lib_lookup2()
426 if (sk->sk_reuseport && in udp4_lib_lookup2()
427 sk->sk_state != TCP_ESTABLISHED) { in udp4_lib_lookup2()
430 result = reuseport_select_sock(sk, hash, skb, in udp4_lib_lookup2()
432 if (result && !reuseport_has_conns(sk, false)) in udp4_lib_lookup2()
436 result = sk; in udp4_lib_lookup2()
505 struct sock *sk; in udp4_lib_lookup() local
507 sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport, in udp4_lib_lookup()
509 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) in udp4_lib_lookup()
510 sk = NULL; in udp4_lib_lookup()
511 return sk; in udp4_lib_lookup()
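Source lines 507-511 show the standard RCU lookup idiom: the socket found in the hash table may already be on its way to being freed, so the caller takes a reference only if the refcount is still non-zero, and otherwise treats the lookup as a miss. The same pattern as a generic C11 sketch:

```c
/* The "take a reference only if still live" pattern from udp4_lib_lookup():
 * under RCU the object may be concurrently dying, so a plain increment
 * is unsafe; a CAS loop bumps the count only while it is non-zero. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool refcount_inc_not_zero(atomic_uint *refcnt)
{
	unsigned int old = atomic_load(refcnt);

	while (old != 0) {
		/* only bump if it has not already dropped to zero */
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
	}
	return false;	/* object is being freed; treat lookup as a miss */
}

int main(void)
{
	atomic_uint live = 1, dying = 0;

	printf("live: %d, dying: %d\n",
	       refcount_inc_not_zero(&live),	/* 1: reference taken */
	       refcount_inc_not_zero(&dying));	/* 0: sk = NULL path */
	return 0;
}
```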
516 static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, in __udp_is_mcast_sock() argument
521 struct inet_sock *inet = inet_sk(sk); in __udp_is_mcast_sock()
523 if (!net_eq(sock_net(sk), net) || in __udp_is_mcast_sock()
524 udp_sk(sk)->udp_port_hash != hnum || in __udp_is_mcast_sock()
528 ipv6_only_sock(sk) || in __udp_is_mcast_sock()
529 !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) in __udp_is_mcast_sock()
531 if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif)) in __udp_is_mcast_sock()
589 struct sock *sk; in __udp4_lib_err_encap() local
600 sk = __udp4_lib_lookup(net, iph->daddr, uh->source, in __udp4_lib_err_encap()
603 if (sk) { in __udp4_lib_err_encap()
604 int (*lookup)(struct sock *sk, struct sk_buff *skb); in __udp4_lib_err_encap()
605 struct udp_sock *up = udp_sk(sk); in __udp4_lib_err_encap()
608 if (!lookup || lookup(sk, skb)) in __udp4_lib_err_encap()
609 sk = NULL; in __udp4_lib_err_encap()
612 if (!sk) in __udp4_lib_err_encap()
613 sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info)); in __udp4_lib_err_encap()
618 return sk; in __udp4_lib_err_encap()
640 struct sock *sk; in __udp4_lib_err() local
645 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, in __udp4_lib_err()
648 if (!sk) { in __udp4_lib_err()
650 sk = ERR_PTR(-ENOENT); in __udp4_lib_err()
652 sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb, in __udp4_lib_err()
654 if (!sk) in __udp4_lib_err()
658 if (IS_ERR(sk)) { in __udp4_lib_err()
660 return PTR_ERR(sk); in __udp4_lib_err()
668 inet = inet_sk(sk); in __udp4_lib_err()
683 ipv4_sk_update_pmtu(skb, sk, info); in __udp4_lib_err()
698 ipv4_sk_redirect(skb, sk); in __udp4_lib_err()
711 if (!harderr || sk->sk_state != TCP_ESTABLISHED) in __udp4_lib_err()
714 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); in __udp4_lib_err()
716 sk->sk_err = err; in __udp4_lib_err()
717 sk->sk_error_report(sk); in __udp4_lib_err()
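Source lines 711-717 are why unconnected UDP sockets never observe ICMP errors by default: unless IP_RECVERR queues the error, a hard error is surfaced only when sk_state is TCP_ESTABLISHED, i.e. the socket is connect()ed. A runnable illustration, assuming nothing listens on 127.0.0.1:9:

```c
/* ICMP port-unreachable surfaces as ECONNREFUSED only on a connected
 * UDP socket. Port 9 (discard) is assumed to have no listener here. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(9) };
	char buf[16];

	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));	/* sk_state = TCP_ESTABLISHED */

	send(fd, "ping", 4, 0);
	usleep(100000);			/* let the ICMP error arrive */
	if (recv(fd, buf, sizeof(buf), 0) < 0)
		perror("recv");		/* ECONNREFUSED, delivered via sk->sk_err */

	close(fd);
	return 0;
}
```

With sendto() on an unconnected socket, the same ICMP reply is silently discarded by the check on line 711.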
730 void udp_flush_pending_frames(struct sock *sk) in udp_flush_pending_frames() argument
732 struct udp_sock *up = udp_sk(sk); in udp_flush_pending_frames()
737 ip_flush_pending_frames(sk); in udp_flush_pending_frames()
817 struct sock *sk = skb->sk; in udp_send_skb() local
818 struct inet_sock *inet = inet_sk(sk); in udp_send_skb()
821 int is_udplite = IS_UDPLITE(sk); in udp_send_skb()
848 if (sk->sk_no_check_tx) { in udp_send_skb()
870 else if (sk->sk_no_check_tx) { /* UDP csum off */ in udp_send_skb()
886 sk->sk_protocol, csum); in udp_send_skb()
891 err = ip_send_skb(sock_net(sk), skb); in udp_send_skb()
894 UDP_INC_STATS(sock_net(sk), in udp_send_skb()
899 UDP_INC_STATS(sock_net(sk), in udp_send_skb()
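The `sk->sk_no_check_tx` tests on source lines 848 and 870 are driven from userspace by SO_NO_CHECK, which lets plain UDP over IPv4 transmit with a zero checksum. A minimal sketch (IPv4 only; the destination 127.0.0.1:5599 is arbitrary):

```c
/* Transmit-side checksum off via SO_NO_CHECK, the userspace control
 * for sk->sk_no_check_tx. */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

#ifndef SO_NO_CHECK
#define SO_NO_CHECK 11	/* Linux UAPI value on most architectures */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(5599) };

	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	setsockopt(fd, SOL_SOCKET, SO_NO_CHECK, &one, sizeof(one));	/* csum off */
	sendto(fd, "nocsum", 6, 0, (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return 0;
}
```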
907 int udp_push_pending_frames(struct sock *sk) in udp_push_pending_frames() argument
909 struct udp_sock *up = udp_sk(sk); in udp_push_pending_frames()
910 struct inet_sock *inet = inet_sk(sk); in udp_push_pending_frames()
915 skb = ip_finish_skb(sk, fl4); in udp_push_pending_frames()
941 int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size) in udp_cmsg_send() argument
965 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) in udp_sendmsg() argument
967 struct inet_sock *inet = inet_sk(sk); in udp_sendmsg()
968 struct udp_sock *up = udp_sk(sk); in udp_sendmsg()
980 int err, is_udplite = IS_UDPLITE(sk); in udp_sendmsg()
1004 lock_sock(sk); in udp_sendmsg()
1007 release_sock(sk); in udp_sendmsg()
1012 release_sock(sk); in udp_sendmsg()
1032 if (sk->sk_state != TCP_ESTABLISHED) in udp_sendmsg()
1046 err = udp_cmsg_send(sk, msg, &ipc.gso_size); in udp_sendmsg()
1048 err = ip_cmsg_send(sk, msg, &ipc, in udp_sendmsg()
1049 sk->sk_family == AF_INET6); in udp_sendmsg()
1072 err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, in udp_sendmsg()
1099 if (sock_flag(sk, SOCK_LOCALROUTE) || in udp_sendmsg()
1107 if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) in udp_sendmsg()
1122 ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk), in udp_sendmsg()
1129 rt = (struct rtable *)sk_dst_check(sk, 0); in udp_sendmsg()
1132 struct net *net = sock_net(sk); in udp_sendmsg()
1133 __u8 flow_flags = inet_sk_flowi_flags(sk); in udp_sendmsg()
1138 RT_SCOPE_UNIVERSE, sk->sk_protocol, in udp_sendmsg()
1141 sk->sk_uid); in udp_sendmsg()
1143 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); in udp_sendmsg()
1144 rt = ip_route_output_flow(net, fl4, sk); in udp_sendmsg()
1155 !sock_flag(sk, SOCK_BROADCAST)) in udp_sendmsg()
1158 sk_dst_set(sk, dst_clone(&rt->dst)); in udp_sendmsg()
1173 skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, in udp_sendmsg()
1182 lock_sock(sk); in udp_sendmsg()
1186 release_sock(sk); in udp_sendmsg()
1204 err = ip_append_data(sk, fl4, getfrag, msg, ulen, in udp_sendmsg()
1208 udp_flush_pending_frames(sk); in udp_sendmsg()
1210 err = udp_push_pending_frames(sk); in udp_sendmsg()
1211 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) in udp_sendmsg()
1213 release_sock(sk); in udp_sendmsg()
1229 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in udp_sendmsg()
1230 UDP_INC_STATS(sock_net(sk), in udp_sendmsg()
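The `up->pending` branches above are the corking path: while UDP_CORK (or MSG_MORE) holds the socket corked, udp_sendmsg() appends each write to sk_write_queue via ip_append_data(), and udp_push_pending_frames() later flushes everything as one datagram. A sketch, with 127.0.0.1:5599 as an arbitrary destination:

```c
/* Corking: two sends under UDP_CORK stay in udp_sendmsg()'s
 * up->pending path, and uncorking flushes a single datagram via
 * udp_push_pending_frames(). */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

#ifndef UDP_CORK
#define UDP_CORK 1	/* from linux/udp.h, if libc headers omit it */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(5599) };
	int on = 1, off = 0;

	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));

	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, "hello ", 6, 0);	/* appended, not yet transmitted */
	send(fd, "world", 5, 0);	/* appended to the same datagram */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));	/* flush */

	close(fd);
	return 0;
}
```

The receiver sees a single 11-byte datagram rather than two.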
1245 int udp_sendpage(struct sock *sk, struct page *page, int offset, in udp_sendpage() argument
1248 struct inet_sock *inet = inet_sk(sk); in udp_sendpage()
1249 struct udp_sock *up = udp_sk(sk); in udp_sendpage()
1262 ret = udp_sendmsg(sk, &msg, 0); in udp_sendpage()
1267 lock_sock(sk); in udp_sendpage()
1270 release_sock(sk); in udp_sendpage()
1276 ret = ip_append_page(sk, &inet->cork.fl.u.ip4, in udp_sendpage()
1279 release_sock(sk); in udp_sendpage()
1280 return sock_no_sendpage(sk->sk_socket, page, offset, in udp_sendpage()
1284 udp_flush_pending_frames(sk); in udp_sendpage()
1290 ret = udp_push_pending_frames(sk); in udp_sendpage()
1294 release_sock(sk); in udp_sendpage()
1361 static void udp_rmem_release(struct sock *sk, int size, int partial, in udp_rmem_release() argument
1364 struct udp_sock *up = udp_sk(sk); in udp_rmem_release()
1371 if (size < (sk->sk_rcvbuf >> 2) && in udp_rmem_release()
1382 sk_queue = &sk->sk_receive_queue; in udp_rmem_release()
1387 sk->sk_forward_alloc += size; in udp_rmem_release()
1388 amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1); in udp_rmem_release()
1389 sk->sk_forward_alloc -= amt; in udp_rmem_release()
1392 __sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT); in udp_rmem_release()
1394 atomic_sub(size, &sk->sk_rmem_alloc); in udp_rmem_release()
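udp_rmem_release() returns memory to the protocol accounting in whole SK_MEM_QUANTUM (page-sized) units; line 1388 rounds the reclaim down so a sub-quantum remainder stays cached on the socket and the next receive does not immediately re-charge. The arithmetic, with hypothetical numbers:

```c
/* The reclaim rounding on source line 1388: give back whole
 * SK_MEM_QUANTUM units, keeping `partial` bytes plus the sub-quantum
 * remainder cached on the socket. Inputs here are hypothetical. */
#include <stdio.h>

#define SK_MEM_QUANTUM 4096	/* one page on most configurations */

int main(void)
{
	int forward_alloc = 13000;	/* hypothetical per-socket cache */
	int partial = 1;		/* keep a little to avoid thrashing */

	int amt = (forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);

	printf("reclaim %d, keep %d cached\n", amt, forward_alloc - amt);
	/* reclaim 12288 (3 quanta), keep 712 cached */
	return 0;
}
```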
1408 void udp_skb_destructor(struct sock *sk, struct sk_buff *skb) in udp_skb_destructor() argument
1411 udp_rmem_release(sk, udp_skb_truesize(skb), 1, false); in udp_skb_destructor()
1416 static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb) in udp_skb_dtor_locked() argument
1419 udp_rmem_release(sk, udp_skb_truesize(skb), 1, true); in udp_skb_dtor_locked()
1447 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) in __udp_enqueue_schedule_skb() argument
1449 struct sk_buff_head *list = &sk->sk_receive_queue; in __udp_enqueue_schedule_skb()
1457 rmem = atomic_read(&sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
1458 if (rmem > sk->sk_rcvbuf) in __udp_enqueue_schedule_skb()
1467 if (rmem > (sk->sk_rcvbuf >> 1)) { in __udp_enqueue_schedule_skb()
1470 busy = busylock_acquire(sk); in __udp_enqueue_schedule_skb()
1478 rmem = atomic_add_return(size, &sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
1479 if (rmem > (size + (unsigned int)sk->sk_rcvbuf)) in __udp_enqueue_schedule_skb()
1483 if (size >= sk->sk_forward_alloc) { in __udp_enqueue_schedule_skb()
1486 if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) { in __udp_enqueue_schedule_skb()
1492 sk->sk_forward_alloc += delta; in __udp_enqueue_schedule_skb()
1495 sk->sk_forward_alloc -= size; in __udp_enqueue_schedule_skb()
1500 sock_skb_set_dropcount(sk, skb); in __udp_enqueue_schedule_skb()
1505 if (!sock_flag(sk, SOCK_DEAD)) in __udp_enqueue_schedule_skb()
1506 sk->sk_data_ready(sk); in __udp_enqueue_schedule_skb()
1512 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
1515 atomic_inc(&sk->sk_drops); in __udp_enqueue_schedule_skb()
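The enqueue path charges skb->truesize against sk_rmem_alloc before linking the skb, rechecks the receive-buffer limit after the atomic add, and tops up sk_forward_alloc in whole quanta when the charge exceeds what is already accounted. A plain-int model of that sequence (shape only; the kernel does this with atomics and under the queue lock):

```c
/* Model of the admission test in __udp_enqueue_schedule_skb():
 * cheap early reject, charge, recheck, then quantum-sized top-up. */
#include <stdbool.h>
#include <stdio.h>

#define SK_MEM_QUANTUM 4096

struct demo_sock { int rmem_alloc, rcvbuf, forward_alloc; };

static bool demo_enqueue(struct demo_sock *sk, int truesize)
{
	if (sk->rmem_alloc > sk->rcvbuf)	/* cheap early reject */
		return false;

	sk->rmem_alloc += truesize;		/* charge the skb */
	if (sk->rmem_alloc > truesize + sk->rcvbuf) {	/* recheck after charge */
		sk->rmem_alloc -= truesize;
		return false;
	}
	if (truesize >= sk->forward_alloc) {	/* need more accounted memory */
		int amt = (truesize + SK_MEM_QUANTUM - 1) / SK_MEM_QUANTUM;

		sk->forward_alloc += amt * SK_MEM_QUANTUM;	/* raise allocated */
	}
	sk->forward_alloc -= truesize;
	return true;		/* queued; udp_rmem_release() undoes this later */
}

int main(void)
{
	struct demo_sock sk = { .rmem_alloc = 0, .rcvbuf = 8192 };

	printf("%d %d %d\n",
	       demo_enqueue(&sk, 6000),		/* 1: admitted */
	       demo_enqueue(&sk, 6000),		/* 1: recheck allows size + rcvbuf */
	       demo_enqueue(&sk, 6000));	/* 0: early reject, rmem > rcvbuf */
	return 0;
}
```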
1521 void udp_destruct_sock(struct sock *sk) in udp_destruct_sock() argument
1524 struct udp_sock *up = udp_sk(sk); in udp_destruct_sock()
1528 skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue); in udp_destruct_sock()
1533 udp_rmem_release(sk, total, 0, true); in udp_destruct_sock()
1535 inet_sock_destruct(sk); in udp_destruct_sock()
1539 int udp_init_sock(struct sock *sk) in udp_init_sock() argument
1541 skb_queue_head_init(&udp_sk(sk)->reader_queue); in udp_init_sock()
1542 sk->sk_destruct = udp_destruct_sock; in udp_init_sock()
1547 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len) in skb_consume_udp() argument
1549 if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) { in skb_consume_udp()
1550 bool slow = lock_sock_fast(sk); in skb_consume_udp()
1552 sk_peek_offset_bwd(sk, len); in skb_consume_udp()
1553 unlock_sock_fast(sk, slow); in skb_consume_udp()
1568 static struct sk_buff *__first_packet_length(struct sock *sk, in __first_packet_length() argument
1576 __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, in __first_packet_length()
1577 IS_UDPLITE(sk)); in __first_packet_length()
1578 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, in __first_packet_length()
1579 IS_UDPLITE(sk)); in __first_packet_length()
1580 atomic_inc(&sk->sk_drops); in __first_packet_length()
1599 static int first_packet_length(struct sock *sk) in first_packet_length() argument
1601 struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue; in first_packet_length()
1602 struct sk_buff_head *sk_queue = &sk->sk_receive_queue; in first_packet_length()
1608 skb = __first_packet_length(sk, rcvq, &total); in first_packet_length()
1614 skb = __first_packet_length(sk, rcvq, &total); in first_packet_length()
1618 udp_rmem_release(sk, total, 1, false); in first_packet_length()
1627 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) in udp_ioctl() argument
1632 int amount = sk_wmem_alloc_get(sk); in udp_ioctl()
1639 int amount = max_t(int, 0, first_packet_length(sk)); in udp_ioctl()
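SIOCINQ/FIONREAD on a UDP socket (line 1639) reports the payload length of the first pending datagram via first_packet_length(), not the total number of queued bytes:

```c
/* FIONREAD on UDP = size of the *first* pending datagram. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int rx = socket(AF_INET, SOCK_DGRAM, 0);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	socklen_t alen = sizeof(a);
	int n = 0;

	bind(rx, (struct sockaddr *)&a, sizeof(a));	/* kernel picks a port */
	getsockname(rx, (struct sockaddr *)&a, &alen);

	sendto(tx, "12345", 5, 0, (struct sockaddr *)&a, sizeof(a));
	sendto(tx, "1234567890", 10, 0, (struct sockaddr *)&a, sizeof(a));
	usleep(50000);

	ioctl(rx, FIONREAD, &n);
	printf("FIONREAD = %d\n", n);	/* 5: first datagram only, not 15 */
	return 0;
}
```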
1652 struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, in __skb_recv_udp() argument
1655 struct sk_buff_head *sk_queue = &sk->sk_receive_queue; in __skb_recv_udp()
1661 queue = &udp_sk(sk)->reader_queue; in __skb_recv_udp()
1663 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); in __skb_recv_udp()
1667 error = sock_error(sk); in __skb_recv_udp()
1674 skb = __skb_try_recv_from_queue(sk, queue, flags, in __skb_recv_udp()
1695 skb = __skb_try_recv_from_queue(sk, queue, flags, in __skb_recv_udp()
1704 if (!sk_can_busy_loop(sk)) in __skb_recv_udp()
1707 sk_busy_loop(sk, flags & MSG_DONTWAIT); in __skb_recv_udp()
1712 !__skb_wait_for_more_packets(sk, &error, &timeo, in __skb_recv_udp()
1725 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, in udp_recvmsg() argument
1728 struct inet_sock *inet = inet_sk(sk); in udp_recvmsg()
1733 int is_udplite = IS_UDPLITE(sk); in udp_recvmsg()
1737 return ip_recv_error(sk, msg, len, addr_len); in udp_recvmsg()
1740 off = sk_peek_offset(sk, flags); in udp_recvmsg()
1741 skb = __skb_recv_udp(sk, flags, noblock, &off, &err); in udp_recvmsg()
1780 atomic_inc(&sk->sk_drops); in udp_recvmsg()
1781 UDP_INC_STATS(sock_net(sk), in udp_recvmsg()
1789 UDP_INC_STATS(sock_net(sk), in udp_recvmsg()
1792 sock_recv_ts_and_drops(msg, sk, skb); in udp_recvmsg()
1803 BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, in udp_recvmsg()
1807 if (udp_sk(sk)->gro_enabled) in udp_recvmsg()
1808 udp_cmsg_recv(msg, sk, skb); in udp_recvmsg()
1811 ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off); in udp_recvmsg()
1817 skb_consume_udp(sk, skb, peeking ? -err : err); in udp_recvmsg()
1821 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, in udp_recvmsg()
1823 UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); in udp_recvmsg()
1824 UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in udp_recvmsg()
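MSG_PEEK leaves the datagram on reader_queue: the `peeking ? -err : err` bookkeeping passed to skb_consume_udp() on line 1817 adjusts the peek offset instead of releasing queue space, so the next plain recv() delivers the same datagram:

```c
/* Peek, then read: the same datagram is delivered twice. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int rx = socket(AF_INET, SOCK_DGRAM, 0);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	socklen_t alen = sizeof(a);
	char buf[16];

	bind(rx, (struct sockaddr *)&a, sizeof(a));
	getsockname(rx, (struct sockaddr *)&a, &alen);
	sendto(tx, "once", 4, 0, (struct sockaddr *)&a, sizeof(a));
	usleep(50000);

	ssize_t n1 = recv(rx, buf, sizeof(buf), MSG_PEEK);	/* stays queued */
	ssize_t n2 = recv(rx, buf, sizeof(buf), 0);		/* now consumed */

	printf("peeked %zd, read %zd\n", n1, n2);		/* 4, 4 */
	return 0;
}
```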
1834 int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) in udp_pre_connect() argument
1843 return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr); in udp_pre_connect()
1847 int __udp_disconnect(struct sock *sk, int flags) in __udp_disconnect() argument
1849 struct inet_sock *inet = inet_sk(sk); in __udp_disconnect()
1854 sk->sk_state = TCP_CLOSE; in __udp_disconnect()
1857 sock_rps_reset_rxhash(sk); in __udp_disconnect()
1858 sk->sk_bound_dev_if = 0; in __udp_disconnect()
1859 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) in __udp_disconnect()
1860 inet_reset_saddr(sk); in __udp_disconnect()
1862 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { in __udp_disconnect()
1863 sk->sk_prot->unhash(sk); in __udp_disconnect()
1866 sk_dst_reset(sk); in __udp_disconnect()
1871 int udp_disconnect(struct sock *sk, int flags) in udp_disconnect() argument
1873 lock_sock(sk); in udp_disconnect()
1874 __udp_disconnect(sk, flags); in udp_disconnect()
1875 release_sock(sk); in udp_disconnect()
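udp_disconnect() is reached from userspace by calling connect() with an AF_UNSPEC address: the socket drops back to TCP_CLOSE, and lines 1859-1863 clear the auto-selected source address and, if the port was not explicitly bound, unhash the socket. A sketch (destination 127.0.0.1:5599 arbitrary):

```c
/* Dissolving a UDP association with connect(AF_UNSPEC). */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(5599) };
	struct sockaddr sa_unspec = { .sa_family = AF_UNSPEC };

	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));	/* associate */
	connect(fd, &sa_unspec, sizeof(sa_unspec));		/* __udp_disconnect() */

	/* unconnected again: sendto() with explicit addresses works once more */
	sendto(fd, "hi", 2, 0, (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return 0;
}
```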
1880 void udp_lib_unhash(struct sock *sk) in udp_lib_unhash() argument
1882 if (sk_hashed(sk)) { in udp_lib_unhash()
1883 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_unhash()
1886 hslot = udp_hashslot(udptable, sock_net(sk), in udp_lib_unhash()
1887 udp_sk(sk)->udp_port_hash); in udp_lib_unhash()
1888 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_unhash()
1891 if (rcu_access_pointer(sk->sk_reuseport_cb)) in udp_lib_unhash()
1892 reuseport_detach_sock(sk); in udp_lib_unhash()
1893 if (sk_del_node_init_rcu(sk)) { in udp_lib_unhash()
1895 inet_sk(sk)->inet_num = 0; in udp_lib_unhash()
1896 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); in udp_lib_unhash()
1899 hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); in udp_lib_unhash()
1911 void udp_lib_rehash(struct sock *sk, u16 newhash) in udp_lib_rehash() argument
1913 if (sk_hashed(sk)) { in udp_lib_rehash()
1914 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_rehash()
1917 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_rehash()
1919 udp_sk(sk)->udp_portaddr_hash = newhash; in udp_lib_rehash()
1922 rcu_access_pointer(sk->sk_reuseport_cb)) { in udp_lib_rehash()
1923 hslot = udp_hashslot(udptable, sock_net(sk), in udp_lib_rehash()
1924 udp_sk(sk)->udp_port_hash); in udp_lib_rehash()
1927 if (rcu_access_pointer(sk->sk_reuseport_cb)) in udp_lib_rehash()
1928 reuseport_detach_sock(sk); in udp_lib_rehash()
1932 hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); in udp_lib_rehash()
1937 hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_rehash()
1949 void udp_v4_rehash(struct sock *sk) in udp_v4_rehash() argument
1951 u16 new_hash = ipv4_portaddr_hash(sock_net(sk), in udp_v4_rehash()
1952 inet_sk(sk)->inet_rcv_saddr, in udp_v4_rehash()
1953 inet_sk(sk)->inet_num); in udp_v4_rehash()
1954 udp_lib_rehash(sk, new_hash); in udp_v4_rehash()
1957 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in __udp_queue_rcv_skb() argument
1961 if (inet_sk(sk)->inet_daddr) { in __udp_queue_rcv_skb()
1962 sock_rps_save_rxhash(sk, skb); in __udp_queue_rcv_skb()
1963 sk_mark_napi_id(sk, skb); in __udp_queue_rcv_skb()
1964 sk_incoming_cpu_update(sk); in __udp_queue_rcv_skb()
1966 sk_mark_napi_id_once(sk, skb); in __udp_queue_rcv_skb()
1969 rc = __udp_enqueue_schedule_skb(sk, skb); in __udp_queue_rcv_skb()
1971 int is_udplite = IS_UDPLITE(sk); in __udp_queue_rcv_skb()
1975 UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, in __udp_queue_rcv_skb()
1977 UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in __udp_queue_rcv_skb()
1979 trace_udp_fail_queue_rcv_skb(rc, sk); in __udp_queue_rcv_skb()
1994 static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) in udp_queue_rcv_one_skb() argument
1996 struct udp_sock *up = udp_sk(sk); in udp_queue_rcv_one_skb()
1997 int is_udplite = IS_UDPLITE(sk); in udp_queue_rcv_one_skb()
2002 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) in udp_queue_rcv_one_skb()
2007 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); in udp_queue_rcv_one_skb()
2029 ret = encap_rcv(sk, skb); in udp_queue_rcv_one_skb()
2031 __UDP_INC_STATS(sock_net(sk), in udp_queue_rcv_one_skb()
2075 prefetch(&sk->sk_rmem_alloc); in udp_queue_rcv_one_skb()
2076 if (rcu_access_pointer(sk->sk_filter) && in udp_queue_rcv_one_skb()
2080 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) in udp_queue_rcv_one_skb()
2085 ipv4_pktinfo_prepare(sk, skb); in udp_queue_rcv_one_skb()
2086 return __udp_queue_rcv_skb(sk, skb); in udp_queue_rcv_one_skb()
2089 __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); in udp_queue_rcv_one_skb()
2091 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in udp_queue_rcv_one_skb()
2092 atomic_inc(&sk->sk_drops); in udp_queue_rcv_one_skb()
2097 static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in udp_queue_rcv_skb() argument
2102 if (likely(!udp_unexpected_gso(sk, skb))) in udp_queue_rcv_skb()
2103 return udp_queue_rcv_one_skb(sk, skb); in udp_queue_rcv_skb()
2107 segs = udp_rcv_segment(sk, skb, true); in udp_queue_rcv_skb()
2111 ret = udp_queue_rcv_one_skb(sk, skb); in udp_queue_rcv_skb()
2121 bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) in udp_sk_rx_dst_set() argument
2126 old = xchg(&sk->sk_rx_dst, dst); in udp_sk_rx_dst_set()
2145 struct sock *sk, *first = NULL; in __udp4_lib_mcast_deliver() local
2149 unsigned int offset = offsetof(typeof(*sk), sk_node); in __udp4_lib_mcast_deliver()
2161 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); in __udp4_lib_mcast_deliver()
2164 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { in __udp4_lib_mcast_deliver()
2165 if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr, in __udp4_lib_mcast_deliver()
2170 first = sk; in __udp4_lib_mcast_deliver()
2176 atomic_inc(&sk->sk_drops); in __udp4_lib_mcast_deliver()
2178 IS_UDPLITE(sk)); in __udp4_lib_mcast_deliver()
2180 IS_UDPLITE(sk)); in __udp4_lib_mcast_deliver()
2183 if (udp_queue_rcv_skb(sk, nskb) > 0) in __udp4_lib_mcast_deliver()
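__udp4_lib_mcast_deliver() walks the slot and clones the skb once per matching socket, so every group member receives its own copy. A runnable illustration, assuming multicast loopback is enabled (the default) and using the arbitrary group 239.1.1.1:5599; multi-homed hosts may additionally need IP_MULTICAST_IF:

```c
/* Multicast fanout: two group members each get their own copy. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

static int member(unsigned short port)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0), on = 1;
	struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = htons(port),
				 .sin_addr.s_addr = htonl(INADDR_ANY) };
	struct ip_mreq mr = { .imr_interface.s_addr = htonl(INADDR_ANY) };

	inet_pton(AF_INET, "239.1.1.1", &mr.imr_multiaddr);
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&a, sizeof(a));
	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr, sizeof(mr));
	return fd;
}

int main(void)
{
	int rx1 = member(5599), rx2 = member(5599);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in grp = { .sin_family = AF_INET, .sin_port = htons(5599) };
	char buf[16];

	inet_pton(AF_INET, "239.1.1.1", &grp.sin_addr);
	sendto(tx, "fanout", 6, 0, (struct sockaddr *)&grp, sizeof(grp));
	usleep(50000);

	printf("rx1 got %zd, rx2 got %zd bytes\n",
	       recv(rx1, buf, sizeof(buf), MSG_DONTWAIT),
	       recv(rx2, buf, sizeof(buf), MSG_DONTWAIT));	/* both 6 */
	return 0;
}
```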
2254 static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, in udp_unicast_rcv_skb() argument
2259 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) in udp_unicast_rcv_skb()
2262 ret = udp_queue_rcv_skb(sk, skb); in udp_unicast_rcv_skb()
2279 struct sock *sk; in __udp4_lib_rcv() local
2310 sk = skb_steal_sock(skb); in __udp4_lib_rcv()
2311 if (sk) { in __udp4_lib_rcv()
2315 if (unlikely(sk->sk_rx_dst != dst)) in __udp4_lib_rcv()
2316 udp_sk_rx_dst_set(sk, dst); in __udp4_lib_rcv()
2318 ret = udp_unicast_rcv_skb(sk, skb, uh); in __udp4_lib_rcv()
2319 sock_put(sk); in __udp4_lib_rcv()
2327 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); in __udp4_lib_rcv()
2328 if (sk) in __udp4_lib_rcv()
2329 return udp_unicast_rcv_skb(sk, skb, uh); in __udp4_lib_rcv()
2381 struct sock *sk, *result; in __udp4_lib_mcast_demux_lookup() local
2391 sk_for_each_rcu(sk, &hslot->head) { in __udp4_lib_mcast_demux_lookup()
2392 if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr, in __udp4_lib_mcast_demux_lookup()
2396 result = sk; in __udp4_lib_mcast_demux_lookup()
2418 struct sock *sk; in __udp4_lib_demux_lookup() local
2420 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { in __udp4_lib_demux_lookup()
2421 if (INET_MATCH(sk, net, acookie, rmt_addr, in __udp4_lib_demux_lookup()
2423 return sk; in __udp4_lib_demux_lookup()
2436 struct sock *sk = NULL; in udp_v4_early_demux() local
2460 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, in udp_v4_early_demux()
2464 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, in udp_v4_early_demux()
2468 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) in udp_v4_early_demux()
2471 skb->sk = sk; in udp_v4_early_demux()
2473 dst = READ_ONCE(sk->sk_rx_dst); in udp_v4_early_demux()
2489 if (!inet_sk(sk)->inet_daddr && in_dev) in udp_v4_early_demux()
2502 void udp_destroy_sock(struct sock *sk) in udp_destroy_sock() argument
2504 struct udp_sock *up = udp_sk(sk); in udp_destroy_sock()
2505 bool slow = lock_sock_fast(sk); in udp_destroy_sock()
2506 udp_flush_pending_frames(sk); in udp_destroy_sock()
2507 unlock_sock_fast(sk, slow); in udp_destroy_sock()
2510 void (*encap_destroy)(struct sock *sk); in udp_destroy_sock()
2513 encap_destroy(sk); in udp_destroy_sock()
2523 int udp_lib_setsockopt(struct sock *sk, int level, int optname, in udp_lib_setsockopt() argument
2527 struct udp_sock *up = udp_sk(sk); in udp_lib_setsockopt()
2530 int is_udplite = IS_UDPLITE(sk); in udp_lib_setsockopt()
2546 lock_sock(sk); in udp_lib_setsockopt()
2547 push_pending_frames(sk); in udp_lib_setsockopt()
2548 release_sock(sk); in udp_lib_setsockopt()
2561 lock_sock(sk); in udp_lib_setsockopt()
2562 udp_tunnel_encap_enable(sk->sk_socket); in udp_lib_setsockopt()
2563 release_sock(sk); in udp_lib_setsockopt()
2586 lock_sock(sk); in udp_lib_setsockopt()
2588 udp_tunnel_encap_enable(sk->sk_socket); in udp_lib_setsockopt()
2590 release_sock(sk); in udp_lib_setsockopt()
2632 int udp_setsockopt(struct sock *sk, int level, int optname, in udp_setsockopt() argument
2636 return udp_lib_setsockopt(sk, level, optname, optval, optlen, in udp_setsockopt()
2638 return ip_setsockopt(sk, level, optname, optval, optlen); in udp_setsockopt()
2642 int compat_udp_setsockopt(struct sock *sk, int level, int optname, in compat_udp_setsockopt() argument
2646 return udp_lib_setsockopt(sk, level, optname, optval, optlen, in compat_udp_setsockopt()
2648 return compat_ip_setsockopt(sk, level, optname, optval, optlen); in compat_udp_setsockopt()
2652 int udp_lib_getsockopt(struct sock *sk, int level, int optname, in udp_lib_getsockopt() argument
2655 struct udp_sock *up = udp_sk(sk); in udp_lib_getsockopt()
2709 int udp_getsockopt(struct sock *sk, int level, int optname, in udp_getsockopt() argument
2713 return udp_lib_getsockopt(sk, level, optname, optval, optlen); in udp_getsockopt()
2714 return ip_getsockopt(sk, level, optname, optval, optlen); in udp_getsockopt()
2718 int compat_udp_getsockopt(struct sock *sk, int level, int optname, in compat_udp_getsockopt() argument
2722 return udp_lib_getsockopt(sk, level, optname, optval, optlen); in compat_udp_getsockopt()
2723 return compat_ip_getsockopt(sk, level, optname, optval, optlen); in compat_udp_getsockopt()
2742 struct sock *sk = sock->sk; in udp_poll() local
2744 if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) in udp_poll()
2749 !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) in udp_poll()
2757 int udp_abort(struct sock *sk, int err) in udp_abort() argument
2759 lock_sock(sk); in udp_abort()
2761 sk->sk_err = err; in udp_abort()
2762 sk->sk_error_report(sk); in udp_abort()
2763 __udp_disconnect(sk, 0); in udp_abort()
2765 release_sock(sk); in udp_abort()
2810 struct sock *sk; in udp_get_first() local
2823 sk_for_each(sk, &hslot->head) { in udp_get_first()
2824 if (!net_eq(sock_net(sk), net)) in udp_get_first()
2826 if (sk->sk_family == afinfo->family) in udp_get_first()
2831 sk = NULL; in udp_get_first()
2833 return sk; in udp_get_first()
2836 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) in udp_get_next() argument
2843 sk = sk_next(sk); in udp_get_next()
2844 } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family)); in udp_get_next()
2846 if (!sk) { in udp_get_next()
2851 return sk; in udp_get_next()
2856 struct sock *sk = udp_get_first(seq, 0); in udp_get_idx() local
2858 if (sk) in udp_get_idx()
2859 while (pos && (sk = udp_get_next(seq, sk)) != NULL) in udp_get_idx()
2861 return pos ? NULL : sk; in udp_get_idx()
2875 struct sock *sk; in udp_seq_next() local
2878 sk = udp_get_idx(seq, 0); in udp_seq_next()
2880 sk = udp_get_next(seq, v); in udp_seq_next()
2883 return sk; in udp_seq_next()
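udp_get_first()/udp_get_next() are the seq_file iterators behind /proc/net/udp: they walk every hash slot, skipping sockets that belong to other network namespaces or address families. Each line of the file is one socket from this walk:

```c
/* Dump /proc/net/udp, the userspace-visible product of the iterators
 * above; each row is one socket (addresses and ports in hex). */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/udp", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* sl, local/rem addr:port, queues, ... */
	fclose(f);
	return 0;
}
```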