Lines matching refs: sk (net/core/sock.c)
Each entry gives the source line number, the matching line of code, and the enclosing function; "argument" or "local" marks whether sk is a parameter of, or a local variable in, that function.
158 bool sk_ns_capable(const struct sock *sk, in sk_ns_capable() argument
161 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && in sk_ns_capable()
175 bool sk_capable(const struct sock *sk, int cap) in sk_capable() argument
177 return sk_ns_capable(sk, &init_user_ns, cap); in sk_capable()
190 bool sk_net_capable(const struct sock *sk, int cap) in sk_net_capable() argument
192 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); in sk_net_capable()
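
The three helpers above wrap file_ns_capable() at different namespace scopes. A minimal, hypothetical sketch of the usual call pattern inside an option or ioctl handler (the surrounding handler and the chosen capability are only illustrative):

        /* Reject callers lacking CAP_NET_ADMIN relative to the socket's own
         * network namespace; sk_capable() would check against init_user_ns
         * instead. */
        if (!sk_net_capable(sk, CAP_NET_ADMIN))
                return -EPERM;
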
294 void sk_set_memalloc(struct sock *sk) in sk_set_memalloc() argument
296 sock_set_flag(sk, SOCK_MEMALLOC); in sk_set_memalloc()
297 sk->sk_allocation |= __GFP_MEMALLOC; in sk_set_memalloc()
302 void sk_clear_memalloc(struct sock *sk) in sk_clear_memalloc() argument
304 sock_reset_flag(sk, SOCK_MEMALLOC); in sk_clear_memalloc()
305 sk->sk_allocation &= ~__GFP_MEMALLOC; in sk_clear_memalloc()
315 sk_mem_reclaim(sk); in sk_clear_memalloc()
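
sk_set_memalloc()/sk_clear_memalloc() let a socket's allocations dip into memory reserves, which matters when the socket itself is the path for swapping pages out (the pattern used by network block transports). A hedged sketch of how such a transport might bracket its data socket; "sock" is assumed to be an already connected kernel socket:

        sk_set_memalloc(sock->sk);      /* allow __GFP_MEMALLOC allocations */
        /* ... run swap/block traffic over this socket ... */
        sk_clear_memalloc(sock->sk);    /* also reclaims forward-allocated memory */
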
319 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in __sk_backlog_rcv() argument
325 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); in __sk_backlog_rcv()
328 ret = sk->sk_backlog_rcv(sk, skb); in __sk_backlog_rcv()
417 static bool sock_needs_netstamp(const struct sock *sk) in sock_needs_netstamp() argument
419 switch (sk->sk_family) { in sock_needs_netstamp()
428 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) in sock_disable_timestamp() argument
430 if (sk->sk_flags & flags) { in sock_disable_timestamp()
431 sk->sk_flags &= ~flags; in sock_disable_timestamp()
432 if (sock_needs_netstamp(sk) && in sock_disable_timestamp()
433 !(sk->sk_flags & SK_FLAGS_TIMESTAMP)) in sock_disable_timestamp()
439 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in __sock_queue_rcv_skb() argument
442 struct sk_buff_head *list = &sk->sk_receive_queue; in __sock_queue_rcv_skb()
444 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { in __sock_queue_rcv_skb()
445 atomic_inc(&sk->sk_drops); in __sock_queue_rcv_skb()
446 trace_sock_rcvqueue_full(sk, skb); in __sock_queue_rcv_skb()
450 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { in __sock_queue_rcv_skb()
451 atomic_inc(&sk->sk_drops); in __sock_queue_rcv_skb()
456 skb_set_owner_r(skb, sk); in __sock_queue_rcv_skb()
464 sock_skb_set_dropcount(sk, skb); in __sock_queue_rcv_skb()
468 if (!sock_flag(sk, SOCK_DEAD)) in __sock_queue_rcv_skb()
469 sk->sk_data_ready(sk); in __sock_queue_rcv_skb()
474 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_rcv_skb() argument
478 err = sk_filter(sk, skb); in sock_queue_rcv_skb()
482 return __sock_queue_rcv_skb(sk, skb); in sock_queue_rcv_skb()
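
sock_queue_rcv_skb() runs the socket filter, charges the skb to the receive buffer and queues it, waking the reader through sk_data_ready(). A minimal sketch of a protocol receive handler using it; the function name is made up, but the free-on-error convention matches how in-tree callers treat the return value:

        #include <linux/skbuff.h>
        #include <linux/netdevice.h>
        #include <net/sock.h>

        static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
        {
                /* On failure the skb was not consumed and must be freed here. */
                if (sock_queue_rcv_skb(sk, skb) < 0) {
                        kfree_skb(skb);
                        return NET_RX_DROP;
                }
                return NET_RX_SUCCESS;
        }
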
486 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, in __sk_receive_skb() argument
491 if (sk_filter_trim_cap(sk, skb, trim_cap)) in __sk_receive_skb()
496 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { in __sk_receive_skb()
497 atomic_inc(&sk->sk_drops); in __sk_receive_skb()
501 bh_lock_sock_nested(sk); in __sk_receive_skb()
503 bh_lock_sock(sk); in __sk_receive_skb()
504 if (!sock_owned_by_user(sk)) { in __sk_receive_skb()
508 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); in __sk_receive_skb()
510 rc = sk_backlog_rcv(sk, skb); in __sk_receive_skb()
512 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in __sk_receive_skb()
513 } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) { in __sk_receive_skb()
514 bh_unlock_sock(sk); in __sk_receive_skb()
515 atomic_inc(&sk->sk_drops); in __sk_receive_skb()
519 bh_unlock_sock(sk); in __sk_receive_skb()
522 sock_put(sk); in __sk_receive_skb()
530 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) in __sk_dst_check() argument
532 struct dst_entry *dst = __sk_dst_get(sk); in __sk_dst_check()
535 sk_tx_queue_clear(sk); in __sk_dst_check()
536 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); in __sk_dst_check()
537 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); in __sk_dst_check()
546 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) in sk_dst_check() argument
548 struct dst_entry *dst = sk_dst_get(sk); in sk_dst_check()
551 sk_dst_reset(sk); in sk_dst_check()
560 static int sock_bindtoindex_locked(struct sock *sk, int ifindex) in sock_bindtoindex_locked() argument
564 struct net *net = sock_net(sk); in sock_bindtoindex_locked()
568 if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW)) in sock_bindtoindex_locked()
575 sk->sk_bound_dev_if = ifindex; in sock_bindtoindex_locked()
576 if (sk->sk_prot->rehash) in sock_bindtoindex_locked()
577 sk->sk_prot->rehash(sk); in sock_bindtoindex_locked()
578 sk_dst_reset(sk); in sock_bindtoindex_locked()
588 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk) in sock_bindtoindex() argument
593 lock_sock(sk); in sock_bindtoindex()
594 ret = sock_bindtoindex_locked(sk, ifindex); in sock_bindtoindex()
596 release_sock(sk); in sock_bindtoindex()
602 static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen) in sock_setbindtodevice() argument
606 struct net *net = sock_net(sk); in sock_setbindtodevice()
641 return sock_bindtoindex(sk, index, true); in sock_setbindtodevice()
648 static int sock_getbindtodevice(struct sock *sk, sockptr_t optval, in sock_getbindtodevice() argument
653 struct net *net = sock_net(sk); in sock_getbindtodevice()
656 if (sk->sk_bound_dev_if == 0) { in sock_getbindtodevice()
665 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if); in sock_getbindtodevice()
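
sock_bindtoindex() is the in-kernel counterpart of SO_BINDTODEVICE: it takes an ifindex directly and can take the socket lock itself. A short sketch, assuming "sock" is a kernel socket and "dev" a held net_device:

        int err;

        err = sock_bindtoindex(sock->sk, dev->ifindex, true /* lock_sk */);
        if (err)
                return err;     /* e.g. -EPERM when rebinding without CAP_NET_RAW */
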
688 bool sk_mc_loop(struct sock *sk) in sk_mc_loop() argument
692 if (!sk) in sk_mc_loop()
695 switch (READ_ONCE(sk->sk_family)) { in sk_mc_loop()
697 return inet_sk(sk)->mc_loop; in sk_mc_loop()
700 return inet6_sk(sk)->mc_loop; in sk_mc_loop()
708 void sock_set_reuseaddr(struct sock *sk) in sock_set_reuseaddr() argument
710 lock_sock(sk); in sock_set_reuseaddr()
711 sk->sk_reuse = SK_CAN_REUSE; in sock_set_reuseaddr()
712 release_sock(sk); in sock_set_reuseaddr()
716 void sock_set_reuseport(struct sock *sk) in sock_set_reuseport() argument
718 lock_sock(sk); in sock_set_reuseport()
719 sk->sk_reuseport = true; in sock_set_reuseport()
720 release_sock(sk); in sock_set_reuseport()
724 void sock_no_linger(struct sock *sk) in sock_no_linger() argument
726 lock_sock(sk); in sock_no_linger()
727 sk->sk_lingertime = 0; in sock_no_linger()
728 sock_set_flag(sk, SOCK_LINGER); in sock_no_linger()
729 release_sock(sk); in sock_no_linger()
733 void sock_set_priority(struct sock *sk, u32 priority) in sock_set_priority() argument
735 lock_sock(sk); in sock_set_priority()
736 sk->sk_priority = priority; in sock_set_priority()
737 release_sock(sk); in sock_set_priority()
741 void sock_set_sndtimeo(struct sock *sk, s64 secs) in sock_set_sndtimeo() argument
743 lock_sock(sk); in sock_set_sndtimeo()
745 sk->sk_sndtimeo = secs * HZ; in sock_set_sndtimeo()
747 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; in sock_set_sndtimeo()
748 release_sock(sk); in sock_set_sndtimeo()
752 static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns) in __sock_set_timestamps() argument
755 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new); in __sock_set_timestamps()
756 sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns); in __sock_set_timestamps()
757 sock_set_flag(sk, SOCK_RCVTSTAMP); in __sock_set_timestamps()
758 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in __sock_set_timestamps()
760 sock_reset_flag(sk, SOCK_RCVTSTAMP); in __sock_set_timestamps()
761 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); in __sock_set_timestamps()
765 void sock_enable_timestamps(struct sock *sk) in sock_enable_timestamps() argument
767 lock_sock(sk); in sock_enable_timestamps()
768 __sock_set_timestamps(sk, true, false, true); in sock_enable_timestamps()
769 release_sock(sk); in sock_enable_timestamps()
773 void sock_set_keepalive(struct sock *sk) in sock_set_keepalive() argument
775 lock_sock(sk); in sock_set_keepalive()
776 if (sk->sk_prot->keepalive) in sock_set_keepalive()
777 sk->sk_prot->keepalive(sk, true); in sock_set_keepalive()
778 sock_valbool_flag(sk, SOCK_KEEPOPEN, true); in sock_set_keepalive()
779 release_sock(sk); in sock_set_keepalive()
783 static void __sock_set_rcvbuf(struct sock *sk, int val) in __sock_set_rcvbuf() argument
789 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; in __sock_set_rcvbuf()
801 WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); in __sock_set_rcvbuf()
804 void sock_set_rcvbuf(struct sock *sk, int val) in sock_set_rcvbuf() argument
806 lock_sock(sk); in sock_set_rcvbuf()
807 __sock_set_rcvbuf(sk, val); in sock_set_rcvbuf()
808 release_sock(sk); in sock_set_rcvbuf()
812 static void __sock_set_mark(struct sock *sk, u32 val) in __sock_set_mark() argument
814 if (val != sk->sk_mark) { in __sock_set_mark()
815 sk->sk_mark = val; in __sock_set_mark()
816 sk_dst_reset(sk); in __sock_set_mark()
820 void sock_set_mark(struct sock *sk, u32 val) in sock_set_mark() argument
822 lock_sock(sk); in sock_set_mark()
823 __sock_set_mark(sk, val); in sock_set_mark()
824 release_sock(sk); in sock_set_mark()
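
The lock_sock()-wrapped setters above exist so in-kernel socket users can flip common options without going through sock_setsockopt(). A hedged sketch of a kernel user configuring its own TCP socket this way; the function name and error handling are illustrative only:

        #include <linux/net.h>
        #include <linux/in.h>
        #include <net/sock.h>

        static int my_open_socket(struct net *net, struct socket **res)
        {
                struct socket *sock;
                int err;

                err = sock_create_kern(net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
                if (err)
                        return err;

                sock_set_reuseaddr(sock->sk);        /* SO_REUSEADDR */
                sock_set_keepalive(sock->sk);        /* SO_KEEPALIVE */
                sock_set_sndtimeo(sock->sk, 5);      /* 5 second send timeout */
                sock_set_priority(sock->sk, 6);      /* SO_PRIORITY */

                *res = sock;
                return 0;
        }
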
837 struct sock *sk = sock->sk; in sock_setsockopt() local
848 return sock_setbindtodevice(sk, optval, optlen); in sock_setsockopt()
858 lock_sock(sk); in sock_setsockopt()
865 sock_valbool_flag(sk, SOCK_DBG, valbool); in sock_setsockopt()
868 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); in sock_setsockopt()
871 sk->sk_reuseport = valbool; in sock_setsockopt()
880 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); in sock_setsockopt()
881 sk_dst_reset(sk); in sock_setsockopt()
884 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); in sock_setsockopt()
898 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; in sock_setsockopt()
899 WRITE_ONCE(sk->sk_sndbuf, in sock_setsockopt()
902 sk->sk_write_space(sk); in sock_setsockopt()
924 __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max))); in sock_setsockopt()
936 __sock_set_rcvbuf(sk, max(val, 0)); in sock_setsockopt()
940 if (sk->sk_prot->keepalive) in sock_setsockopt()
941 sk->sk_prot->keepalive(sk, valbool); in sock_setsockopt()
942 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); in sock_setsockopt()
946 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); in sock_setsockopt()
950 sk->sk_no_check_tx = valbool; in sock_setsockopt()
955 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in sock_setsockopt()
956 sk->sk_priority = val; in sock_setsockopt()
971 sock_reset_flag(sk, SOCK_LINGER); in sock_setsockopt()
975 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; in sock_setsockopt()
978 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; in sock_setsockopt()
979 sock_set_flag(sk, SOCK_LINGER); in sock_setsockopt()
994 __sock_set_timestamps(sk, valbool, false, false); in sock_setsockopt()
997 __sock_set_timestamps(sk, valbool, true, false); in sock_setsockopt()
1000 __sock_set_timestamps(sk, valbool, false, true); in sock_setsockopt()
1003 __sock_set_timestamps(sk, valbool, true, true); in sock_setsockopt()
1013 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { in sock_setsockopt()
1014 if (sk->sk_protocol == IPPROTO_TCP && in sock_setsockopt()
1015 sk->sk_type == SOCK_STREAM) { in sock_setsockopt()
1016 if ((1 << sk->sk_state) & in sock_setsockopt()
1021 sk->sk_tskey = tcp_sk(sk)->snd_una; in sock_setsockopt()
1023 sk->sk_tskey = 0; in sock_setsockopt()
1033 sk->sk_tsflags = val; in sock_setsockopt()
1034 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); in sock_setsockopt()
1037 sock_enable_timestamp(sk, in sock_setsockopt()
1040 sock_disable_timestamp(sk, in sock_setsockopt()
1048 ret = sock->ops->set_rcvlowat(sk, val); in sock_setsockopt()
1050 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); in sock_setsockopt()
1055 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, in sock_setsockopt()
1061 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, in sock_setsockopt()
1070 ret = sk_attach_filter(&fprog, sk); in sock_setsockopt()
1082 ret = sk_attach_bpf(ufd, sk); in sock_setsockopt()
1091 ret = sk_reuseport_attach_filter(&fprog, sk); in sock_setsockopt()
1103 ret = sk_reuseport_attach_bpf(ufd, sk); in sock_setsockopt()
1108 ret = reuseport_detach_prog(sk); in sock_setsockopt()
1112 ret = sk_detach_filter(sk); in sock_setsockopt()
1116 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) in sock_setsockopt()
1119 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); in sock_setsockopt()
1129 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { in sock_setsockopt()
1134 __sock_set_mark(sk, val); in sock_setsockopt()
1138 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); in sock_setsockopt()
1142 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); in sock_setsockopt()
1147 ret = sock->ops->set_peek_off(sk, val); in sock_setsockopt()
1153 sock_valbool_flag(sk, SOCK_NOFCS, valbool); in sock_setsockopt()
1157 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); in sock_setsockopt()
1163 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) in sock_setsockopt()
1169 WRITE_ONCE(sk->sk_ll_usec, val); in sock_setsockopt()
1185 cmpxchg(&sk->sk_pacing_status, in sock_setsockopt()
1189 WRITE_ONCE(sk->sk_max_pacing_rate, ulval); in sock_setsockopt()
1190 sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval); in sock_setsockopt()
1194 WRITE_ONCE(sk->sk_incoming_cpu, val); in sock_setsockopt()
1199 dst_negative_advice(sk); in sock_setsockopt()
1203 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { in sock_setsockopt()
1204 if (!((sk->sk_type == SOCK_STREAM && in sock_setsockopt()
1205 sk->sk_protocol == IPPROTO_TCP) || in sock_setsockopt()
1206 (sk->sk_type == SOCK_DGRAM && in sock_setsockopt()
1207 sk->sk_protocol == IPPROTO_UDP))) in sock_setsockopt()
1209 } else if (sk->sk_family != PF_RDS) { in sock_setsockopt()
1216 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool); in sock_setsockopt()
1236 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { in sock_setsockopt()
1240 sock_valbool_flag(sk, SOCK_TXTIME, true); in sock_setsockopt()
1241 sk->sk_clockid = sk_txtime.clockid; in sock_setsockopt()
1242 sk->sk_txtime_deadline_mode = in sock_setsockopt()
1244 sk->sk_txtime_report_errors = in sock_setsockopt()
1249 ret = sock_bindtoindex_locked(sk, val); in sock_setsockopt()
1256 release_sock(sk); in sock_setsockopt()
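
From userspace, the branches above are reached with a plain setsockopt(SOL_SOCKET, ...) call. As one example, the SO_MARK case (gated by the ns_capable(CAP_NET_ADMIN) check above) can be exercised like this; "fd" is assumed to be an already open socket:

        #include <stdio.h>
        #include <sys/socket.h>

        static void set_mark(int fd)
        {
                int mark = 0x1;         /* value copied into sk->sk_mark */

                if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0)
                        perror("SO_MARK");      /* EPERM without CAP_NET_ADMIN */
        }
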
1261 static const struct cred *sk_get_peer_cred(struct sock *sk) in sk_get_peer_cred() argument
1265 spin_lock(&sk->sk_peer_lock); in sk_get_peer_cred()
1266 cred = get_cred(sk->sk_peer_cred); in sk_get_peer_cred()
1267 spin_unlock(&sk->sk_peer_lock); in sk_get_peer_cred()
1300 static int sk_getsockopt(struct sock *sk, int level, int optname, in sk_getsockopt() argument
1303 struct socket *sock = sk->sk_socket; in sk_getsockopt()
1328 v.val = sock_flag(sk, SOCK_DBG); in sk_getsockopt()
1332 v.val = sock_flag(sk, SOCK_LOCALROUTE); in sk_getsockopt()
1336 v.val = sock_flag(sk, SOCK_BROADCAST); in sk_getsockopt()
1340 v.val = READ_ONCE(sk->sk_sndbuf); in sk_getsockopt()
1344 v.val = READ_ONCE(sk->sk_rcvbuf); in sk_getsockopt()
1348 v.val = sk->sk_reuse; in sk_getsockopt()
1352 v.val = sk->sk_reuseport; in sk_getsockopt()
1356 v.val = sock_flag(sk, SOCK_KEEPOPEN); in sk_getsockopt()
1360 v.val = sk->sk_type; in sk_getsockopt()
1364 v.val = sk->sk_protocol; in sk_getsockopt()
1368 v.val = sk->sk_family; in sk_getsockopt()
1372 v.val = -sock_error(sk); in sk_getsockopt()
1374 v.val = xchg(&sk->sk_err_soft, 0); in sk_getsockopt()
1378 v.val = sock_flag(sk, SOCK_URGINLINE); in sk_getsockopt()
1382 v.val = sk->sk_no_check_tx; in sk_getsockopt()
1386 v.val = sk->sk_priority; in sk_getsockopt()
1391 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); in sk_getsockopt()
1392 v.ling.l_linger = sk->sk_lingertime / HZ; in sk_getsockopt()
1399 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && in sk_getsockopt()
1400 !sock_flag(sk, SOCK_TSTAMP_NEW) && in sk_getsockopt()
1401 !sock_flag(sk, SOCK_RCVTSTAMPNS); in sk_getsockopt()
1405 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW); in sk_getsockopt()
1409 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW); in sk_getsockopt()
1413 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW); in sk_getsockopt()
1417 v.val = sk->sk_tsflags; in sk_getsockopt()
1422 lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname); in sk_getsockopt()
1427 lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname); in sk_getsockopt()
1431 v.val = READ_ONCE(sk->sk_rcvlowat); in sk_getsockopt()
1448 spin_lock(&sk->sk_peer_lock); in sk_getsockopt()
1449 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); in sk_getsockopt()
1450 spin_unlock(&sk->sk_peer_lock); in sk_getsockopt()
1462 cred = sk_get_peer_cred(sk); in sk_getsockopt()
1499 v.val = sk->sk_state == TCP_LISTEN; in sk_getsockopt()
1511 v.val = sk->sk_mark; in sk_getsockopt()
1515 v.val = sock_flag(sk, SOCK_RXQ_OVFL); in sk_getsockopt()
1519 v.val = sock_flag(sk, SOCK_WIFI_STATUS); in sk_getsockopt()
1526 v.val = READ_ONCE(sk->sk_peek_off); in sk_getsockopt()
1529 v.val = sock_flag(sk, SOCK_NOFCS); in sk_getsockopt()
1533 return sock_getbindtodevice(sk, optval, optlen, len); in sk_getsockopt()
1536 len = sk_get_filter(sk, optval, len); in sk_getsockopt()
1543 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); in sk_getsockopt()
1551 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); in sk_getsockopt()
1556 v.val = READ_ONCE(sk->sk_ll_usec); in sk_getsockopt()
1564 v.ulval = READ_ONCE(sk->sk_max_pacing_rate); in sk_getsockopt()
1568 READ_ONCE(sk->sk_max_pacing_rate)); in sk_getsockopt()
1573 v.val = READ_ONCE(sk->sk_incoming_cpu); in sk_getsockopt()
1580 sk_get_meminfo(sk, meminfo); in sk_getsockopt()
1591 v.val = READ_ONCE(sk->sk_napi_id); in sk_getsockopt()
1604 v.val64 = sock_gen_cookie(sk); in sk_getsockopt()
1608 v.val = sock_flag(sk, SOCK_ZEROCOPY); in sk_getsockopt()
1613 v.txtime.clockid = sk->sk_clockid; in sk_getsockopt()
1614 v.txtime.flags |= sk->sk_txtime_deadline_mode ? in sk_getsockopt()
1616 v.txtime.flags |= sk->sk_txtime_report_errors ? in sk_getsockopt()
1621 v.val = sk->sk_bound_dev_if; in sk_getsockopt()
1628 v.val64 = atomic64_read(&sock_net(sk)->net_cookie); in sk_getsockopt()
1651 return sk_getsockopt(sock->sk, level, optname, in sock_getsockopt()
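
The getsockopt() path mirrors the setter. One branch worth illustrating is SO_MEMINFO, which copies out the counters filled in by sk_get_meminfo() further down. A userspace sketch, assuming "fd" is an open socket and the kernel is recent enough to provide SO_MEMINFO:

        #include <stdio.h>
        #include <stdint.h>
        #include <sys/socket.h>
        #include <linux/sock_diag.h>    /* SK_MEMINFO_* indices */

        static void dump_meminfo(int fd)
        {
                uint32_t mem[SK_MEMINFO_VARS];
                socklen_t len = sizeof(mem);

                if (getsockopt(fd, SOL_SOCKET, SO_MEMINFO, mem, &len) == 0)
                        printf("rmem_alloc=%u rcvbuf=%u drops=%u\n",
                               mem[SK_MEMINFO_RMEM_ALLOC], mem[SK_MEMINFO_RCVBUF],
                               mem[SK_MEMINFO_DROPS]);
        }
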
1661 static inline void sock_lock_init(struct sock *sk) in sock_lock_init() argument
1663 if (sk->sk_kern_sock) in sock_lock_init()
1665 sk, in sock_lock_init()
1666 af_family_kern_slock_key_strings[sk->sk_family], in sock_lock_init()
1667 af_family_kern_slock_keys + sk->sk_family, in sock_lock_init()
1668 af_family_kern_key_strings[sk->sk_family], in sock_lock_init()
1669 af_family_kern_keys + sk->sk_family); in sock_lock_init()
1672 sk, in sock_lock_init()
1673 af_family_slock_key_strings[sk->sk_family], in sock_lock_init()
1674 af_family_slock_keys + sk->sk_family, in sock_lock_init()
1675 af_family_key_strings[sk->sk_family], in sock_lock_init()
1676 af_family_keys + sk->sk_family); in sock_lock_init()
1704 struct sock *sk; in sk_prot_alloc() local
1709 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); in sk_prot_alloc()
1710 if (!sk) in sk_prot_alloc()
1711 return sk; in sk_prot_alloc()
1713 sk_prot_clear_nulls(sk, prot->obj_size); in sk_prot_alloc()
1715 sk = kmalloc(prot->obj_size, priority); in sk_prot_alloc()
1717 if (sk != NULL) { in sk_prot_alloc()
1718 if (security_sk_alloc(sk, family, priority)) in sk_prot_alloc()
1723 sk_tx_queue_clear(sk); in sk_prot_alloc()
1726 return sk; in sk_prot_alloc()
1729 security_sk_free(sk); in sk_prot_alloc()
1732 kmem_cache_free(slab, sk); in sk_prot_alloc()
1734 kfree(sk); in sk_prot_alloc()
1738 static void sk_prot_free(struct proto *prot, struct sock *sk) in sk_prot_free() argument
1746 cgroup_sk_free(&sk->sk_cgrp_data); in sk_prot_free()
1747 mem_cgroup_sk_free(sk); in sk_prot_free()
1748 security_sk_free(sk); in sk_prot_free()
1750 kmem_cache_free(slab, sk); in sk_prot_free()
1752 kfree(sk); in sk_prot_free()
1767 struct sock *sk; in sk_alloc() local
1769 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); in sk_alloc()
1770 if (sk) { in sk_alloc()
1771 sk->sk_family = family; in sk_alloc()
1776 sk->sk_prot = sk->sk_prot_creator = prot; in sk_alloc()
1777 sk->sk_kern_sock = kern; in sk_alloc()
1778 sock_lock_init(sk); in sk_alloc()
1779 sk->sk_net_refcnt = kern ? 0 : 1; in sk_alloc()
1780 if (likely(sk->sk_net_refcnt)) { in sk_alloc()
1785 sock_net_set(sk, net); in sk_alloc()
1786 refcount_set(&sk->sk_wmem_alloc, 1); in sk_alloc()
1788 mem_cgroup_sk_alloc(sk); in sk_alloc()
1789 cgroup_sk_alloc(&sk->sk_cgrp_data); in sk_alloc()
1790 sock_update_classid(&sk->sk_cgrp_data); in sk_alloc()
1791 sock_update_netprioidx(&sk->sk_cgrp_data); in sk_alloc()
1792 sk_tx_queue_clear(sk); in sk_alloc()
1795 return sk; in sk_alloc()
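
sk_alloc() paired with sock_init_data() (listed further down) is the standard shape of a protocol family's ->create() handler. A hedged sketch with made-up names (my_proto, my_destruct) and a placeholder family constant; real handlers also set sock->ops and protocol-specific state:

        #include <linux/net.h>
        #include <net/sock.h>

        /* my_proto and my_destruct are assumed to be defined elsewhere. */
        static int my_create(struct net *net, struct socket *sock,
                             int protocol, int kern)
        {
                struct sock *sk;

                sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, kern);
                if (!sk)
                        return -ENOBUFS;

                sock_init_data(sock, sk);       /* queues, callbacks, defaults */
                sk->sk_destruct = my_destruct;  /* later run from __sk_destruct() */
                return 0;
        }
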
1804 struct sock *sk = container_of(head, struct sock, sk_rcu); in __sk_destruct() local
1807 if (sk->sk_destruct) in __sk_destruct()
1808 sk->sk_destruct(sk); in __sk_destruct()
1810 filter = rcu_dereference_check(sk->sk_filter, in __sk_destruct()
1811 refcount_read(&sk->sk_wmem_alloc) == 0); in __sk_destruct()
1813 sk_filter_uncharge(sk, filter); in __sk_destruct()
1814 RCU_INIT_POINTER(sk->sk_filter, NULL); in __sk_destruct()
1817 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); in __sk_destruct()
1820 bpf_sk_storage_free(sk); in __sk_destruct()
1823 if (atomic_read(&sk->sk_omem_alloc)) in __sk_destruct()
1825 __func__, atomic_read(&sk->sk_omem_alloc)); in __sk_destruct()
1827 if (sk->sk_frag.page) { in __sk_destruct()
1828 put_page(sk->sk_frag.page); in __sk_destruct()
1829 sk->sk_frag.page = NULL; in __sk_destruct()
1833 put_cred(sk->sk_peer_cred); in __sk_destruct()
1834 put_pid(sk->sk_peer_pid); in __sk_destruct()
1836 if (likely(sk->sk_net_refcnt)) in __sk_destruct()
1837 put_net(sock_net(sk)); in __sk_destruct()
1838 sk_prot_free(sk->sk_prot_creator, sk); in __sk_destruct()
1841 void sk_destruct(struct sock *sk) in sk_destruct() argument
1843 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); in sk_destruct()
1845 if (rcu_access_pointer(sk->sk_reuseport_cb)) { in sk_destruct()
1846 reuseport_detach_sock(sk); in sk_destruct()
1851 call_rcu(&sk->sk_rcu, __sk_destruct); in sk_destruct()
1853 __sk_destruct(&sk->sk_rcu); in sk_destruct()
1856 static void __sk_free(struct sock *sk) in __sk_free() argument
1858 if (likely(sk->sk_net_refcnt)) in __sk_free()
1859 sock_inuse_add(sock_net(sk), -1); in __sk_free()
1861 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) in __sk_free()
1862 sock_diag_broadcast_destroy(sk); in __sk_free()
1864 sk_destruct(sk); in __sk_free()
1867 void sk_free(struct sock *sk) in sk_free() argument
1874 if (refcount_dec_and_test(&sk->sk_wmem_alloc)) in sk_free()
1875 __sk_free(sk); in sk_free()
1879 static void sk_init_common(struct sock *sk) in sk_init_common() argument
1881 skb_queue_head_init(&sk->sk_receive_queue); in sk_init_common()
1882 skb_queue_head_init(&sk->sk_write_queue); in sk_init_common()
1883 skb_queue_head_init(&sk->sk_error_queue); in sk_init_common()
1885 rwlock_init(&sk->sk_callback_lock); in sk_init_common()
1886 lockdep_set_class_and_name(&sk->sk_receive_queue.lock, in sk_init_common()
1887 af_rlock_keys + sk->sk_family, in sk_init_common()
1888 af_family_rlock_key_strings[sk->sk_family]); in sk_init_common()
1889 lockdep_set_class_and_name(&sk->sk_write_queue.lock, in sk_init_common()
1890 af_wlock_keys + sk->sk_family, in sk_init_common()
1891 af_family_wlock_key_strings[sk->sk_family]); in sk_init_common()
1892 lockdep_set_class_and_name(&sk->sk_error_queue.lock, in sk_init_common()
1893 af_elock_keys + sk->sk_family, in sk_init_common()
1894 af_family_elock_key_strings[sk->sk_family]); in sk_init_common()
1895 lockdep_set_class_and_name(&sk->sk_callback_lock, in sk_init_common()
1896 af_callback_keys + sk->sk_family, in sk_init_common()
1897 af_family_clock_key_strings[sk->sk_family]); in sk_init_common()
1907 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) in sk_clone_lock() argument
1909 struct proto *prot = READ_ONCE(sk->sk_prot); in sk_clone_lock()
1914 newsk = sk_prot_alloc(prot, priority, sk->sk_family); in sk_clone_lock()
1918 sock_copy(newsk, sk); in sk_clone_lock()
1947 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; in sk_clone_lock()
1958 filter = rcu_dereference(sk->sk_filter); in sk_clone_lock()
1968 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { in sk_clone_lock()
1981 if (bpf_sk_storage_clone(sk, newsk)) { in sk_clone_lock()
2022 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP) in sk_clone_lock()
2029 void sk_free_unlock_clone(struct sock *sk) in sk_free_unlock_clone() argument
2033 sk->sk_destruct = NULL; in sk_free_unlock_clone()
2034 bh_unlock_sock(sk); in sk_free_unlock_clone()
2035 sk_free(sk); in sk_free_unlock_clone()
2039 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) in sk_setup_caps() argument
2043 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps; in sk_setup_caps()
2044 if (sk->sk_route_caps & NETIF_F_GSO) in sk_setup_caps()
2045 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; in sk_setup_caps()
2046 sk->sk_route_caps &= ~sk->sk_route_nocaps; in sk_setup_caps()
2047 if (sk_can_gso(sk)) { in sk_setup_caps()
2049 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; in sk_setup_caps()
2051 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; in sk_setup_caps()
2052 sk->sk_gso_max_size = dst->dev->gso_max_size; in sk_setup_caps()
2056 sk->sk_gso_max_segs = max_segs; in sk_setup_caps()
2057 sk_dst_set(sk, dst); in sk_setup_caps()
2071 struct sock *sk = skb->sk; in sock_wfree() local
2074 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { in sock_wfree()
2079 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc)); in sock_wfree()
2080 sk->sk_write_space(sk); in sock_wfree()
2087 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc)) in sock_wfree()
2088 __sk_free(sk); in sock_wfree()
2097 struct sock *sk = skb->sk; in __sock_wfree() local
2099 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) in __sock_wfree()
2100 __sk_free(sk); in __sock_wfree()
2103 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) in skb_set_owner_w() argument
2106 skb->sk = sk; in skb_set_owner_w()
2108 if (unlikely(!sk_fullsock(sk))) { in skb_set_owner_w()
2110 sock_hold(sk); in skb_set_owner_w()
2115 skb_set_hash_from_sk(skb, sk); in skb_set_owner_w()
2121 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in skb_set_owner_w()
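
skb_set_owner_w() is the write-side counterpart of the destructors above: it points skb->sk at the socket and charges skb->truesize to sk_wmem_alloc, which sock_wfree() later releases. A two-line fragment, assuming skb and sk already exist in the surrounding function:

        skb_set_owner_w(skb, sk);       /* charge skb->truesize to sk_wmem_alloc */
        /* ... hand the skb to the stack; whenever it is finally freed,
         * sock_wfree() uncharges it and calls sk->sk_write_space() ... */
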
2149 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk)) in skb_orphan_partial()
2161 struct sock *sk = skb->sk; in sock_rfree() local
2164 atomic_sub(len, &sk->sk_rmem_alloc); in sock_rfree()
2165 sk_mem_uncharge(sk, len); in sock_rfree()
2175 sock_put(skb->sk); in sock_efree()
2185 if (sk_is_refcounted(skb->sk)) in sock_pfree()
2186 sock_gen_put(skb->sk); in sock_pfree()
2191 kuid_t sock_i_uid(struct sock *sk) in sock_i_uid() argument
2195 read_lock_bh(&sk->sk_callback_lock); in sock_i_uid()
2196 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; in sock_i_uid()
2197 read_unlock_bh(&sk->sk_callback_lock); in sock_i_uid()
2202 unsigned long __sock_i_ino(struct sock *sk) in __sock_i_ino() argument
2206 read_lock(&sk->sk_callback_lock); in __sock_i_ino()
2207 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; in __sock_i_ino()
2208 read_unlock(&sk->sk_callback_lock); in __sock_i_ino()
2213 unsigned long sock_i_ino(struct sock *sk) in sock_i_ino() argument
2218 ino = __sock_i_ino(sk); in sock_i_ino()
2227 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, in sock_wmalloc() argument
2231 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) { in sock_wmalloc()
2235 skb_set_owner_w(skb, sk); in sock_wmalloc()
2245 struct sock *sk = skb->sk; in sock_ofree() local
2247 atomic_sub(skb->truesize, &sk->sk_omem_alloc); in sock_ofree()
2250 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, in sock_omalloc() argument
2256 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > in sock_omalloc()
2264 atomic_add(skb->truesize, &sk->sk_omem_alloc); in sock_omalloc()
2265 skb->sk = sk; in sock_omalloc()
2273 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) in sock_kmalloc() argument
2278 atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { in sock_kmalloc()
2283 atomic_add(size, &sk->sk_omem_alloc); in sock_kmalloc()
2287 atomic_sub(size, &sk->sk_omem_alloc); in sock_kmalloc()
2297 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, in __sock_kfree_s() argument
2306 atomic_sub(size, &sk->sk_omem_alloc); in __sock_kfree_s()
2309 void sock_kfree_s(struct sock *sk, void *mem, int size) in sock_kfree_s() argument
2311 __sock_kfree_s(sk, mem, size, false); in sock_kfree_s()
2315 void sock_kzfree_s(struct sock *sk, void *mem, int size) in sock_kzfree_s() argument
2317 __sock_kfree_s(sk, mem, size, true); in sock_kzfree_s()
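
sock_kmalloc()/sock_kfree_s() account small per-socket allocations against sk_omem_alloc and the optmem_max sysctl. A minimal fragment showing the allocate/use/free pattern; the 128-byte size is arbitrary:

        void *buf;

        buf = sock_kmalloc(sk, 128, GFP_KERNEL);
        if (!buf)
                return -ENOBUFS;        /* over optmem_max or out of memory */
        /* ... use buf; sock_kzfree_s() would additionally zero it on free ... */
        sock_kfree_s(sk, buf, 128);     /* size must match the allocation */
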
2324 static long sock_wait_for_wmem(struct sock *sk, long timeo) in sock_wait_for_wmem() argument
2328 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in sock_wait_for_wmem()
2334 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in sock_wait_for_wmem()
2335 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in sock_wait_for_wmem()
2336 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) in sock_wait_for_wmem()
2338 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) in sock_wait_for_wmem()
2340 if (READ_ONCE(sk->sk_err)) in sock_wait_for_wmem()
2344 finish_wait(sk_sleep(sk), &wait); in sock_wait_for_wmem()
2353 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, in sock_alloc_send_pskb() argument
2361 timeo = sock_sndtimeo(sk, noblock); in sock_alloc_send_pskb()
2363 err = sock_error(sk); in sock_alloc_send_pskb()
2368 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) in sock_alloc_send_pskb()
2371 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf)) in sock_alloc_send_pskb()
2374 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in sock_alloc_send_pskb()
2375 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in sock_alloc_send_pskb()
2381 timeo = sock_wait_for_wmem(sk, timeo); in sock_alloc_send_pskb()
2384 errcode, sk->sk_allocation); in sock_alloc_send_pskb()
2386 skb_set_owner_w(skb, sk); in sock_alloc_send_pskb()
2397 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, in sock_alloc_send_skb() argument
2400 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); in sock_alloc_send_skb()
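
sock_alloc_send_pskb()/sock_alloc_send_skb() allocate an output skb charged against sk_sndbuf, sleeping (bounded by sk_sndtimeo) when the write budget is exhausted. A fragment sketching the usual sendmsg-side call; "len" and "noblock" are assumed to come from the caller:

        struct sk_buff *skb;
        int err;

        skb = sock_alloc_send_skb(sk, len + MAX_HEADER, noblock, &err);
        if (!skb)
                return err;     /* -EPIPE, -EAGAIN, or a pending-signal error */
        /* the skb is already owned by sk via skb_set_owner_w() above */
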
2404 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, in __sock_cmsg_send() argument
2411 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in __sock_cmsg_send()
2430 if (!sock_flag(sk, SOCK_TXTIME)) in __sock_cmsg_send()
2447 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, in sock_cmsg_send() argument
2458 ret = __sock_cmsg_send(sk, msg, cmsg, sockc); in sock_cmsg_send()
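
sock_cmsg_send() walks the SOL_SOCKET control messages (SO_MARK, SO_TIMESTAMPING, SCM_TXTIME) into a struct sockcm_cookie. A fragment sketching the call pattern a sendmsg implementation typically uses; only the surrounding variable names are invented:

        struct sockcm_cookie sockc;
        int err;

        sockcm_init(&sockc, sk);                /* seed from sk->sk_tsflags */
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
                if (err)
                        return err;
        }
        /* sockc.mark / sockc.tsflags / sockc.transmit_time now apply */
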
2466 static void sk_enter_memory_pressure(struct sock *sk) in sk_enter_memory_pressure() argument
2468 if (!sk->sk_prot->enter_memory_pressure) in sk_enter_memory_pressure()
2471 sk->sk_prot->enter_memory_pressure(sk); in sk_enter_memory_pressure()
2474 static void sk_leave_memory_pressure(struct sock *sk) in sk_leave_memory_pressure() argument
2476 if (sk->sk_prot->leave_memory_pressure) { in sk_leave_memory_pressure()
2477 sk->sk_prot->leave_memory_pressure(sk); in sk_leave_memory_pressure()
2479 unsigned long *memory_pressure = sk->sk_prot->memory_pressure; in sk_leave_memory_pressure()
2533 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) in sk_page_frag_refill() argument
2535 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) in sk_page_frag_refill()
2538 sk_enter_memory_pressure(sk); in sk_page_frag_refill()
2539 sk_stream_moderate_sndbuf(sk); in sk_page_frag_refill()
2544 static void __lock_sock(struct sock *sk) in __lock_sock() argument
2545 __releases(&sk->sk_lock.slock) in __lock_sock()
2546 __acquires(&sk->sk_lock.slock) in __lock_sock()
2551 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, in __lock_sock()
2553 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock()
2555 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock()
2556 if (!sock_owned_by_user(sk)) in __lock_sock()
2559 finish_wait(&sk->sk_lock.wq, &wait); in __lock_sock()
2562 void __release_sock(struct sock *sk) in __release_sock() argument
2563 __releases(&sk->sk_lock.slock) in __release_sock()
2564 __acquires(&sk->sk_lock.slock) in __release_sock()
2568 while ((skb = sk->sk_backlog.head) != NULL) { in __release_sock()
2569 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; in __release_sock()
2571 spin_unlock_bh(&sk->sk_lock.slock); in __release_sock()
2578 sk_backlog_rcv(sk, skb); in __release_sock()
2585 spin_lock_bh(&sk->sk_lock.slock); in __release_sock()
2592 sk->sk_backlog.len = 0; in __release_sock()
2595 void __sk_flush_backlog(struct sock *sk) in __sk_flush_backlog() argument
2597 spin_lock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
2598 __release_sock(sk); in __sk_flush_backlog()
2599 spin_unlock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
2613 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) in sk_wait_data() argument
2618 add_wait_queue(sk_sleep(sk), &wait); in sk_wait_data()
2619 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); in sk_wait_data()
2620 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait); in sk_wait_data()
2621 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); in sk_wait_data()
2622 remove_wait_queue(sk_sleep(sk), &wait); in sk_wait_data()
2636 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) in __sk_mem_raise_allocated() argument
2638 struct proto *prot = sk->sk_prot; in __sk_mem_raise_allocated()
2639 long allocated = sk_memory_allocated_add(sk, amt); in __sk_mem_raise_allocated()
2642 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in __sk_mem_raise_allocated()
2643 !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt))) in __sk_mem_raise_allocated()
2647 if (allocated <= sk_prot_mem_limits(sk, 0)) { in __sk_mem_raise_allocated()
2648 sk_leave_memory_pressure(sk); in __sk_mem_raise_allocated()
2653 if (allocated > sk_prot_mem_limits(sk, 1)) in __sk_mem_raise_allocated()
2654 sk_enter_memory_pressure(sk); in __sk_mem_raise_allocated()
2657 if (allocated > sk_prot_mem_limits(sk, 2)) in __sk_mem_raise_allocated()
2662 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot)) in __sk_mem_raise_allocated()
2666 int wmem0 = sk_get_wmem0(sk, prot); in __sk_mem_raise_allocated()
2668 if (sk->sk_type == SOCK_STREAM) { in __sk_mem_raise_allocated()
2669 if (sk->sk_wmem_queued < wmem0) in __sk_mem_raise_allocated()
2671 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) { in __sk_mem_raise_allocated()
2676 if (sk_has_memory_pressure(sk)) { in __sk_mem_raise_allocated()
2679 if (!sk_under_memory_pressure(sk)) in __sk_mem_raise_allocated()
2681 alloc = sk_sockets_allocated_read_positive(sk); in __sk_mem_raise_allocated()
2682 if (sk_prot_mem_limits(sk, 2) > alloc * in __sk_mem_raise_allocated()
2683 sk_mem_pages(sk->sk_wmem_queued + in __sk_mem_raise_allocated()
2684 atomic_read(&sk->sk_rmem_alloc) + in __sk_mem_raise_allocated()
2685 sk->sk_forward_alloc)) in __sk_mem_raise_allocated()
2691 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { in __sk_mem_raise_allocated()
2692 sk_stream_moderate_sndbuf(sk); in __sk_mem_raise_allocated()
2697 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) in __sk_mem_raise_allocated()
2702 trace_sock_exceed_buf_limit(sk, prot, allocated, kind); in __sk_mem_raise_allocated()
2704 sk_memory_allocated_sub(sk, amt); in __sk_mem_raise_allocated()
2706 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in __sk_mem_raise_allocated()
2707 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt); in __sk_mem_raise_allocated()
2723 int __sk_mem_schedule(struct sock *sk, int size, int kind) in __sk_mem_schedule() argument
2727 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT; in __sk_mem_schedule()
2728 ret = __sk_mem_raise_allocated(sk, size, amt, kind); in __sk_mem_schedule()
2730 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT; in __sk_mem_schedule()
2742 void __sk_mem_reduce_allocated(struct sock *sk, int amount) in __sk_mem_reduce_allocated() argument
2744 sk_memory_allocated_sub(sk, amount); in __sk_mem_reduce_allocated()
2746 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in __sk_mem_reduce_allocated()
2747 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); in __sk_mem_reduce_allocated()
2749 if (sk_under_global_memory_pressure(sk) && in __sk_mem_reduce_allocated()
2750 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) in __sk_mem_reduce_allocated()
2751 sk_leave_memory_pressure(sk); in __sk_mem_reduce_allocated()
2760 void __sk_mem_reclaim(struct sock *sk, int amount) in __sk_mem_reclaim() argument
2763 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT; in __sk_mem_reclaim()
2764 __sk_mem_reduce_allocated(sk, amount); in __sk_mem_reclaim()
2768 int sk_set_peek_off(struct sock *sk, int val) in sk_set_peek_off() argument
2770 WRITE_ONCE(sk->sk_peek_off, val); in sk_set_peek_off()
2839 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) in sock_no_sendmsg_locked() argument
2875 sock_update_netprioidx(&sock->sk->sk_cgrp_data); in __receive_sock()
2876 sock_update_classid(&sock->sk->sk_cgrp_data); in __receive_sock()
2894 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page, in sock_no_sendpage_locked() argument
2904 res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size); in sock_no_sendpage_locked()
2914 static void sock_def_wakeup(struct sock *sk) in sock_def_wakeup() argument
2919 wq = rcu_dereference(sk->sk_wq); in sock_def_wakeup()
2925 static void sock_def_error_report(struct sock *sk) in sock_def_error_report() argument
2930 wq = rcu_dereference(sk->sk_wq); in sock_def_error_report()
2933 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); in sock_def_error_report()
2937 void sock_def_readable(struct sock *sk) in sock_def_readable() argument
2942 wq = rcu_dereference(sk->sk_wq); in sock_def_readable()
2956 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); in sock_def_readable()
2960 static void sock_def_write_space(struct sock *sk) in sock_def_write_space() argument
2969 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) { in sock_def_write_space()
2970 wq = rcu_dereference(sk->sk_wq); in sock_def_write_space()
2976 if (sock_writeable(sk)) in sock_def_write_space()
2977 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in sock_def_write_space()
2983 static void sock_def_destruct(struct sock *sk) in sock_def_destruct() argument
2987 void sk_send_sigurg(struct sock *sk) in sk_send_sigurg() argument
2989 if (sk->sk_socket && sk->sk_socket->file) in sk_send_sigurg()
2990 if (send_sigurg(&sk->sk_socket->file->f_owner)) in sk_send_sigurg()
2991 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); in sk_send_sigurg()
2995 void sk_reset_timer(struct sock *sk, struct timer_list* timer, in sk_reset_timer() argument
2999 sock_hold(sk); in sk_reset_timer()
3003 void sk_stop_timer(struct sock *sk, struct timer_list* timer) in sk_stop_timer() argument
3006 __sock_put(sk); in sk_stop_timer()
3010 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) in sk_stop_timer_sync() argument
3013 __sock_put(sk); in sk_stop_timer_sync()
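
sk_reset_timer() and the two stop variants pair timer arming with socket refcounting: arming takes a reference with sock_hold(), and stopping drops it with __sock_put() only if the timer was still pending. A minimal fragment using the generic sk_timer:

        /* arm: holds a reference on sk until the timer fires or is stopped */
        sk_reset_timer(sk, &sk->sk_timer, jiffies + msecs_to_jiffies(200));

        /* ... later, e.g. while tearing the socket down ... */
        sk_stop_timer(sk, &sk->sk_timer);       /* drops the ref if still pending */
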
3017 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid) in sock_init_data_uid() argument
3019 sk_init_common(sk); in sock_init_data_uid()
3020 sk->sk_send_head = NULL; in sock_init_data_uid()
3022 timer_setup(&sk->sk_timer, NULL, 0); in sock_init_data_uid()
3024 sk->sk_allocation = GFP_KERNEL; in sock_init_data_uid()
3025 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); in sock_init_data_uid()
3026 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); in sock_init_data_uid()
3027 sk->sk_state = TCP_CLOSE; in sock_init_data_uid()
3028 sk_set_socket(sk, sock); in sock_init_data_uid()
3030 sock_set_flag(sk, SOCK_ZAPPED); in sock_init_data_uid()
3033 sk->sk_type = sock->type; in sock_init_data_uid()
3034 RCU_INIT_POINTER(sk->sk_wq, &sock->wq); in sock_init_data_uid()
3035 sock->sk = sk; in sock_init_data_uid()
3037 RCU_INIT_POINTER(sk->sk_wq, NULL); in sock_init_data_uid()
3039 sk->sk_uid = uid; in sock_init_data_uid()
3041 rwlock_init(&sk->sk_callback_lock); in sock_init_data_uid()
3042 if (sk->sk_kern_sock) in sock_init_data_uid()
3044 &sk->sk_callback_lock, in sock_init_data_uid()
3045 af_kern_callback_keys + sk->sk_family, in sock_init_data_uid()
3046 af_family_kern_clock_key_strings[sk->sk_family]); in sock_init_data_uid()
3049 &sk->sk_callback_lock, in sock_init_data_uid()
3050 af_callback_keys + sk->sk_family, in sock_init_data_uid()
3051 af_family_clock_key_strings[sk->sk_family]); in sock_init_data_uid()
3053 sk->sk_state_change = sock_def_wakeup; in sock_init_data_uid()
3054 sk->sk_data_ready = sock_def_readable; in sock_init_data_uid()
3055 sk->sk_write_space = sock_def_write_space; in sock_init_data_uid()
3056 sk->sk_error_report = sock_def_error_report; in sock_init_data_uid()
3057 sk->sk_destruct = sock_def_destruct; in sock_init_data_uid()
3059 sk->sk_frag.page = NULL; in sock_init_data_uid()
3060 sk->sk_frag.offset = 0; in sock_init_data_uid()
3061 sk->sk_peek_off = -1; in sock_init_data_uid()
3063 sk->sk_peer_pid = NULL; in sock_init_data_uid()
3064 sk->sk_peer_cred = NULL; in sock_init_data_uid()
3065 spin_lock_init(&sk->sk_peer_lock); in sock_init_data_uid()
3067 sk->sk_write_pending = 0; in sock_init_data_uid()
3068 sk->sk_rcvlowat = 1; in sock_init_data_uid()
3069 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; in sock_init_data_uid()
3070 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; in sock_init_data_uid()
3072 sk->sk_stamp = SK_DEFAULT_STAMP; in sock_init_data_uid()
3074 seqlock_init(&sk->sk_stamp_seq); in sock_init_data_uid()
3076 atomic_set(&sk->sk_zckey, 0); in sock_init_data_uid()
3079 sk->sk_napi_id = 0; in sock_init_data_uid()
3080 sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); in sock_init_data_uid()
3083 sk->sk_max_pacing_rate = ~0UL; in sock_init_data_uid()
3084 sk->sk_pacing_rate = ~0UL; in sock_init_data_uid()
3085 WRITE_ONCE(sk->sk_pacing_shift, 10); in sock_init_data_uid()
3086 sk->sk_incoming_cpu = -1; in sock_init_data_uid()
3088 sk_rx_queue_clear(sk); in sock_init_data_uid()
3094 refcount_set(&sk->sk_refcnt, 1); in sock_init_data_uid()
3095 atomic_set(&sk->sk_drops, 0); in sock_init_data_uid()
3099 void sock_init_data(struct socket *sock, struct sock *sk) in sock_init_data() argument
3103 make_kuid(sock_net(sk)->user_ns, 0); in sock_init_data()
3105 sock_init_data_uid(sock, sk, uid); in sock_init_data()
3109 void lock_sock_nested(struct sock *sk, int subclass) in lock_sock_nested() argument
3112 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_nested()
3113 if (sk->sk_lock.owned) in lock_sock_nested()
3114 __lock_sock(sk); in lock_sock_nested()
3115 sk->sk_lock.owned = 1; in lock_sock_nested()
3116 spin_unlock(&sk->sk_lock.slock); in lock_sock_nested()
3120 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); in lock_sock_nested()
3125 void release_sock(struct sock *sk) in release_sock() argument
3127 spin_lock_bh(&sk->sk_lock.slock); in release_sock()
3128 if (sk->sk_backlog.tail) in release_sock()
3129 __release_sock(sk); in release_sock()
3134 if (sk->sk_prot->release_cb) in release_sock()
3135 sk->sk_prot->release_cb(sk); in release_sock()
3137 sock_release_ownership(sk); in release_sock()
3138 if (waitqueue_active(&sk->sk_lock.wq)) in release_sock()
3139 wake_up(&sk->sk_lock.wq); in release_sock()
3140 spin_unlock_bh(&sk->sk_lock.slock); in release_sock()
3157 bool lock_sock_fast(struct sock *sk) in lock_sock_fast() argument
3160 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_fast()
3162 if (!sk->sk_lock.owned) in lock_sock_fast()
3168 __lock_sock(sk); in lock_sock_fast()
3169 sk->sk_lock.owned = 1; in lock_sock_fast()
3170 spin_unlock(&sk->sk_lock.slock); in lock_sock_fast()
3174 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); in lock_sock_fast()
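
lock_sock()/release_sock() implement the process-context "owned" lock: lock_sock() may sleep waiting for the current owner, and release_sock() first drains any backlog that softirq handlers queued via sk_add_backlog() while the socket was owned (see __sk_receive_skb above); lock_sock_fast() offers the same contract but reports whether the slow path was taken. A fragment of the usual pattern; the state change inside is arbitrary:

        lock_sock(sk);                          /* may sleep */
        WRITE_ONCE(sk->sk_rcvlowat, 1);         /* example state change */
        release_sock(sk);                       /* runs the backlog, wakes waiters */
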
3183 struct sock *sk = sock->sk; in sock_gettstamp() local
3186 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in sock_gettstamp()
3187 ts = ktime_to_timespec64(sock_read_timestamp(sk)); in sock_gettstamp()
3192 sock_write_timestamp(sk, kt); in sock_gettstamp()
3219 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag) in sock_enable_timestamp() argument
3221 if (!sock_flag(sk, flag)) { in sock_enable_timestamp()
3222 unsigned long previous_flags = sk->sk_flags; in sock_enable_timestamp()
3224 sock_set_flag(sk, flag); in sock_enable_timestamp()
3230 if (sock_needs_netstamp(sk) && in sock_enable_timestamp()
3236 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, in sock_recv_errqueue() argument
3244 skb = sock_dequeue_err_skb(sk); in sock_recv_errqueue()
3257 sock_recv_timestamp(msg, sk, skb); in sock_recv_errqueue()
3282 struct sock *sk = sock->sk; in sock_common_getsockopt() local
3284 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); in sock_common_getsockopt()
3291 struct sock *sk = sock->sk; in sock_common_recvmsg() local
3295 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, in sock_common_recvmsg()
3309 struct sock *sk = sock->sk; in sock_common_setsockopt() local
3311 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); in sock_common_setsockopt()
3315 void sk_common_release(struct sock *sk) in sk_common_release() argument
3317 if (sk->sk_prot->destroy) in sk_common_release()
3318 sk->sk_prot->destroy(sk); in sk_common_release()
3328 sk->sk_prot->unhash(sk); in sk_common_release()
3342 sock_orphan(sk); in sk_common_release()
3344 xfrm_sk_free_policy(sk); in sk_common_release()
3346 sk_refcnt_debug_release(sk); in sk_common_release()
3348 sock_put(sk); in sk_common_release()
3352 void sk_get_meminfo(const struct sock *sk, u32 *mem) in sk_get_meminfo() argument
3356 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); in sk_get_meminfo()
3357 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf); in sk_get_meminfo()
3358 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); in sk_get_meminfo()
3359 mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf); in sk_get_meminfo()
3360 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; in sk_get_meminfo()
3361 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued); in sk_get_meminfo()
3362 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); in sk_get_meminfo()
3363 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); in sk_get_meminfo()
3364 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); in sk_get_meminfo()
3753 struct sock *sk = p; in sk_busy_loop_end() local
3755 return !skb_queue_empty_lockless(&sk->sk_receive_queue) || in sk_busy_loop_end()
3756 sk_busy_loop_timeout(sk, start_time); in sk_busy_loop_end()
3761 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len) in sock_bind_add() argument
3763 if (!sk->sk_prot->bind_add) in sock_bind_add()
3765 return sk->sk_prot->bind_add(sk, addr, addr_len); in sock_bind_add()