Lines matching references to the identifier sk in net/ipv4/tcp.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" indicate how sk is declared at that use site.
325 void tcp_enter_memory_pressure(struct sock *sk) in tcp_enter_memory_pressure() argument
336 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); in tcp_enter_memory_pressure()
340 void tcp_leave_memory_pressure(struct sock *sk) in tcp_leave_memory_pressure() argument
348 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO, in tcp_leave_memory_pressure()
408 void tcp_init_sock(struct sock *sk) in tcp_init_sock() argument
410 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_sock()
411 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock()
414 sk->tcp_rtx_queue = RB_ROOT; in tcp_init_sock()
415 tcp_init_xmit_timers(sk); in tcp_init_sock()
440 tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; in tcp_init_sock()
441 tcp_assign_congestion_control(sk); in tcp_init_sock()
446 sk->sk_state = TCP_CLOSE; in tcp_init_sock()
448 sk->sk_write_space = sk_stream_write_space; in tcp_init_sock()
449 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); in tcp_init_sock()
453 WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]); in tcp_init_sock()
454 WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]); in tcp_init_sock()
456 sk_sockets_allocated_inc(sk); in tcp_init_sock()
457 sk->sk_route_forced_caps = NETIF_F_GSO; in tcp_init_sock()
461 static void tcp_tx_timestamp(struct sock *sk, u16 tsflags) in tcp_tx_timestamp() argument
463 struct sk_buff *skb = tcp_write_queue_tail(sk); in tcp_tx_timestamp()
469 sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); in tcp_tx_timestamp()
478 int target, struct sock *sk) in tcp_stream_is_readable() argument
481 (sk->sk_prot->stream_memory_read ? in tcp_stream_is_readable()
482 sk->sk_prot->stream_memory_read(sk) : false); in tcp_stream_is_readable()
495 struct sock *sk = sock->sk; in tcp_poll() local
496 const struct tcp_sock *tp = tcp_sk(sk); in tcp_poll()
501 state = inet_sk_state_load(sk); in tcp_poll()
503 return inet_csk_listen_poll(sk); in tcp_poll()
539 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) in tcp_poll()
541 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_poll()
547 int target = sock_rcvlowat(sk, 0, INT_MAX); in tcp_poll()
550 !sock_flag(sk, SOCK_URGINLINE) && in tcp_poll()
554 if (tcp_stream_is_readable(tp, target, sk)) in tcp_poll()
557 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in tcp_poll()
558 if (sk_stream_is_writeable(sk)) { in tcp_poll()
561 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in tcp_poll()
562 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_poll()
570 if (sk_stream_is_writeable(sk)) in tcp_poll()
578 } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { in tcp_poll()
587 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) in tcp_poll()
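The tcp_poll() lines above compute the readiness mask a caller sees through poll/select/epoll: readable once at least sock_rcvlowat() bytes are queued or the peer half-closed, writable when sk_stream_is_writeable(), error when sk_err or the error queue is non-empty. A minimal userspace counterpart, as a hedged sketch (the function name wait_for_events and the 5 s timeout are illustrative):

#define _GNU_SOURCE           /* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>

/* Wait for readiness on a connected TCP socket; which bits are set
 * is decided by tcp_poll() on the kernel side. */
static void wait_for_events(int fd)
{
    struct pollfd pfd = {
        .fd     = fd,
        .events = POLLIN | POLLOUT | POLLRDHUP,
    };

    if (poll(&pfd, 1, 5000) <= 0)      /* error or timeout */
        return;

    if (pfd.revents & POLLIN)
        printf("readable: at least SO_RCVLOWAT bytes queued\n");
    if (pfd.revents & POLLOUT)
        printf("writable: send buffer has room\n");
    if (pfd.revents & (POLLRDHUP | POLLHUP))
        printf("peer shut down or connection closed\n");
    if (pfd.revents & POLLERR)
        printf("pending socket error (fetch with SO_ERROR)\n");
}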
594 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) in tcp_ioctl() argument
596 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl()
602 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
605 slow = lock_sock_fast(sk); in tcp_ioctl()
606 answ = tcp_inq(sk); in tcp_ioctl()
607 unlock_sock_fast(sk, slow); in tcp_ioctl()
614 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
617 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
623 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
626 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
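tcp_ioctl() above services SIOCINQ (unread bytes in the receive queue, via tcp_inq()) and SIOCOUTQ/SIOCOUTQNSD (bytes not yet sent or not yet acknowledged), rejecting them on listening sockets as the TCP_LISTEN checks show. A small sketch of the userspace side (print_queue_depths is an illustrative name):

#include <sys/ioctl.h>
#include <linux/sockios.h>   /* SIOCINQ, SIOCOUTQ */
#include <stdio.h>

static void print_queue_depths(int fd)
{
    int inq = 0, outq = 0;

    if (ioctl(fd, SIOCINQ, &inq) == 0)    /* unread bytes in receive queue */
        printf("SIOCINQ:  %d bytes waiting to be read\n", inq);
    if (ioctl(fd, SIOCOUTQ, &outq) == 0)  /* unsent + unacked bytes in send queue */
        printf("SIOCOUTQ: %d bytes not yet acknowledged\n", outq);
}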
651 static void skb_entail(struct sock *sk, struct sk_buff *skb) in skb_entail() argument
653 struct tcp_sock *tp = tcp_sk(sk); in skb_entail()
661 tcp_add_write_queue_tail(sk, skb); in skb_entail()
662 sk_wmem_queued_add(sk, skb->truesize); in skb_entail()
663 sk_mem_charge(sk, skb->truesize); in skb_entail()
667 tcp_slow_start_after_idle_check(sk); in skb_entail()
686 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, in tcp_should_autocork() argument
690 sock_net(sk)->ipv4.sysctl_tcp_autocorking && in tcp_should_autocork()
691 !tcp_rtx_queue_empty(sk) && in tcp_should_autocork()
692 refcount_read(&sk->sk_wmem_alloc) > skb->truesize; in tcp_should_autocork()
695 static void tcp_push(struct sock *sk, int flags, int mss_now, in tcp_push() argument
698 struct tcp_sock *tp = tcp_sk(sk); in tcp_push()
701 skb = tcp_write_queue_tail(sk); in tcp_push()
709 if (tcp_should_autocork(sk, skb, size_goal)) { in tcp_push()
712 if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { in tcp_push()
713 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); in tcp_push()
714 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_push()
719 if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize) in tcp_push()
726 __tcp_push_pending_frames(sk, mss_now, nonagle); in tcp_push()
735 ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe, in tcp_splice_data_recv()
742 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) in __tcp_splice_read() argument
750 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); in __tcp_splice_read()
769 struct sock *sk = sock->sk; in tcp_splice_read() local
779 sock_rps_record_flow(sk); in tcp_splice_read()
788 lock_sock(sk); in tcp_splice_read()
790 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); in tcp_splice_read()
792 ret = __tcp_splice_read(sk, &tss); in tcp_splice_read()
798 if (sock_flag(sk, SOCK_DONE)) in tcp_splice_read()
800 if (sk->sk_err) { in tcp_splice_read()
801 ret = sock_error(sk); in tcp_splice_read()
804 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_splice_read()
806 if (sk->sk_state == TCP_CLOSE) { in tcp_splice_read()
822 if (!skb_queue_empty(&sk->sk_receive_queue)) in tcp_splice_read()
824 sk_wait_data(sk, &timeo, NULL); in tcp_splice_read()
836 release_sock(sk); in tcp_splice_read()
837 lock_sock(sk); in tcp_splice_read()
839 if (sk->sk_err || sk->sk_state == TCP_CLOSE || in tcp_splice_read()
840 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_splice_read()
845 release_sock(sk); in tcp_splice_read()
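tcp_splice_read() above implements splice(2) with a TCP socket as the source: tcp_read_sock() walks the receive queue and skb_splice_bits() moves the payload into a pipe without a copy through userspace. A hedged sketch of how that is driven from an application (relay_once and the 64 KiB chunk size are illustrative choices):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Relay up to 64 KiB from a TCP socket to another fd via a pipe. */
static ssize_t relay_once(int sock, int out_fd)
{
    int p[2];
    ssize_t n, m = 0;

    if (pipe(p) < 0)
        return -1;

    n = splice(sock, NULL, p[1], NULL, 65536, SPLICE_F_MOVE);
    if (n > 0)
        m = splice(p[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE | SPLICE_F_MORE);

    close(p[0]);
    close(p[1]);
    return n > 0 ? m : n;
}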
854 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, in sk_stream_alloc_skb() argument
860 skb = sk->sk_tx_skb_cache; in sk_stream_alloc_skb()
863 sk->sk_tx_skb_cache = NULL; in sk_stream_alloc_skb()
874 if (unlikely(tcp_under_memory_pressure(sk))) in sk_stream_alloc_skb()
875 sk_mem_reclaim_partial(sk); in sk_stream_alloc_skb()
877 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); in sk_stream_alloc_skb()
883 sk_forced_mem_schedule(sk, skb->truesize); in sk_stream_alloc_skb()
885 mem_scheduled = sk_wmem_schedule(sk, skb->truesize); in sk_stream_alloc_skb()
888 skb_reserve(skb, sk->sk_prot->max_header); in sk_stream_alloc_skb()
899 sk->sk_prot->enter_memory_pressure(sk); in sk_stream_alloc_skb()
900 sk_stream_moderate_sndbuf(sk); in sk_stream_alloc_skb()
905 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, in tcp_xmit_size_goal() argument
908 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal()
915 new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; in tcp_xmit_size_goal()
923 sk->sk_gso_max_segs); in tcp_xmit_size_goal()
930 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) in tcp_send_mss() argument
934 mss_now = tcp_current_mss(sk); in tcp_send_mss()
935 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); in tcp_send_mss()
946 static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb) in tcp_remove_empty_skb() argument
949 tcp_unlink_write_queue(skb, sk); in tcp_remove_empty_skb()
950 if (tcp_write_queue_empty(sk)) in tcp_remove_empty_skb()
951 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); in tcp_remove_empty_skb()
952 sk_wmem_free_skb(sk, skb); in tcp_remove_empty_skb()
956 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, in do_tcp_sendpages() argument
959 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_sendpages()
963 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in do_tcp_sendpages()
973 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in do_tcp_sendpages()
974 !tcp_passive_fastopen(sk)) { in do_tcp_sendpages()
975 err = sk_stream_wait_connect(sk, &timeo); in do_tcp_sendpages()
980 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in do_tcp_sendpages()
982 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
986 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in do_tcp_sendpages()
990 struct sk_buff *skb = tcp_write_queue_tail(sk); in do_tcp_sendpages()
997 if (!sk_stream_memory_free(sk)) in do_tcp_sendpages()
1000 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, in do_tcp_sendpages()
1001 tcp_rtx_and_write_queues_empty(sk)); in do_tcp_sendpages()
1008 skb_entail(sk, skb); in do_tcp_sendpages()
1021 if (!sk_wmem_schedule(sk, copy)) in do_tcp_sendpages()
1037 sk_wmem_queued_add(sk, copy); in do_tcp_sendpages()
1038 sk_mem_charge(sk, copy); in do_tcp_sendpages()
1058 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); in do_tcp_sendpages()
1059 } else if (skb == tcp_send_head(sk)) in do_tcp_sendpages()
1060 tcp_push_one(sk, mss_now); in do_tcp_sendpages()
1064 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in do_tcp_sendpages()
1066 tcp_push(sk, flags & ~MSG_MORE, mss_now, in do_tcp_sendpages()
1069 err = sk_stream_wait_memory(sk, &timeo); in do_tcp_sendpages()
1073 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
1078 tcp_tx_timestamp(sk, sk->sk_tsflags); in do_tcp_sendpages()
1080 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in do_tcp_sendpages()
1085 tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk)); in do_tcp_sendpages()
1090 if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { in do_tcp_sendpages()
1091 sk->sk_write_space(sk); in do_tcp_sendpages()
1092 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); in do_tcp_sendpages()
1094 return sk_stream_error(sk, flags, err); in do_tcp_sendpages()
1098 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, in tcp_sendpage_locked() argument
1101 if (!(sk->sk_route_caps & NETIF_F_SG)) in tcp_sendpage_locked()
1102 return sock_no_sendpage_locked(sk, page, offset, size, flags); in tcp_sendpage_locked()
1104 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ in tcp_sendpage_locked()
1106 return do_tcp_sendpages(sk, page, offset, size, flags); in tcp_sendpage_locked()
1110 int tcp_sendpage(struct sock *sk, struct page *page, int offset, in tcp_sendpage() argument
1115 lock_sock(sk); in tcp_sendpage()
1116 ret = tcp_sendpage_locked(sk, page, offset, size, flags); in tcp_sendpage()
1117 release_sock(sk); in tcp_sendpage()
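tcp_sendpage()/do_tcp_sendpages() above are the page-based transmit path reached from sendfile(2) and splice() towards a TCP socket; tcp_sendpage_locked() falls back to sock_no_sendpage_locked() when the route lacks NETIF_F_SG. A minimal userspace use, as a sketch (send_file is an illustrative helper name):

#include <sys/sendfile.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

/* Send a whole regular file over a connected TCP socket. */
static int send_file(int sock, const char *path)
{
    struct stat st;
    off_t off = 0;
    int fd = open(path, O_RDONLY);

    if (fd < 0)
        return -1;
    if (fstat(fd, &st) < 0) {
        close(fd);
        return -1;
    }

    while (off < st.st_size)
        if (sendfile(sock, fd, &off, st.st_size - off) < 0)
            break;

    close(fd);
    return off == st.st_size ? 0 : -1;
}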
1131 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, in tcp_sendmsg_fastopen() argument
1135 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_fastopen()
1136 struct inet_sock *inet = inet_sk(sk); in tcp_sendmsg_fastopen()
1140 if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) || in tcp_sendmsg_fastopen()
1148 sk->sk_allocation); in tcp_sendmsg_fastopen()
1156 err = tcp_connect(sk); in tcp_sendmsg_fastopen()
1159 tcp_set_state(sk, TCP_CLOSE); in tcp_sendmsg_fastopen()
1161 sk->sk_route_caps = 0; in tcp_sendmsg_fastopen()
1165 err = __inet_stream_connect(sk->sk_socket, uaddr, in tcp_sendmsg_fastopen()
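tcp_sendmsg_fastopen() above is the client side of TCP Fast Open: data passed to sendmsg() with MSG_FASTOPEN (or after TCP_FASTOPEN_CONNECT) rides in the SYN, provided the TFO_CLIENT_ENABLE bit of net.ipv4.tcp_fastopen is set, as the sysctl check shows. A hedged client sketch (tfo_request is an illustrative name; the fallback define is for older libcs):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

/* Send `req` in the SYN of a new connection (TCP Fast Open client). */
static int tfo_request(const char *ip, int port, const void *req, size_t len)
{
    struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(port) };
    int fd;

    if (inet_pton(AF_INET, ip, &dst.sin_addr) != 1)
        return -1;
    fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0)
        return -1;

    /* Combines the handshake and the first data segment; without a
     * server-provided TFO cookie it degrades to a normal 3WHS. */
    if (sendto(fd, req, len, MSG_FASTOPEN,
               (struct sockaddr *)&dst, sizeof(dst)) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}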
1178 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) in tcp_sendmsg_locked() argument
1180 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_locked()
1192 if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { in tcp_sendmsg_locked()
1193 skb = tcp_write_queue_tail(sk); in tcp_sendmsg_locked()
1194 uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb)); in tcp_sendmsg_locked()
1200 zc = sk->sk_route_caps & NETIF_F_SG; in tcp_sendmsg_locked()
1205 if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && in tcp_sendmsg_locked()
1207 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg); in tcp_sendmsg_locked()
1214 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in tcp_sendmsg_locked()
1216 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ in tcp_sendmsg_locked()
1222 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in tcp_sendmsg_locked()
1223 !tcp_passive_fastopen(sk)) { in tcp_sendmsg_locked()
1224 err = sk_stream_wait_connect(sk, &timeo); in tcp_sendmsg_locked()
1231 copied = tcp_send_rcvq(sk, msg, size); in tcp_sendmsg_locked()
1242 sockcm_init(&sockc, sk); in tcp_sendmsg_locked()
1244 err = sock_cmsg_send(sk, msg, &sockc); in tcp_sendmsg_locked()
1252 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in tcp_sendmsg_locked()
1258 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg_locked()
1261 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in tcp_sendmsg_locked()
1267 skb = tcp_write_queue_tail(sk); in tcp_sendmsg_locked()
1275 if (!sk_stream_memory_free(sk)) in tcp_sendmsg_locked()
1280 if (sk_flush_backlog(sk)) in tcp_sendmsg_locked()
1283 first_skb = tcp_rtx_and_write_queues_empty(sk); in tcp_sendmsg_locked()
1284 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, in tcp_sendmsg_locked()
1292 skb_entail(sk, skb); in tcp_sendmsg_locked()
1311 err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); in tcp_sendmsg_locked()
1317 struct page_frag *pfrag = sk_page_frag(sk); in tcp_sendmsg_locked()
1319 if (!sk_page_frag_refill(sk, pfrag)) in tcp_sendmsg_locked()
1333 if (!sk_wmem_schedule(sk, copy)) in tcp_sendmsg_locked()
1336 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, in tcp_sendmsg_locked()
1353 err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); in tcp_sendmsg_locked()
1382 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); in tcp_sendmsg_locked()
1383 } else if (skb == tcp_send_head(sk)) in tcp_sendmsg_locked()
1384 tcp_push_one(sk, mss_now); in tcp_sendmsg_locked()
1388 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_sendmsg_locked()
1391 tcp_push(sk, flags & ~MSG_MORE, mss_now, in tcp_sendmsg_locked()
1394 err = sk_stream_wait_memory(sk, &timeo); in tcp_sendmsg_locked()
1398 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg_locked()
1403 tcp_tx_timestamp(sk, sockc.tsflags); in tcp_sendmsg_locked()
1404 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in tcp_sendmsg_locked()
1411 skb = tcp_write_queue_tail(sk); in tcp_sendmsg_locked()
1413 tcp_remove_empty_skb(sk, skb); in tcp_sendmsg_locked()
1419 err = sk_stream_error(sk, flags, err); in tcp_sendmsg_locked()
1421 if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { in tcp_sendmsg_locked()
1422 sk->sk_write_space(sk); in tcp_sendmsg_locked()
1423 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); in tcp_sendmsg_locked()
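The MSG_ZEROCOPY branch of tcp_sendmsg_locked() above (sock_zerocopy_realloc / skb_zerocopy_iter_stream) pins user pages instead of copying them, and only applies when SOCK_ZEROCOPY is set on the socket and the route supports scatter-gather. A hedged userspace sketch; completion notifications still have to be reaped from the error queue, which is only hinted at in the comment (send_zerocopy and the fallback defines are illustrative):

#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

/* Queue `len` bytes for zero-copy transmission.  The buffer must stay
 * untouched until a completion for this send is read back via
 * recvmsg(fd, ..., MSG_ERRQUEUE) as a SO_EE_ORIGIN_ZEROCOPY event. */
static ssize_t send_zerocopy(int fd, const void *buf, size_t len)
{
    int one = 1;

    if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
        return -1;
    return send(fd, buf, len, MSG_ZEROCOPY);
}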
1429 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) in tcp_sendmsg() argument
1433 lock_sock(sk); in tcp_sendmsg()
1434 ret = tcp_sendmsg_locked(sk, msg, size); in tcp_sendmsg()
1435 release_sock(sk); in tcp_sendmsg()
1446 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) in tcp_recv_urg() argument
1448 struct tcp_sock *tp = tcp_sk(sk); in tcp_recv_urg()
1451 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || in tcp_recv_urg()
1455 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) in tcp_recv_urg()
1478 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) in tcp_recv_urg()
1490 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) in tcp_peek_sndq() argument
1497 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { in tcp_peek_sndq()
1504 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_peek_sndq()
1521 static void tcp_cleanup_rbuf(struct sock *sk, int copied) in tcp_cleanup_rbuf() argument
1523 struct tcp_sock *tp = tcp_sk(sk); in tcp_cleanup_rbuf()
1526 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_cleanup_rbuf()
1532 if (inet_csk_ack_scheduled(sk)) { in tcp_cleanup_rbuf()
1533 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_rbuf()
1548 !inet_csk_in_pingpong_mode(sk))) && in tcp_cleanup_rbuf()
1549 !atomic_read(&sk->sk_rmem_alloc))) in tcp_cleanup_rbuf()
1559 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { in tcp_cleanup_rbuf()
1564 __u32 new_window = __tcp_select_window(sk); in tcp_cleanup_rbuf()
1576 tcp_send_ack(sk); in tcp_cleanup_rbuf()
1579 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) in tcp_recv_skb() argument
1584 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { in tcp_recv_skb()
1598 sk_eat_skb(sk, skb); in tcp_recv_skb()
1614 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, in tcp_read_sock() argument
1618 struct tcp_sock *tp = tcp_sk(sk); in tcp_read_sock()
1623 if (sk->sk_state == TCP_LISTEN) in tcp_read_sock()
1625 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { in tcp_read_sock()
1654 skb = tcp_recv_skb(sk, seq - 1, &offset); in tcp_read_sock()
1664 sk_eat_skb(sk, skb); in tcp_read_sock()
1668 sk_eat_skb(sk, skb); in tcp_read_sock()
1675 tcp_rcv_space_adjust(sk); in tcp_read_sock()
1679 tcp_recv_skb(sk, seq, &offset); in tcp_read_sock()
1680 tcp_cleanup_rbuf(sk, copied); in tcp_read_sock()
1688 return tcp_inq(sock->sk); in tcp_peek_len()
1693 int tcp_set_rcvlowat(struct sock *sk, int val) in tcp_set_rcvlowat() argument
1697 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) in tcp_set_rcvlowat()
1698 cap = sk->sk_rcvbuf >> 1; in tcp_set_rcvlowat()
1700 cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1; in tcp_set_rcvlowat()
1702 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); in tcp_set_rcvlowat()
1705 tcp_data_ready(sk); in tcp_set_rcvlowat()
1707 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) in tcp_set_rcvlowat()
1711 if (val > sk->sk_rcvbuf) { in tcp_set_rcvlowat()
1712 WRITE_ONCE(sk->sk_rcvbuf, val); in tcp_set_rcvlowat()
1713 tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val); in tcp_set_rcvlowat()
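tcp_set_rcvlowat() above backs setsockopt(SO_RCVLOWAT) on TCP: it stores the watermark, caps it against half of tcp_rmem[2] (or half of a locked sk_rcvbuf), and may grow sk_rcvbuf and the window clamp; the same sock_rcvlowat() value is what tcp_poll() and tcp_recvmsg() use as their wakeup target. A brief sketch (set_rcv_lowat is an illustrative name):

#include <sys/socket.h>

/* Don't wake readers (poll/recv) until at least `bytes` are queued;
 * useful to cut wakeups when the peer sends many small segments. */
static int set_rcv_lowat(int fd, int bytes)
{
    return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &bytes, sizeof(bytes));
}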
1738 static int tcp_zerocopy_receive(struct sock *sk, in tcp_zerocopy_receive() argument
1753 if (sk->sk_state == TCP_LISTEN) in tcp_zerocopy_receive()
1756 sock_rps_record_flow(sk); in tcp_zerocopy_receive()
1766 tp = tcp_sk(sk); in tcp_zerocopy_receive()
1768 inq = tcp_inq(sk); in tcp_zerocopy_receive()
1784 skb = tcp_recv_skb(sk, seq, &offset); in tcp_zerocopy_receive()
1823 tcp_rcv_space_adjust(sk); in tcp_zerocopy_receive()
1826 tcp_recv_skb(sk, seq, &offset); in tcp_zerocopy_receive()
1827 tcp_cleanup_rbuf(sk, length); in tcp_zerocopy_receive()
1832 if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) in tcp_zerocopy_receive()
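tcp_zerocopy_receive() above serves getsockopt(TCP_ZEROCOPY_RECEIVE): pages holding queued payload are remapped into a region the caller previously created with mmap() on the socket fd, and recv_skip_hint reports how many bytes must still be consumed with ordinary recv(). A rough sketch under the assumption that <linux/tcp.h> on the build host provides the struct and constant (kernels >= 4.18); address and length are expected to be page-aligned, and zc_receive is an illustrative name:

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>    /* struct tcp_zerocopy_receive, TCP_ZEROCOPY_RECEIVE */
#include <stdint.h>

/* Map up to `map_len` bytes of queued payload into `map` (previously
 * obtained via mmap(NULL, map_len, PROT_READ, MAP_SHARED, fd, 0)).
 * Returns bytes mapped, or -1 so the caller can fall back to recv(). */
static ssize_t zc_receive(int fd, void *map, size_t map_len, uint32_t *skip)
{
    struct tcp_zerocopy_receive zc = {
        .address = (uint64_t)(unsigned long)map,
        .length  = map_len,
    };
    socklen_t zlen = sizeof(zc);

    if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zlen) < 0)
        return -1;
    *skip = zc.recv_skip_hint;   /* bytes to consume with plain recv() */
    return zc.length;
}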
1855 static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, in tcp_recv_timestamp() argument
1858 int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); in tcp_recv_timestamp()
1862 if (sock_flag(sk, SOCK_RCVTSTAMP)) { in tcp_recv_timestamp()
1863 if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { in tcp_recv_timestamp()
1894 if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) in tcp_recv_timestamp()
1901 if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) in tcp_recv_timestamp()
1909 if (sock_flag(sk, SOCK_TSTAMP_NEW)) in tcp_recv_timestamp()
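tcp_recv_timestamp() above copies receive timestamps into control messages when SO_TIMESTAMP/SO_TIMESTAMPNS or SO_TIMESTAMPING is enabled, as the SOCK_RCVTSTAMP/SOCK_RCVTSTAMPNS and SOF_TIMESTAMPING_* checks show. A minimal sketch enabling the nanosecond variant (enable_rx_timestamps is an illustrative name):

#include <sys/socket.h>

/* Ask for software receive timestamps; each later recvmsg() then carries
 * a SCM_TIMESTAMPNS control message with a struct timespec, which is
 * what tcp_recv_timestamp() fills in on the kernel side. */
static int enable_rx_timestamps(int fd)
{
    int on = 1;

    return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on));
}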
1916 static int tcp_inq_hint(struct sock *sk) in tcp_inq_hint() argument
1918 const struct tcp_sock *tp = tcp_sk(sk); in tcp_inq_hint()
1925 lock_sock(sk); in tcp_inq_hint()
1927 release_sock(sk); in tcp_inq_hint()
1932 if (inq == 0 && sock_flag(sk, SOCK_DONE)) in tcp_inq_hint()
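tcp_inq_hint() above computes the "bytes still queued" value that tcp_recvmsg() attaches as a TCP_CM_INQ control message once the TCP_INQ option is enabled, saving a separate SIOCINQ ioctl per read. A hedged sketch (recv_with_inq is an illustrative name; the constants may need the fallback defines on older libcs):

#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>

#ifndef TCP_INQ
#define TCP_INQ    36
#define TCP_CM_INQ TCP_INQ
#endif

/* Read data and learn how much is still queued, in one syscall. */
static ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
{
    char cbuf[CMSG_SPACE(sizeof(int))];
    struct iovec iov = { .iov_base = buf, .iov_len = len };
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
    };
    int one = 1;
    ssize_t n;

    setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
    n = recvmsg(fd, &msg, 0);

    for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
        if (c->cmsg_level == IPPROTO_TCP && c->cmsg_type == TCP_CM_INQ)
            memcpy(inq, CMSG_DATA(c), sizeof(*inq));
    return n;
}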
1945 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, in tcp_recvmsg() argument
1948 struct tcp_sock *tp = tcp_sk(sk); in tcp_recvmsg()
1962 return inet_recv_error(sk, msg, len, addr_len); in tcp_recvmsg()
1964 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) && in tcp_recvmsg()
1965 (sk->sk_state == TCP_ESTABLISHED)) in tcp_recvmsg()
1966 sk_busy_loop(sk, nonblock); in tcp_recvmsg()
1968 lock_sock(sk); in tcp_recvmsg()
1971 if (sk->sk_state == TCP_LISTEN) in tcp_recvmsg()
1975 timeo = sock_rcvtimeo(sk, nonblock); in tcp_recvmsg()
2002 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); in tcp_recvmsg()
2019 last = skb_peek_tail(&sk->sk_receive_queue); in tcp_recvmsg()
2020 skb_queue_walk(&sk->sk_receive_queue, skb) { in tcp_recvmsg()
2047 if (copied >= target && !sk->sk_backlog.tail) in tcp_recvmsg()
2051 if (sk->sk_err || in tcp_recvmsg()
2052 sk->sk_state == TCP_CLOSE || in tcp_recvmsg()
2053 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_recvmsg()
2058 if (sock_flag(sk, SOCK_DONE)) in tcp_recvmsg()
2061 if (sk->sk_err) { in tcp_recvmsg()
2062 copied = sock_error(sk); in tcp_recvmsg()
2066 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_recvmsg()
2069 if (sk->sk_state == TCP_CLOSE) { in tcp_recvmsg()
2088 tcp_cleanup_rbuf(sk, copied); in tcp_recvmsg()
2092 release_sock(sk); in tcp_recvmsg()
2093 lock_sock(sk); in tcp_recvmsg()
2095 sk_wait_data(sk, &timeo, last); in tcp_recvmsg()
2118 if (!sock_flag(sk, SOCK_URGINLINE)) { in tcp_recvmsg()
2145 tcp_rcv_space_adjust(sk); in tcp_recvmsg()
2150 tcp_fast_path_check(sk); in tcp_recvmsg()
2162 sk_eat_skb(sk, skb); in tcp_recvmsg()
2169 sk_eat_skb(sk, skb); in tcp_recvmsg()
2178 tcp_cleanup_rbuf(sk, copied); in tcp_recvmsg()
2180 release_sock(sk); in tcp_recvmsg()
2184 tcp_recv_timestamp(msg, sk, &tss); in tcp_recvmsg()
2186 inq = tcp_inq_hint(sk); in tcp_recvmsg()
2194 release_sock(sk); in tcp_recvmsg()
2198 err = tcp_recv_urg(sk, msg, len, flags); in tcp_recvmsg()
2202 err = tcp_peek_sndq(sk, msg, len); in tcp_recvmsg()
2207 void tcp_set_state(struct sock *sk, int state) in tcp_set_state() argument
2209 int oldstate = sk->sk_state; in tcp_set_state()
2232 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) in tcp_set_state()
2233 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); in tcp_set_state()
2238 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); in tcp_set_state()
2243 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); in tcp_set_state()
2245 sk->sk_prot->unhash(sk); in tcp_set_state()
2246 if (inet_csk(sk)->icsk_bind_hash && in tcp_set_state()
2247 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) in tcp_set_state()
2248 inet_put_port(sk); in tcp_set_state()
2252 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); in tcp_set_state()
2258 inet_sk_state_store(sk, state); in tcp_set_state()
2286 static int tcp_close_state(struct sock *sk) in tcp_close_state() argument
2288 int next = (int)new_state[sk->sk_state]; in tcp_close_state()
2291 tcp_set_state(sk, ns); in tcp_close_state()
2301 void tcp_shutdown(struct sock *sk, int how) in tcp_shutdown() argument
2311 if ((1 << sk->sk_state) & in tcp_shutdown()
2315 if (tcp_close_state(sk)) in tcp_shutdown()
2316 tcp_send_fin(sk); in tcp_shutdown()
2321 bool tcp_check_oom(struct sock *sk, int shift) in tcp_check_oom() argument
2325 too_many_orphans = tcp_too_many_orphans(sk, shift); in tcp_check_oom()
2326 out_of_socket_memory = tcp_out_of_memory(sk); in tcp_check_oom()
2335 void tcp_close(struct sock *sk, long timeout) in tcp_close() argument
2341 lock_sock(sk); in tcp_close()
2342 sk->sk_shutdown = SHUTDOWN_MASK; in tcp_close()
2344 if (sk->sk_state == TCP_LISTEN) { in tcp_close()
2345 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2348 inet_csk_listen_stop(sk); in tcp_close()
2357 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { in tcp_close()
2366 sk_mem_reclaim(sk); in tcp_close()
2369 if (sk->sk_state == TCP_CLOSE) in tcp_close()
2379 if (unlikely(tcp_sk(sk)->repair)) { in tcp_close()
2380 sk->sk_prot->disconnect(sk, 0); in tcp_close()
2383 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); in tcp_close()
2384 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2385 tcp_send_active_reset(sk, sk->sk_allocation); in tcp_close()
2386 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { in tcp_close()
2388 sk->sk_prot->disconnect(sk, 0); in tcp_close()
2389 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_close()
2390 } else if (tcp_close_state(sk)) { in tcp_close()
2420 tcp_send_fin(sk); in tcp_close()
2423 sk_stream_wait_close(sk, timeout); in tcp_close()
2426 state = sk->sk_state; in tcp_close()
2427 sock_hold(sk); in tcp_close()
2428 sock_orphan(sk); in tcp_close()
2431 bh_lock_sock(sk); in tcp_close()
2433 __release_sock(sk); in tcp_close()
2435 percpu_counter_inc(sk->sk_prot->orphan_count); in tcp_close()
2438 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) in tcp_close()
2455 if (sk->sk_state == TCP_FIN_WAIT2) { in tcp_close()
2456 struct tcp_sock *tp = tcp_sk(sk); in tcp_close()
2458 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2459 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_close()
2460 __NET_INC_STATS(sock_net(sk), in tcp_close()
2463 const int tmo = tcp_fin_time(sk); in tcp_close()
2466 inet_csk_reset_keepalive_timer(sk, in tcp_close()
2469 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_close()
2474 if (sk->sk_state != TCP_CLOSE) { in tcp_close()
2475 sk_mem_reclaim(sk); in tcp_close()
2476 if (tcp_check_oom(sk, 0)) { in tcp_close()
2477 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2478 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_close()
2479 __NET_INC_STATS(sock_net(sk), in tcp_close()
2481 } else if (!check_net(sock_net(sk))) { in tcp_close()
2483 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2487 if (sk->sk_state == TCP_CLOSE) { in tcp_close()
2490 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, in tcp_close()
2491 lockdep_sock_is_held(sk)); in tcp_close()
2497 reqsk_fastopen_remove(sk, req, false); in tcp_close()
2498 inet_csk_destroy_sock(sk); in tcp_close()
2503 bh_unlock_sock(sk); in tcp_close()
2505 release_sock(sk); in tcp_close()
2506 sock_put(sk); in tcp_close()
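The tcp_close() lines above show the three shutdown flavours: a normal FIN via tcp_close_state()/tcp_send_fin(), an immediate reset when unread data is thrown away (LINUX_MIB_TCPABORTONCLOSE), and the SO_LINGER-with-zero-timeout path that disconnects and counts LINUX_MIB_TCPABORTONDATA. The linger case can be triggered from userspace as in this sketch (close_with_rst is an illustrative name):

#include <sys/socket.h>
#include <unistd.h>

/* Close abortively: the kernel sends RST instead of FIN and the
 * socket skips TIME_WAIT. */
static void close_with_rst(int fd)
{
    struct linger lin = { .l_onoff = 1, .l_linger = 0 };

    setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
    close(fd);
}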
2519 static void tcp_rtx_queue_purge(struct sock *sk) in tcp_rtx_queue_purge() argument
2521 struct rb_node *p = rb_first(&sk->tcp_rtx_queue); in tcp_rtx_queue_purge()
2523 tcp_sk(sk)->highest_sack = NULL; in tcp_rtx_queue_purge()
2531 tcp_rtx_queue_unlink(skb, sk); in tcp_rtx_queue_purge()
2532 sk_wmem_free_skb(sk, skb); in tcp_rtx_queue_purge()
2536 void tcp_write_queue_purge(struct sock *sk) in tcp_write_queue_purge() argument
2540 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); in tcp_write_queue_purge()
2541 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { in tcp_write_queue_purge()
2543 sk_wmem_free_skb(sk, skb); in tcp_write_queue_purge()
2545 tcp_rtx_queue_purge(sk); in tcp_write_queue_purge()
2546 skb = sk->sk_tx_skb_cache; in tcp_write_queue_purge()
2549 sk->sk_tx_skb_cache = NULL; in tcp_write_queue_purge()
2551 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); in tcp_write_queue_purge()
2552 sk_mem_reclaim(sk); in tcp_write_queue_purge()
2553 tcp_clear_all_retrans_hints(tcp_sk(sk)); in tcp_write_queue_purge()
2554 tcp_sk(sk)->packets_out = 0; in tcp_write_queue_purge()
2555 inet_csk(sk)->icsk_backoff = 0; in tcp_write_queue_purge()
2558 int tcp_disconnect(struct sock *sk, int flags) in tcp_disconnect() argument
2560 struct inet_sock *inet = inet_sk(sk); in tcp_disconnect()
2561 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_disconnect()
2562 struct tcp_sock *tp = tcp_sk(sk); in tcp_disconnect()
2563 int old_state = sk->sk_state; in tcp_disconnect()
2567 tcp_set_state(sk, TCP_CLOSE); in tcp_disconnect()
2571 inet_csk_listen_stop(sk); in tcp_disconnect()
2573 sk->sk_err = ECONNABORTED; in tcp_disconnect()
2580 tcp_send_active_reset(sk, gfp_any()); in tcp_disconnect()
2581 sk->sk_err = ECONNRESET; in tcp_disconnect()
2583 sk->sk_err = ECONNRESET; in tcp_disconnect()
2585 tcp_clear_xmit_timers(sk); in tcp_disconnect()
2586 __skb_queue_purge(&sk->sk_receive_queue); in tcp_disconnect()
2587 if (sk->sk_rx_skb_cache) { in tcp_disconnect()
2588 __kfree_skb(sk->sk_rx_skb_cache); in tcp_disconnect()
2589 sk->sk_rx_skb_cache = NULL; in tcp_disconnect()
2593 tcp_write_queue_purge(sk); in tcp_disconnect()
2594 tcp_fastopen_active_disable_ofo_check(sk); in tcp_disconnect()
2599 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) in tcp_disconnect()
2600 inet_reset_saddr(sk); in tcp_disconnect()
2602 sk->sk_shutdown = 0; in tcp_disconnect()
2603 sock_reset_flag(sk, SOCK_DONE); in tcp_disconnect()
2622 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_disconnect()
2625 inet_csk_delack_init(sk); in tcp_disconnect()
2631 __sk_dst_reset(sk); in tcp_disconnect()
2632 dst_release(sk->sk_rx_dst); in tcp_disconnect()
2633 sk->sk_rx_dst = NULL; in tcp_disconnect()
2669 if (sk->sk_frag.page) { in tcp_disconnect()
2670 put_page(sk->sk_frag.page); in tcp_disconnect()
2671 sk->sk_frag.page = NULL; in tcp_disconnect()
2672 sk->sk_frag.offset = 0; in tcp_disconnect()
2675 sk->sk_error_report(sk); in tcp_disconnect()
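tcp_disconnect() above tears the socket back to TCP_CLOSE while keeping the descriptor usable: it purges both queues, resets congestion state, and releases the cached route. Besides internal error paths, it is what a userspace connect() with an AF_UNSPEC address ends up invoking, the documented way to "dissolve" an association. A sketch (tcp_dissolve is an illustrative name):

#include <sys/socket.h>
#include <string.h>

/* Reset an existing TCP association but keep the descriptor;
 * the kernel routes this through tcp_disconnect(). */
static int tcp_dissolve(int fd)
{
    struct sockaddr sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_family = AF_UNSPEC;
    return connect(fd, &sa, sizeof(sa));
}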
2680 static inline bool tcp_can_repair_sock(const struct sock *sk) in tcp_can_repair_sock() argument
2682 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && in tcp_can_repair_sock()
2683 (sk->sk_state != TCP_LISTEN); in tcp_can_repair_sock()
2718 static int tcp_repair_options_est(struct sock *sk, in tcp_repair_options_est() argument
2721 struct tcp_sock *tp = tcp_sk(sk); in tcp_repair_options_est()
2734 tcp_mtup_init(sk); in tcp_repair_options_est()
2785 static int do_tcp_setsockopt(struct sock *sk, int level, in do_tcp_setsockopt() argument
2788 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_setsockopt()
2789 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_setsockopt()
2790 struct net *net = sock_net(sk); in do_tcp_setsockopt()
2808 lock_sock(sk); in do_tcp_setsockopt()
2809 err = tcp_set_congestion_control(sk, name, true, true, in do_tcp_setsockopt()
2810 ns_capable(sock_net(sk)->user_ns, in do_tcp_setsockopt()
2812 release_sock(sk); in do_tcp_setsockopt()
2828 lock_sock(sk); in do_tcp_setsockopt()
2829 err = tcp_set_ulp(sk, name); in do_tcp_setsockopt()
2830 release_sock(sk); in do_tcp_setsockopt()
2850 return tcp_fastopen_reset_cipher(net, sk, key, backup_key); in do_tcp_setsockopt()
2863 lock_sock(sk); in do_tcp_setsockopt()
2889 tcp_push_pending_frames(sk); in do_tcp_setsockopt()
2908 if (!tcp_can_repair_sock(sk)) in do_tcp_setsockopt()
2912 sk->sk_reuse = SK_FORCE_REUSE; in do_tcp_setsockopt()
2916 sk->sk_reuse = SK_NO_REUSE; in do_tcp_setsockopt()
2917 tcp_send_window_probe(sk); in do_tcp_setsockopt()
2920 sk->sk_reuse = SK_NO_REUSE; in do_tcp_setsockopt()
2936 if (sk->sk_state != TCP_CLOSE) in do_tcp_setsockopt()
2949 else if (sk->sk_state == TCP_ESTABLISHED) in do_tcp_setsockopt()
2950 err = tcp_repair_options_est(sk, in do_tcp_setsockopt()
2975 tcp_push_pending_frames(sk); in do_tcp_setsockopt()
2984 if (sock_flag(sk, SOCK_KEEPOPEN) && in do_tcp_setsockopt()
2985 !((1 << sk->sk_state) & in do_tcp_setsockopt()
2992 inet_csk_reset_keepalive_timer(sk, elapsed); in do_tcp_setsockopt()
3040 if (sk->sk_state != TCP_CLOSE) { in do_tcp_setsockopt()
3052 inet_csk_enter_pingpong_mode(sk); in do_tcp_setsockopt()
3054 inet_csk_exit_pingpong_mode(sk); in do_tcp_setsockopt()
3055 if ((1 << sk->sk_state) & in do_tcp_setsockopt()
3057 inet_csk_ack_scheduled(sk)) { in do_tcp_setsockopt()
3059 tcp_cleanup_rbuf(sk, 1); in do_tcp_setsockopt()
3061 inet_csk_enter_pingpong_mode(sk); in do_tcp_setsockopt()
3069 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) in do_tcp_setsockopt()
3070 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); in do_tcp_setsockopt()
3086 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | in do_tcp_setsockopt()
3090 fastopen_queue_tune(sk, val); in do_tcp_setsockopt()
3099 if (sk->sk_state == TCP_CLOSE) in do_tcp_setsockopt()
3110 else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) in do_tcp_setsockopt()
3126 sk->sk_write_space(sk); in do_tcp_setsockopt()
3144 release_sock(sk); in do_tcp_setsockopt()
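do_tcp_setsockopt() above dispatches the TCP-level options: congestion control (TCP_CONGESTION), upper-layer protocols such as kTLS (TCP_ULP), Fast Open keys and server queue length (TCP_FASTOPEN_KEY, TCP_FASTOPEN), keepalive timers, TCP_QUICKACK (pingpong mode), MD5 signatures, repair mode, and so on. A few common calls from userspace, as a hedged illustration (tune_tcp_socket and the chosen values are illustrative):

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>

/* Typical per-socket TCP tuning; each call lands in do_tcp_setsockopt(). */
static void tune_tcp_socket(int fd)
{
    int one = 1, idle = 60, intvl = 10, cnt = 5;
    const char ca[] = "cubic";          /* any registered congestion control */

    setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
    setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, strlen(ca));

    setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
    setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
    setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
    setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}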
3148 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, in tcp_setsockopt() argument
3151 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_setsockopt()
3154 return icsk->icsk_af_ops->setsockopt(sk, level, optname, in tcp_setsockopt()
3156 return do_tcp_setsockopt(sk, level, optname, optval, optlen); in tcp_setsockopt()
3161 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, in compat_tcp_setsockopt() argument
3165 return inet_csk_compat_setsockopt(sk, level, optname, in compat_tcp_setsockopt()
3167 return do_tcp_setsockopt(sk, level, optname, optval, optlen); in compat_tcp_setsockopt()
3192 void tcp_get_info(struct sock *sk, struct tcp_info *info) in tcp_get_info() argument
3194 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ in tcp_get_info()
3195 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_get_info()
3202 if (sk->sk_type != SOCK_STREAM) in tcp_get_info()
3205 info->tcpi_state = inet_sk_state_load(sk); in tcp_get_info()
3208 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_info()
3212 rate = READ_ONCE(sk->sk_max_pacing_rate); in tcp_get_info()
3224 info->tcpi_unacked = sk->sk_ack_backlog; in tcp_get_info()
3225 info->tcpi_sacked = sk->sk_max_ack_backlog; in tcp_get_info()
3229 slow = lock_sock_fast(sk); in tcp_get_info()
3305 unlock_sock_fast(sk, slow); in tcp_get_info()
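tcp_get_info() above fills struct tcp_info for getsockopt(TCP_INFO) (and the same data is exported via inet_diag, e.g. to ss); note the lockless READ_ONCE of the pacing rates and the lock_sock_fast() section around the rest. A sketch of the userspace side (print_tcp_info is an illustrative name; field names come from <linux/tcp.h>):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>    /* struct tcp_info, TCP_INFO */
#include <stdio.h>

/* Dump a few connection statistics maintained by the kernel. */
static int print_tcp_info(int fd)
{
    struct tcp_info ti;
    socklen_t len = sizeof(ti);

    if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
        return -1;

    printf("state=%u rtt=%u us cwnd=%u segs total_retrans=%u\n",
           ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
    return 0;
}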
3337 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) in tcp_get_timestamping_opt_stats() argument
3339 const struct tcp_sock *tp = tcp_sk(sk); in tcp_get_timestamping_opt_stats()
3361 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_timestamping_opt_stats()
3372 nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); in tcp_get_timestamping_opt_stats()
3379 nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); in tcp_get_timestamping_opt_stats()
3392 static int do_tcp_getsockopt(struct sock *sk, int level, in do_tcp_getsockopt() argument
3395 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_getsockopt()
3396 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_getsockopt()
3397 struct net *net = sock_net(sk); in do_tcp_getsockopt()
3411 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) in do_tcp_getsockopt()
3452 tcp_get_info(sk, &info); in do_tcp_getsockopt()
3472 sz = ca_ops->get_info(sk, ~0U, &attr, &info); in do_tcp_getsockopt()
3482 val = !inet_csk_in_pingpong_mode(sk); in do_tcp_getsockopt()
3620 lock_sock(sk); in do_tcp_getsockopt()
3624 release_sock(sk); in do_tcp_getsockopt()
3627 release_sock(sk); in do_tcp_getsockopt()
3632 release_sock(sk); in do_tcp_getsockopt()
3636 release_sock(sk); in do_tcp_getsockopt()
3640 release_sock(sk); in do_tcp_getsockopt()
3642 release_sock(sk); in do_tcp_getsockopt()
3660 lock_sock(sk); in do_tcp_getsockopt()
3661 err = tcp_zerocopy_receive(sk, &zc); in do_tcp_getsockopt()
3662 release_sock(sk); in do_tcp_getsockopt()
3679 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, in tcp_getsockopt() argument
3682 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_getsockopt()
3685 return icsk->icsk_af_ops->getsockopt(sk, level, optname, in tcp_getsockopt()
3687 return do_tcp_getsockopt(sk, level, optname, optval, optlen); in tcp_getsockopt()
3692 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, in compat_tcp_getsockopt() argument
3696 return inet_csk_compat_getsockopt(sk, level, optname, in compat_tcp_getsockopt()
3698 return do_tcp_getsockopt(sk, level, optname, optval, optlen); in compat_tcp_getsockopt()
3838 void tcp_done(struct sock *sk) in tcp_done() argument
3846 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); in tcp_done()
3848 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) in tcp_done()
3849 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); in tcp_done()
3851 tcp_set_state(sk, TCP_CLOSE); in tcp_done()
3852 tcp_clear_xmit_timers(sk); in tcp_done()
3854 reqsk_fastopen_remove(sk, req, false); in tcp_done()
3856 sk->sk_shutdown = SHUTDOWN_MASK; in tcp_done()
3858 if (!sock_flag(sk, SOCK_DEAD)) in tcp_done()
3859 sk->sk_state_change(sk); in tcp_done()
3861 inet_csk_destroy_sock(sk); in tcp_done()
3865 int tcp_abort(struct sock *sk, int err) in tcp_abort() argument
3867 if (!sk_fullsock(sk)) { in tcp_abort()
3868 if (sk->sk_state == TCP_NEW_SYN_RECV) { in tcp_abort()
3869 struct request_sock *req = inet_reqsk(sk); in tcp_abort()
3880 lock_sock(sk); in tcp_abort()
3882 if (sk->sk_state == TCP_LISTEN) { in tcp_abort()
3883 tcp_set_state(sk, TCP_CLOSE); in tcp_abort()
3884 inet_csk_listen_stop(sk); in tcp_abort()
3889 bh_lock_sock(sk); in tcp_abort()
3891 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_abort()
3892 sk->sk_err = err; in tcp_abort()
3895 sk->sk_error_report(sk); in tcp_abort()
3896 if (tcp_need_reset(sk->sk_state)) in tcp_abort()
3897 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_abort()
3898 tcp_done(sk); in tcp_abort()
3901 bh_unlock_sock(sk); in tcp_abort()
3903 tcp_write_queue_purge(sk); in tcp_abort()
3904 release_sock(sk); in tcp_abort()