Lines Matching refs:sk

The entries below are cross-reference hits for the struct sock pointer sk in the kernel's net/ipv4/tcp.c. Each hit shows the source line number, the matching line, and the enclosing function; on definition lines the trailing "argument" or "local" notes how sk is declared there.

327 void tcp_enter_memory_pressure(struct sock *sk)  in tcp_enter_memory_pressure()  argument
330 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); in tcp_enter_memory_pressure()
378 void tcp_init_sock(struct sock *sk) in tcp_init_sock() argument
380 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_sock()
381 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock()
384 tcp_init_xmit_timers(sk); in tcp_init_sock()
410 tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; in tcp_init_sock()
412 tcp_assign_congestion_control(sk); in tcp_init_sock()
416 sk->sk_state = TCP_CLOSE; in tcp_init_sock()
418 sk->sk_write_space = sk_stream_write_space; in tcp_init_sock()
419 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); in tcp_init_sock()
423 sk->sk_sndbuf = sysctl_tcp_wmem[1]; in tcp_init_sock()
424 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; in tcp_init_sock()
427 sk_sockets_allocated_inc(sk); in tcp_init_sock()
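
tcp_init_sock() above seeds sk_sndbuf and sk_rcvbuf from the tcp_wmem[1]/tcp_rmem[1] sysctls. A minimal userspace sketch (the helper name is just for illustration) that reads those defaults back from a fresh, unconfigured socket:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Illustrative helper: print the buffer sizes a freshly created TCP
     * socket starts with; without an SO_SNDBUF/SO_RCVBUF override these
     * are the tcp_wmem[1]/tcp_rmem[1] defaults copied in by tcp_init_sock(). */
    static void print_default_bufs(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            int snd = 0, rcv = 0;
            socklen_t len = sizeof(snd);

            getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, &len);
            len = sizeof(rcv);
            getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, &len);
            printf("sk_sndbuf=%d sk_rcvbuf=%d\n", snd, rcv);
            close(fd);
    }
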
432 static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb) in tcp_tx_timestamp() argument
438 sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); in tcp_tx_timestamp()
456 struct sock *sk = sock->sk; in tcp_poll() local
457 const struct tcp_sock *tp = tcp_sk(sk); in tcp_poll()
460 sock_rps_record_flow(sk); in tcp_poll()
462 sock_poll_wait(file, sk_sleep(sk), wait); in tcp_poll()
464 state = sk_state_load(sk); in tcp_poll()
466 return inet_csk_listen_poll(sk); in tcp_poll()
502 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) in tcp_poll()
504 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_poll()
510 int target = sock_rcvlowat(sk, 0, INT_MAX); in tcp_poll()
513 !sock_flag(sk, SOCK_URGINLINE) && in tcp_poll()
520 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in tcp_poll()
521 if (sk_stream_is_writeable(sk)) { in tcp_poll()
524 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in tcp_poll()
525 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_poll()
533 if (sk_stream_is_writeable(sk)) in tcp_poll()
541 } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { in tcp_poll()
550 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in tcp_poll()
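
tcp_poll() reports readability only once at least sock_rcvlowat() bytes are queued (line 510 above). A sketch of the matching userspace side, assuming an already-connected fd and an illustrative helper name:

    #include <poll.h>
    #include <sys/socket.h>

    /* Illustrative helper: block until at least 'lowat' bytes are pending;
     * tcp_poll() uses sock_rcvlowat() as its POLLIN target. */
    static int wait_for_bytes(int fd, int lowat)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN };

            setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
            return poll(&pfd, 1, -1);   /* POLLHUP/POLLERR still wake us */
    }
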
557 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) in tcp_ioctl() argument
559 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl()
565 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
568 slow = lock_sock_fast(sk); in tcp_ioctl()
569 answ = tcp_inq(sk); in tcp_ioctl()
570 unlock_sock_fast(sk, slow); in tcp_ioctl()
576 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
579 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
585 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
588 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
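
The tcp_ioctl() branches above correspond to SIOCINQ (answered with tcp_inq()) and the SIOCOUTQ/SIOCOUTQNSD write-queue counters. An illustrative userspace use of those ioctls on a connected socket:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>   /* SIOCINQ, SIOCOUTQ */

    /* Illustrative helper: SIOCINQ returns unread bytes in the receive
     * queue, SIOCOUTQ the bytes queued but not yet acked by the peer. */
    static void print_queue_depths(int fd)
    {
            int inq = 0, outq = 0;

            ioctl(fd, SIOCINQ, &inq);
            ioctl(fd, SIOCOUTQ, &outq);
            printf("inq=%d outq=%d\n", inq, outq);
    }
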
612 static void skb_entail(struct sock *sk, struct sk_buff *skb) in skb_entail() argument
614 struct tcp_sock *tp = tcp_sk(sk); in skb_entail()
622 tcp_add_write_queue_tail(sk, skb); in skb_entail()
623 sk->sk_wmem_queued += skb->truesize; in skb_entail()
624 sk_mem_charge(sk, skb->truesize); in skb_entail()
628 tcp_slow_start_after_idle_check(sk); in skb_entail()
647 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, in tcp_should_autocork() argument
652 skb != tcp_write_queue_head(sk) && in tcp_should_autocork()
653 atomic_read(&sk->sk_wmem_alloc) > skb->truesize; in tcp_should_autocork()
656 static void tcp_push(struct sock *sk, int flags, int mss_now, in tcp_push() argument
659 struct tcp_sock *tp = tcp_sk(sk); in tcp_push()
662 if (!tcp_send_head(sk)) in tcp_push()
665 skb = tcp_write_queue_tail(sk); in tcp_push()
671 if (tcp_should_autocork(sk, skb, size_goal)) { in tcp_push()
675 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); in tcp_push()
681 if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize) in tcp_push()
688 __tcp_push_pending_frames(sk, mss_now, nonagle); in tcp_push()
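
tcp_push() honours MSG_MORE/TCP_CORK and may autocork a small write while earlier skbs are still in flight (the tcp_should_autocork() test above). A minimal sender-side sketch that leans on this, with an illustrative helper name:

    #include <stddef.h>
    #include <sys/socket.h>

    /* Illustrative helper: MSG_MORE on the header keeps tcp_push() from
     * forcing out a partial segment; the body write then lets a full-sized
     * segment go.  TCP_CORK via setsockopt() covers longer spans. */
    static void send_header_then_body(int fd, const void *hdr, size_t hlen,
                                      const void *body, size_t blen)
    {
            send(fd, hdr, hlen, MSG_MORE);   /* held back (corked) */
            send(fd, body, blen, 0);         /* pushed out */
    }
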
697 ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe, in tcp_splice_data_recv()
704 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) in __tcp_splice_read() argument
712 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); in __tcp_splice_read()
731 struct sock *sk = sock->sk; in tcp_splice_read() local
741 sock_rps_record_flow(sk); in tcp_splice_read()
750 lock_sock(sk); in tcp_splice_read()
752 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); in tcp_splice_read()
754 ret = __tcp_splice_read(sk, &tss); in tcp_splice_read()
760 if (sock_flag(sk, SOCK_DONE)) in tcp_splice_read()
762 if (sk->sk_err) { in tcp_splice_read()
763 ret = sock_error(sk); in tcp_splice_read()
766 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_splice_read()
768 if (sk->sk_state == TCP_CLOSE) { in tcp_splice_read()
773 if (!sock_flag(sk, SOCK_DONE)) in tcp_splice_read()
785 if (!skb_queue_empty(&sk->sk_receive_queue)) in tcp_splice_read()
787 sk_wait_data(sk, &timeo, NULL); in tcp_splice_read()
799 release_sock(sk); in tcp_splice_read()
800 lock_sock(sk); in tcp_splice_read()
802 if (sk->sk_err || sk->sk_state == TCP_CLOSE || in tcp_splice_read()
803 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_splice_read()
808 release_sock(sk); in tcp_splice_read()
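
tcp_splice_read() is the receive half of splice(2) on a TCP socket: queued data is handed to a pipe through tcp_read_sock()/skb_splice_bits() without a copy through userspace. A minimal sketch, assuming sockfd is connected and pipe_wr is the write end of a pipe:

    #define _GNU_SOURCE          /* splice() */
    #include <fcntl.h>
    #include <unistd.h>

    /* Illustrative helper: move up to 'len' bytes of received TCP data
     * straight into a pipe. */
    static ssize_t sock_to_pipe(int sockfd, int pipe_wr, size_t len)
    {
            return splice(sockfd, NULL, pipe_wr, NULL, len, SPLICE_F_MOVE);
    }
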
817 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, in sk_stream_alloc_skb() argument
825 if (unlikely(tcp_under_memory_pressure(sk))) in sk_stream_alloc_skb()
826 sk_mem_reclaim_partial(sk); in sk_stream_alloc_skb()
828 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); in sk_stream_alloc_skb()
834 sk_forced_mem_schedule(sk, skb->truesize); in sk_stream_alloc_skb()
836 mem_scheduled = sk_wmem_schedule(sk, skb->truesize); in sk_stream_alloc_skb()
839 skb_reserve(skb, sk->sk_prot->max_header); in sk_stream_alloc_skb()
849 sk->sk_prot->enter_memory_pressure(sk); in sk_stream_alloc_skb()
850 sk_stream_moderate_sndbuf(sk); in sk_stream_alloc_skb()
855 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, in tcp_xmit_size_goal() argument
858 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal()
861 if (!large_allowed || !sk_can_gso(sk)) in tcp_xmit_size_goal()
865 new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; in tcp_xmit_size_goal()
873 sk->sk_gso_max_segs); in tcp_xmit_size_goal()
880 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) in tcp_send_mss() argument
884 mss_now = tcp_current_mss(sk); in tcp_send_mss()
885 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); in tcp_send_mss()
890 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, in do_tcp_sendpages() argument
893 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_sendpages()
897 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in do_tcp_sendpages()
903 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in do_tcp_sendpages()
904 !tcp_passive_fastopen(sk)) { in do_tcp_sendpages()
905 err = sk_stream_wait_connect(sk, &timeo); in do_tcp_sendpages()
910 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in do_tcp_sendpages()
912 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
916 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in do_tcp_sendpages()
920 struct sk_buff *skb = tcp_write_queue_tail(sk); in do_tcp_sendpages()
924 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0 || in do_tcp_sendpages()
927 if (!sk_stream_memory_free(sk)) in do_tcp_sendpages()
930 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, in do_tcp_sendpages()
931 skb_queue_empty(&sk->sk_write_queue)); in do_tcp_sendpages()
935 skb_entail(sk, skb); in do_tcp_sendpages()
948 if (!sk_wmem_schedule(sk, copy)) in do_tcp_sendpages()
962 sk->sk_wmem_queued += copy; in do_tcp_sendpages()
963 sk_mem_charge(sk, copy); in do_tcp_sendpages()
983 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); in do_tcp_sendpages()
984 } else if (skb == tcp_send_head(sk)) in do_tcp_sendpages()
985 tcp_push_one(sk, mss_now); in do_tcp_sendpages()
989 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in do_tcp_sendpages()
991 tcp_push(sk, flags & ~MSG_MORE, mss_now, in do_tcp_sendpages()
994 err = sk_stream_wait_memory(sk, &timeo); in do_tcp_sendpages()
998 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
1003 tcp_tx_timestamp(sk, sk->sk_tsflags, tcp_write_queue_tail(sk)); in do_tcp_sendpages()
1005 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in do_tcp_sendpages()
1014 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in do_tcp_sendpages()
1015 sk->sk_write_space(sk); in do_tcp_sendpages()
1016 return sk_stream_error(sk, flags, err); in do_tcp_sendpages()
1019 int tcp_sendpage(struct sock *sk, struct page *page, int offset, in tcp_sendpage() argument
1024 if (!(sk->sk_route_caps & NETIF_F_SG) || in tcp_sendpage()
1025 !sk_check_csum_caps(sk)) in tcp_sendpage()
1026 return sock_no_sendpage(sk->sk_socket, page, offset, size, in tcp_sendpage()
1029 lock_sock(sk); in tcp_sendpage()
1031 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ in tcp_sendpage()
1033 res = do_tcp_sendpages(sk, page, offset, size, flags); in tcp_sendpage()
1034 release_sock(sk); in tcp_sendpage()
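
tcp_sendpage()/do_tcp_sendpages() back the page-based transmit path used by sendfile(2) and splice(2) toward a TCP socket; without scatter-gather and checksum offload the code falls back to sock_no_sendpage() (line 1026 above). An illustrative sketch of the sendfile() side:

    #include <sys/sendfile.h>
    #include <sys/types.h>

    /* Illustrative helper: hand 'count' bytes of an open file to the TCP
     * socket; page references are attached to skbs instead of copying. */
    static ssize_t file_to_sock(int sockfd, int filefd, off_t *off, size_t count)
    {
            return sendfile(sockfd, filefd, off, count);
    }
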
1056 static int select_size(const struct sock *sk, bool sg, bool first_skb) in select_size() argument
1058 const struct tcp_sock *tp = tcp_sk(sk); in select_size()
1062 if (sk_can_gso(sk)) { in select_size()
1084 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, in tcp_sendmsg_fastopen() argument
1087 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_fastopen()
1088 struct inet_sock *inet = inet_sk(sk); in tcp_sendmsg_fastopen()
1100 sk->sk_allocation); in tcp_sendmsg_fastopen()
1107 err = tcp_connect(sk); in tcp_sendmsg_fastopen()
1110 tcp_set_state(sk, TCP_CLOSE); in tcp_sendmsg_fastopen()
1112 sk->sk_route_caps = 0; in tcp_sendmsg_fastopen()
1116 err = __inet_stream_connect(sk->sk_socket, uaddr, in tcp_sendmsg_fastopen()
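
tcp_sendmsg_fastopen() serves the client side of TCP Fast Open: the payload is queued before the handshake and carried in the SYN (the defer_connect checks above belong to the TCP_FASTOPEN_CONNECT variant). A sketch of the classic sendto()+MSG_FASTOPEN form, helper name illustrative:

    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef MSG_FASTOPEN
    #define MSG_FASTOPEN 0x20000000   /* older libcs may not define it */
    #endif

    /* Illustrative helper: send the first payload bytes in the SYN of a
     * new connection instead of calling connect() first. */
    static ssize_t fastopen_send(int fd, const struct sockaddr_in *dst,
                                 const void *buf, size_t len)
    {
            return sendto(fd, buf, len, MSG_FASTOPEN,
                          (const struct sockaddr *)dst, sizeof(*dst));
    }
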
1129 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) in tcp_sendmsg() argument
1131 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg()
1140 lock_sock(sk); in tcp_sendmsg()
1143 if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { in tcp_sendmsg()
1144 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); in tcp_sendmsg()
1151 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in tcp_sendmsg()
1153 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ in tcp_sendmsg()
1159 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in tcp_sendmsg()
1160 !tcp_passive_fastopen(sk)) { in tcp_sendmsg()
1161 err = sk_stream_wait_connect(sk, &timeo); in tcp_sendmsg()
1168 copied = tcp_send_rcvq(sk, msg, size); in tcp_sendmsg()
1179 sockc.tsflags = sk->sk_tsflags; in tcp_sendmsg()
1181 err = sock_cmsg_send(sk, msg, &sockc); in tcp_sendmsg()
1189 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in tcp_sendmsg()
1195 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg()
1198 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in tcp_sendmsg()
1201 sg = !!(sk->sk_route_caps & NETIF_F_SG); in tcp_sendmsg()
1207 skb = tcp_write_queue_tail(sk); in tcp_sendmsg()
1208 if (tcp_send_head(sk)) { in tcp_sendmsg()
1221 if (!sk_stream_memory_free(sk)) in tcp_sendmsg()
1224 if (process_backlog && sk_flush_backlog(sk)) { in tcp_sendmsg()
1228 first_skb = skb_queue_empty(&sk->sk_write_queue); in tcp_sendmsg()
1229 skb = sk_stream_alloc_skb(sk, in tcp_sendmsg()
1230 select_size(sk, sg, first_skb), in tcp_sendmsg()
1231 sk->sk_allocation, in tcp_sendmsg()
1240 if (sk_check_csum_caps(sk)) in tcp_sendmsg()
1243 skb_entail(sk, skb); in tcp_sendmsg()
1263 err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); in tcp_sendmsg()
1269 struct page_frag *pfrag = sk_page_frag(sk); in tcp_sendmsg()
1271 if (!sk_page_frag_refill(sk, pfrag)) in tcp_sendmsg()
1285 if (!sk_wmem_schedule(sk, copy)) in tcp_sendmsg()
1288 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, in tcp_sendmsg()
1325 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); in tcp_sendmsg()
1326 } else if (skb == tcp_send_head(sk)) in tcp_sendmsg()
1327 tcp_push_one(sk, mss_now); in tcp_sendmsg()
1331 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_sendmsg()
1334 tcp_push(sk, flags & ~MSG_MORE, mss_now, in tcp_sendmsg()
1337 err = sk_stream_wait_memory(sk, &timeo); in tcp_sendmsg()
1341 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg()
1346 tcp_tx_timestamp(sk, sockc.tsflags, tcp_write_queue_tail(sk)); in tcp_sendmsg()
1347 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in tcp_sendmsg()
1350 release_sock(sk); in tcp_sendmsg()
1355 tcp_unlink_write_queue(skb, sk); in tcp_sendmsg()
1359 tcp_check_send_head(sk, skb); in tcp_sendmsg()
1360 sk_wmem_free_skb(sk, skb); in tcp_sendmsg()
1367 err = sk_stream_error(sk, flags, err); in tcp_sendmsg()
1369 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in tcp_sendmsg()
1370 sk->sk_write_space(sk); in tcp_sendmsg()
1371 release_sock(sk); in tcp_sendmsg()
1381 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) in tcp_recv_urg() argument
1383 struct tcp_sock *tp = tcp_sk(sk); in tcp_recv_urg()
1386 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || in tcp_recv_urg()
1390 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) in tcp_recv_urg()
1413 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) in tcp_recv_urg()
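
tcp_recv_urg() serves out-of-band reads of the TCP urgent byte; with SO_OOBINLINE (SOCK_URGINLINE above) the byte stays inline in the normal stream instead. A minimal sketch of the MSG_OOB path:

    #include <sys/socket.h>

    /* Illustrative helper: fetch the single urgent byte, if any, out of band. */
    static ssize_t read_urgent_byte(int fd, char *byte)
    {
            return recv(fd, byte, 1, MSG_OOB);
    }
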
1425 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) in tcp_peek_sndq() argument
1432 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_peek_sndq()
1449 static void tcp_cleanup_rbuf(struct sock *sk, int copied) in tcp_cleanup_rbuf() argument
1451 struct tcp_sock *tp = tcp_sk(sk); in tcp_cleanup_rbuf()
1454 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_cleanup_rbuf()
1460 if (inet_csk_ack_scheduled(sk)) { in tcp_cleanup_rbuf()
1461 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_rbuf()
1477 !atomic_read(&sk->sk_rmem_alloc))) in tcp_cleanup_rbuf()
1487 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { in tcp_cleanup_rbuf()
1492 __u32 new_window = __tcp_select_window(sk); in tcp_cleanup_rbuf()
1504 tcp_send_ack(sk); in tcp_cleanup_rbuf()
1507 static void tcp_prequeue_process(struct sock *sk) in tcp_prequeue_process() argument
1510 struct tcp_sock *tp = tcp_sk(sk); in tcp_prequeue_process()
1512 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED); in tcp_prequeue_process()
1515 sk_backlog_rcv(sk, skb); in tcp_prequeue_process()
1521 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) in tcp_recv_skb() argument
1526 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { in tcp_recv_skb()
1540 sk_eat_skb(sk, skb); in tcp_recv_skb()
1556 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, in tcp_read_sock() argument
1560 struct tcp_sock *tp = tcp_sk(sk); in tcp_read_sock()
1565 if (sk->sk_state == TCP_LISTEN) in tcp_read_sock()
1567 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { in tcp_read_sock()
1596 skb = tcp_recv_skb(sk, seq - 1, &offset); in tcp_read_sock()
1606 sk_eat_skb(sk, skb); in tcp_read_sock()
1610 sk_eat_skb(sk, skb); in tcp_read_sock()
1617 tcp_rcv_space_adjust(sk); in tcp_read_sock()
1621 tcp_recv_skb(sk, seq, &offset); in tcp_read_sock()
1622 tcp_cleanup_rbuf(sk, copied); in tcp_read_sock()
1630 return tcp_inq(sock->sk); in tcp_peek_len()
1642 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, in tcp_recvmsg() argument
1645 struct tcp_sock *tp = tcp_sk(sk); in tcp_recvmsg()
1658 return inet_recv_error(sk, msg, len, addr_len); in tcp_recvmsg()
1660 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && in tcp_recvmsg()
1661 (sk->sk_state == TCP_ESTABLISHED)) in tcp_recvmsg()
1662 sk_busy_loop(sk, nonblock); in tcp_recvmsg()
1664 lock_sock(sk); in tcp_recvmsg()
1667 if (sk->sk_state == TCP_LISTEN) in tcp_recvmsg()
1670 timeo = sock_rcvtimeo(sk, nonblock); in tcp_recvmsg()
1697 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); in tcp_recvmsg()
1714 last = skb_peek_tail(&sk->sk_receive_queue); in tcp_recvmsg()
1715 skb_queue_walk(&sk->sk_receive_queue, skb) { in tcp_recvmsg()
1742 if (copied >= target && !sk->sk_backlog.tail) in tcp_recvmsg()
1746 if (sk->sk_err || in tcp_recvmsg()
1747 sk->sk_state == TCP_CLOSE || in tcp_recvmsg()
1748 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_recvmsg()
1753 if (sock_flag(sk, SOCK_DONE)) in tcp_recvmsg()
1756 if (sk->sk_err) { in tcp_recvmsg()
1757 copied = sock_error(sk); in tcp_recvmsg()
1761 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_recvmsg()
1764 if (sk->sk_state == TCP_CLOSE) { in tcp_recvmsg()
1765 if (!sock_flag(sk, SOCK_DONE)) { in tcp_recvmsg()
1786 tcp_cleanup_rbuf(sk, copied); in tcp_recvmsg()
1835 release_sock(sk); in tcp_recvmsg()
1836 lock_sock(sk); in tcp_recvmsg()
1838 sk_wait_data(sk, &timeo, last); in tcp_recvmsg()
1848 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); in tcp_recvmsg()
1856 tcp_prequeue_process(sk); in tcp_recvmsg()
1860 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); in tcp_recvmsg()
1886 if (!sock_flag(sk, SOCK_URGINLINE)) { in tcp_recvmsg()
1913 tcp_rcv_space_adjust(sk); in tcp_recvmsg()
1918 tcp_fast_path_check(sk); in tcp_recvmsg()
1926 sk_eat_skb(sk, skb); in tcp_recvmsg()
1933 sk_eat_skb(sk, skb); in tcp_recvmsg()
1943 tcp_prequeue_process(sk); in tcp_recvmsg()
1946 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); in tcp_recvmsg()
1961 tcp_cleanup_rbuf(sk, copied); in tcp_recvmsg()
1963 release_sock(sk); in tcp_recvmsg()
1967 release_sock(sk); in tcp_recvmsg()
1971 err = tcp_recv_urg(sk, msg, len, flags); in tcp_recvmsg()
1975 err = tcp_peek_sndq(sk, msg, len); in tcp_recvmsg()
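
In tcp_recvmsg(), MSG_WAITALL raises the copy target from SO_RCVLOWAT to the full request (line 1697 above), so the call returns short only on EOF, an error, or a signal. The matching userspace call, as an illustrative helper:

    #include <sys/socket.h>

    /* Illustrative helper: read exactly 'len' bytes unless the connection
     * ends or errors out first. */
    static ssize_t recv_exact(int fd, void *buf, size_t len)
    {
            return recv(fd, buf, len, MSG_WAITALL);
    }
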
1980 void tcp_set_state(struct sock *sk, int state) in tcp_set_state() argument
1982 int oldstate = sk->sk_state; in tcp_set_state()
1987 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); in tcp_set_state()
1992 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); in tcp_set_state()
1994 sk->sk_prot->unhash(sk); in tcp_set_state()
1995 if (inet_csk(sk)->icsk_bind_hash && in tcp_set_state()
1996 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) in tcp_set_state()
1997 inet_put_port(sk); in tcp_set_state()
2001 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); in tcp_set_state()
2007 sk_state_store(sk, state); in tcp_set_state()
2010 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); in tcp_set_state()
2039 static int tcp_close_state(struct sock *sk) in tcp_close_state() argument
2041 int next = (int)new_state[sk->sk_state]; in tcp_close_state()
2044 tcp_set_state(sk, ns); in tcp_close_state()
2054 void tcp_shutdown(struct sock *sk, int how) in tcp_shutdown() argument
2064 if ((1 << sk->sk_state) & in tcp_shutdown()
2068 if (tcp_close_state(sk)) in tcp_shutdown()
2069 tcp_send_fin(sk); in tcp_shutdown()
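
tcp_shutdown() implements the write-side half-close: tcp_close_state() advances the state machine and tcp_send_fin() queues the FIN, while the receive side stays usable. The corresponding userspace idiom:

    #include <sys/socket.h>

    /* Illustrative helper: send our FIN but keep reading whatever the
     * peer still has to say. */
    static void half_close(int fd)
    {
            shutdown(fd, SHUT_WR);
            /* ...continue with recv(fd, ...) until it returns 0... */
    }
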
2074 bool tcp_check_oom(struct sock *sk, int shift) in tcp_check_oom() argument
2078 too_many_orphans = tcp_too_many_orphans(sk, shift); in tcp_check_oom()
2079 out_of_socket_memory = tcp_out_of_memory(sk); in tcp_check_oom()
2088 void tcp_close(struct sock *sk, long timeout) in tcp_close() argument
2094 lock_sock(sk); in tcp_close()
2095 sk->sk_shutdown = SHUTDOWN_MASK; in tcp_close()
2097 if (sk->sk_state == TCP_LISTEN) { in tcp_close()
2098 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2101 inet_csk_listen_stop(sk); in tcp_close()
2110 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { in tcp_close()
2119 sk_mem_reclaim(sk); in tcp_close()
2122 if (sk->sk_state == TCP_CLOSE) in tcp_close()
2132 if (unlikely(tcp_sk(sk)->repair)) { in tcp_close()
2133 sk->sk_prot->disconnect(sk, 0); in tcp_close()
2136 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); in tcp_close()
2137 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2138 tcp_send_active_reset(sk, sk->sk_allocation); in tcp_close()
2139 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { in tcp_close()
2141 sk->sk_prot->disconnect(sk, 0); in tcp_close()
2142 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_close()
2143 } else if (tcp_close_state(sk)) { in tcp_close()
2173 tcp_send_fin(sk); in tcp_close()
2176 sk_stream_wait_close(sk, timeout); in tcp_close()
2179 state = sk->sk_state; in tcp_close()
2180 sock_hold(sk); in tcp_close()
2181 sock_orphan(sk); in tcp_close()
2184 release_sock(sk); in tcp_close()
2191 bh_lock_sock(sk); in tcp_close()
2192 WARN_ON(sock_owned_by_user(sk)); in tcp_close()
2194 percpu_counter_inc(sk->sk_prot->orphan_count); in tcp_close()
2197 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) in tcp_close()
2214 if (sk->sk_state == TCP_FIN_WAIT2) { in tcp_close()
2215 struct tcp_sock *tp = tcp_sk(sk); in tcp_close()
2217 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2218 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_close()
2219 __NET_INC_STATS(sock_net(sk), in tcp_close()
2222 const int tmo = tcp_fin_time(sk); in tcp_close()
2225 inet_csk_reset_keepalive_timer(sk, in tcp_close()
2228 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_close()
2233 if (sk->sk_state != TCP_CLOSE) { in tcp_close()
2234 sk_mem_reclaim(sk); in tcp_close()
2235 if (tcp_check_oom(sk, 0)) { in tcp_close()
2236 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2237 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_close()
2238 __NET_INC_STATS(sock_net(sk), in tcp_close()
2240 } else if (!check_net(sock_net(sk))) { in tcp_close()
2242 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2246 if (sk->sk_state == TCP_CLOSE) { in tcp_close()
2247 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; in tcp_close()
2253 reqsk_fastopen_remove(sk, req, false); in tcp_close()
2254 inet_csk_destroy_sock(sk); in tcp_close()
2259 bh_unlock_sock(sk); in tcp_close()
2261 sock_put(sk); in tcp_close()
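
In tcp_close() above, unread receive data leads to an immediate reset (TCPABORTONCLOSE), and SO_LINGER with a zero timeout takes the disconnect path counted as TCPABORTONDATA instead of the normal FIN exchange. A sketch of the abortive-close setup, helper name illustrative:

    #include <sys/socket.h>
    #include <unistd.h>

    /* Illustrative helper: arrange for close() to reset the connection
     * rather than linger through FIN_WAIT/TIME_WAIT. */
    static void abortive_close(int fd)
    {
            struct linger lin = { .l_onoff = 1, .l_linger = 0 };

            setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
            close(fd);
    }
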
2274 int tcp_disconnect(struct sock *sk, int flags) in tcp_disconnect() argument
2276 struct inet_sock *inet = inet_sk(sk); in tcp_disconnect()
2277 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_disconnect()
2278 struct tcp_sock *tp = tcp_sk(sk); in tcp_disconnect()
2280 int old_state = sk->sk_state; in tcp_disconnect()
2283 tcp_set_state(sk, TCP_CLOSE); in tcp_disconnect()
2287 inet_csk_listen_stop(sk); in tcp_disconnect()
2289 sk->sk_err = ECONNABORTED; in tcp_disconnect()
2296 tcp_send_active_reset(sk, gfp_any()); in tcp_disconnect()
2297 sk->sk_err = ECONNRESET; in tcp_disconnect()
2299 sk->sk_err = ECONNRESET; in tcp_disconnect()
2301 tcp_clear_xmit_timers(sk); in tcp_disconnect()
2302 __skb_queue_purge(&sk->sk_receive_queue); in tcp_disconnect()
2303 tcp_write_queue_purge(sk); in tcp_disconnect()
2308 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) in tcp_disconnect()
2309 inet_reset_saddr(sk); in tcp_disconnect()
2311 sk->sk_shutdown = 0; in tcp_disconnect()
2312 sock_reset_flag(sk, SOCK_DONE); in tcp_disconnect()
2324 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_disconnect()
2327 inet_csk_delack_init(sk); in tcp_disconnect()
2332 tcp_init_send_head(sk); in tcp_disconnect()
2334 __sk_dst_reset(sk); in tcp_disconnect()
2335 dst_release(sk->sk_rx_dst); in tcp_disconnect()
2336 sk->sk_rx_dst = NULL; in tcp_disconnect()
2345 if (sk->sk_frag.page) { in tcp_disconnect()
2346 put_page(sk->sk_frag.page); in tcp_disconnect()
2347 sk->sk_frag.page = NULL; in tcp_disconnect()
2348 sk->sk_frag.offset = 0; in tcp_disconnect()
2351 sk->sk_error_report(sk); in tcp_disconnect()
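
tcp_disconnect() is also what a userspace connect(2) with AF_UNSPEC reaches: the association is dissolved, the queues are purged, and the socket drops back to TCP_CLOSE so it can be connected again. A minimal sketch with an illustrative helper name:

    #include <string.h>
    #include <sys/socket.h>

    /* Illustrative helper: dissolve the current association so the socket
     * can be reused. */
    static int tcp_dissolve(int fd)
    {
            struct sockaddr sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_family = AF_UNSPEC;
            return connect(fd, &sa, sizeof(sa));
    }
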
2356 static inline bool tcp_can_repair_sock(const struct sock *sk) in tcp_can_repair_sock() argument
2358 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && in tcp_can_repair_sock()
2359 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); in tcp_can_repair_sock()
2446 static int do_tcp_setsockopt(struct sock *sk, int level, in do_tcp_setsockopt() argument
2449 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_setsockopt()
2450 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_setsockopt()
2451 struct net *net = sock_net(sk); in do_tcp_setsockopt()
2469 lock_sock(sk); in do_tcp_setsockopt()
2470 err = tcp_set_congestion_control(sk, name); in do_tcp_setsockopt()
2471 release_sock(sk); in do_tcp_setsockopt()
2485 lock_sock(sk); in do_tcp_setsockopt()
2510 tcp_push_pending_frames(sk); in do_tcp_setsockopt()
2534 if (!tcp_can_repair_sock(sk)) in do_tcp_setsockopt()
2538 sk->sk_reuse = SK_FORCE_REUSE; in do_tcp_setsockopt()
2542 sk->sk_reuse = SK_NO_REUSE; in do_tcp_setsockopt()
2543 tcp_send_window_probe(sk); in do_tcp_setsockopt()
2559 if (sk->sk_state != TCP_CLOSE) in do_tcp_setsockopt()
2572 else if (sk->sk_state == TCP_ESTABLISHED) in do_tcp_setsockopt()
2598 tcp_push_pending_frames(sk); in do_tcp_setsockopt()
2607 if (sock_flag(sk, SOCK_KEEPOPEN) && in do_tcp_setsockopt()
2608 !((1 << sk->sk_state) & in do_tcp_setsockopt()
2615 inet_csk_reset_keepalive_timer(sk, elapsed); in do_tcp_setsockopt()
2663 if (sk->sk_state != TCP_CLOSE) { in do_tcp_setsockopt()
2678 if ((1 << sk->sk_state) & in do_tcp_setsockopt()
2680 inet_csk_ack_scheduled(sk)) { in do_tcp_setsockopt()
2682 tcp_cleanup_rbuf(sk, 1); in do_tcp_setsockopt()
2691 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) in do_tcp_setsockopt()
2692 err = tp->af_specific->md5_parse(sk, optval, optlen); in do_tcp_setsockopt()
2708 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | in do_tcp_setsockopt()
2712 fastopen_queue_tune(sk, val); in do_tcp_setsockopt()
2721 if (sk->sk_state == TCP_CLOSE) in do_tcp_setsockopt()
2740 sk->sk_write_space(sk); in do_tcp_setsockopt()
2747 release_sock(sk); in do_tcp_setsockopt()
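
do_tcp_setsockopt() is where TCP_CONGESTION lands in tcp_set_congestion_control() and where clearing Nagle immediately flushes held data via tcp_push_pending_frames(). An illustrative userspace sketch of both options:

    #include <netinet/in.h>
    #include <netinet/tcp.h>     /* TCP_NODELAY, TCP_CONGESTION */
    #include <sys/socket.h>

    /* Illustrative helper: disable Nagle and pick a congestion control
     * module (any name listed in net.ipv4.tcp_available_congestion_control). */
    static void tune_tcp_socket(int fd)
    {
            int one = 1;
            static const char cc[] = "cubic";

            setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
            setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc, sizeof(cc));
    }
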
2751 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, in tcp_setsockopt() argument
2754 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_setsockopt()
2757 return icsk->icsk_af_ops->setsockopt(sk, level, optname, in tcp_setsockopt()
2759 return do_tcp_setsockopt(sk, level, optname, optval, optlen); in tcp_setsockopt()
2764 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, in compat_tcp_setsockopt() argument
2768 return inet_csk_compat_setsockopt(sk, level, optname, in compat_tcp_setsockopt()
2770 return do_tcp_setsockopt(sk, level, optname, optval, optlen); in compat_tcp_setsockopt()
2776 void tcp_get_info(struct sock *sk, struct tcp_info *info) in tcp_get_info() argument
2778 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ in tcp_get_info()
2779 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_get_info()
2787 if (sk->sk_type != SOCK_STREAM) in tcp_get_info()
2790 info->tcpi_state = sk_state_load(sk); in tcp_get_info()
2820 info->tcpi_unacked = sk->sk_ack_backlog; in tcp_get_info()
2821 info->tcpi_sacked = sk->sk_max_ack_backlog; in tcp_get_info()
2848 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_info()
2852 rate = READ_ONCE(sk->sk_max_pacing_rate); in tcp_get_info()
2882 static int do_tcp_getsockopt(struct sock *sk, int level, in do_tcp_getsockopt() argument
2885 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_getsockopt()
2886 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_getsockopt()
2887 struct net *net = sock_net(sk); in do_tcp_getsockopt()
2901 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) in do_tcp_getsockopt()
2942 tcp_get_info(sk, &info); in do_tcp_getsockopt()
2962 sz = ca_ops->get_info(sk, ~0U, &attr, &info); in do_tcp_getsockopt()
3059 lock_sock(sk); in do_tcp_getsockopt()
3063 release_sock(sk); in do_tcp_getsockopt()
3066 release_sock(sk); in do_tcp_getsockopt()
3071 release_sock(sk); in do_tcp_getsockopt()
3075 release_sock(sk); in do_tcp_getsockopt()
3079 release_sock(sk); in do_tcp_getsockopt()
3081 release_sock(sk); in do_tcp_getsockopt()
3099 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, in tcp_getsockopt() argument
3102 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_getsockopt()
3105 return icsk->icsk_af_ops->getsockopt(sk, level, optname, in tcp_getsockopt()
3107 return do_tcp_getsockopt(sk, level, optname, optval, optlen); in tcp_getsockopt()
3112 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, in compat_tcp_getsockopt() argument
3116 return inet_csk_compat_getsockopt(sk, level, optname, in compat_tcp_getsockopt()
3118 return do_tcp_getsockopt(sk, level, optname, optval, optlen); in compat_tcp_getsockopt()
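
tcp_get_info() above fills struct tcp_info for getsockopt(TCP_INFO): tcpi_state mirrors the sk_state load and the rate fields come from sk_pacing_rate/sk_max_pacing_rate. An illustrative reader:

    #include <netinet/in.h>
    #include <netinet/tcp.h>     /* TCP_INFO, struct tcp_info */
    #include <stdio.h>
    #include <sys/socket.h>

    /* Illustrative helper: dump a few of the fields tcp_get_info() exports. */
    static void dump_tcp_info(int fd)
    {
            struct tcp_info info;
            socklen_t len = sizeof(info);

            if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
                    printf("state=%u rtt=%uus retrans=%u\n",
                           (unsigned int)info.tcpi_state, info.tcpi_rtt,
                           info.tcpi_total_retrans);
    }
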
3255 void tcp_done(struct sock *sk) in tcp_done() argument
3257 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; in tcp_done()
3259 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) in tcp_done()
3260 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); in tcp_done()
3262 tcp_set_state(sk, TCP_CLOSE); in tcp_done()
3263 tcp_clear_xmit_timers(sk); in tcp_done()
3265 reqsk_fastopen_remove(sk, req, false); in tcp_done()
3267 sk->sk_shutdown = SHUTDOWN_MASK; in tcp_done()
3269 if (!sock_flag(sk, SOCK_DEAD)) in tcp_done()
3270 sk->sk_state_change(sk); in tcp_done()
3272 inet_csk_destroy_sock(sk); in tcp_done()
3276 int tcp_abort(struct sock *sk, int err) in tcp_abort() argument
3278 if (!sk_fullsock(sk)) { in tcp_abort()
3279 if (sk->sk_state == TCP_NEW_SYN_RECV) { in tcp_abort()
3280 struct request_sock *req = inet_reqsk(sk); in tcp_abort()
3292 lock_sock(sk); in tcp_abort()
3294 if (sk->sk_state == TCP_LISTEN) { in tcp_abort()
3295 tcp_set_state(sk, TCP_CLOSE); in tcp_abort()
3296 inet_csk_listen_stop(sk); in tcp_abort()
3301 bh_lock_sock(sk); in tcp_abort()
3303 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_abort()
3304 sk->sk_err = err; in tcp_abort()
3307 sk->sk_error_report(sk); in tcp_abort()
3308 if (tcp_need_reset(sk->sk_state)) in tcp_abort()
3309 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_abort()
3310 tcp_done(sk); in tcp_abort()
3313 bh_unlock_sock(sk); in tcp_abort()
3315 release_sock(sk); in tcp_abort()