Lines matching refs: ssk
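
(The fragments below appear to come from net/mptcp/protocol.c in the Linux kernel, roughly the v5.15-v5.17 era; `ssk` is the per-subflow TCP socket backing an MPTCP connection, `msk` the MPTCP-level socket. The indented sketches interspersed below reconstruct context around the matched lines and are not verbatim source.)
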
273 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, in __mptcp_move_skb() argument
277 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skb()
282 __skb_unlink(skb, &ssk->sk_receive_queue); in __mptcp_move_skb()
291 if (ssk->sk_forward_alloc < amount) in __mptcp_move_skb()
294 ssk->sk_forward_alloc -= amount; in __mptcp_move_skb()
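
The __mptcp_move_skb() fragments above (lines 277-294) capture the receive-side ownership hand-off: the skb leaves the subflow's receive queue and its memory charge migrates from the subflow socket to the msk. A minimal sketch of that accounting step, assuming the usual `sk = (struct sock *)msk` and the page-quantum rounding used in contemporary sources:

    __skb_unlink(skb, &ssk->sk_receive_queue);
    skb_orphan(skb);

    if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
        /* borrow pages the subflow has already charged */
        int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;

        if (ssk->sk_forward_alloc < amount)
            return false; /* subflow cannot cover the charge */

        ssk->sk_forward_alloc -= amount;
        sk->sk_forward_alloc += amount;
    }
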
423 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_timeout_from_subflow() local
425 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? in mptcp_timeout_from_subflow()
426 inet_csk(ssk)->icsk_timeout - jiffies : 0; in mptcp_timeout_from_subflow()
439 static bool tcp_can_send_ack(const struct sock *ssk) in tcp_can_send_ack() argument
441 return !((1 << inet_sk_state_load(ssk)) & in tcp_can_send_ack()
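
Line 441 is truncated at the state mask. In kernels of this vintage the test excludes every state in which TCP must not emit an ACK; a likely completion (the exact mask is an assumption):

    static bool tcp_can_send_ack(const struct sock *ssk)
    {
        return !((1 << inet_sk_state_load(ssk)) &
               (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT |
                TCPF_CLOSE | TCPF_LISTEN));
    }
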
445 void mptcp_subflow_send_ack(struct sock *ssk) in mptcp_subflow_send_ack() argument
449 slow = lock_sock_fast(ssk); in mptcp_subflow_send_ack()
450 if (tcp_can_send_ack(ssk)) in mptcp_subflow_send_ack()
451 tcp_send_ack(ssk); in mptcp_subflow_send_ack()
452 unlock_sock_fast(ssk, slow); in mptcp_subflow_send_ack()
463 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk) in mptcp_subflow_cleanup_rbuf() argument
467 slow = lock_sock_fast(ssk); in mptcp_subflow_cleanup_rbuf()
468 if (tcp_can_send_ack(ssk)) in mptcp_subflow_cleanup_rbuf()
469 tcp_cleanup_rbuf(ssk, 1); in mptcp_subflow_cleanup_rbuf()
470 unlock_sock_fast(ssk, slow); in mptcp_subflow_cleanup_rbuf()
473 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) in mptcp_subflow_could_cleanup() argument
475 const struct inet_connection_sock *icsk = inet_csk(ssk); in mptcp_subflow_could_cleanup()
477 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_subflow_could_cleanup()
498 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_cleanup_rbuf() local
500 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) in mptcp_cleanup_rbuf()
501 mptcp_subflow_cleanup_rbuf(ssk); in mptcp_cleanup_rbuf()
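
Lines 498-501 sit inside the per-subflow walk of mptcp_cleanup_rbuf(): once enough msk-level receive space has opened up, or a given subflow independently qualifies, each subflow advertises the new window. A sketch of the surrounding loop, with `cleanup` and `rx_empty` assumed to be derived from msk state earlier in the function:

    mptcp_for_each_subflow(msk, subflow) {
        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

        if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
            mptcp_subflow_cleanup_rbuf(ssk); /* tcp_cleanup_rbuf() under the fast lock */
    }
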
556 struct sock *ssk, in __mptcp_move_skbs_from_subflow() argument
559 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skbs_from_subflow()
570 int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); in __mptcp_move_skbs_from_subflow()
578 pr_debug("msk=%p ssk=%p", msk, ssk); in __mptcp_move_skbs_from_subflow()
579 tp = tcp_sk(ssk); in __mptcp_move_skbs_from_subflow()
590 skb = skb_peek(&ssk->sk_receive_queue); in __mptcp_move_skbs_from_subflow()
623 if (__mptcp_move_skb(msk, ssk, skb, offset, len)) in __mptcp_move_skbs_from_subflow()
631 sk_eat_skb(ssk, skb); in __mptcp_move_skbs_from_subflow()
636 more_data_avail = mptcp_subflow_data_available(ssk); in __mptcp_move_skbs_from_subflow()
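
Lines 556-636 outline the receive-side move loop: peek at the head of the subflow queue, hand each mapped fragment to __mptcp_move_skb(), consume the skb, and repeat while the subflow reports more data. The control flow, with the mapping-derived `offset`/`len` computation elided:

    do {
        skb = skb_peek(&ssk->sk_receive_queue);
        if (!skb)
            break;

        /* offset/len come from the current MPTCP data mapping */
        if (__mptcp_move_skb(msk, ssk, skb, offset, len))
            moved += len;

        sk_eat_skb(ssk, skb); /* fully consumed at the subflow level */

        more_data_avail = mptcp_subflow_data_available(ssk);
    } while (more_data_avail);
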
691 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) in __mptcp_subflow_error_report() argument
693 int err = sock_error(ssk); in __mptcp_subflow_error_report()
710 ssk_state = inet_sk_state_load(ssk); in __mptcp_subflow_error_report()
734 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) in move_skbs_to_msk() argument
739 __mptcp_move_skbs_from_subflow(msk, ssk, &moved); in move_skbs_to_msk()
741 if (unlikely(ssk->sk_err)) { in move_skbs_to_msk()
758 void mptcp_data_ready(struct sock *sk, struct sock *ssk) in mptcp_data_ready() argument
760 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_data_ready()
771 ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); in mptcp_data_ready()
784 if (move_skbs_to_msk(msk, ssk)) in mptcp_data_ready()
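
mptcp_data_ready() (lines 758-784) is the subflow's data-ready hook: the msk-level reader is woken only when move_skbs_to_msk() actually produced in-sequence data. A sketch of that tail, assuming the mptcp_data_lock() protection used throughout this file:

    mptcp_data_lock(sk);
    if (move_skbs_to_msk(msk, ssk))
        sk->sk_data_ready(sk);
    mptcp_data_unlock(sk);
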
1179 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_enter_memory_pressure() local
1182 tcp_enter_memory_pressure(ssk); in mptcp_enter_memory_pressure()
1183 sk_stream_moderate_sndbuf(ssk); in mptcp_enter_memory_pressure()
1274 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) in __mptcp_alloc_tx_skb() argument
1282 if (likely(sk_wmem_schedule(ssk, skb->truesize))) { in __mptcp_alloc_tx_skb()
1283 tcp_skb_entail(ssk, skb); in __mptcp_alloc_tx_skb()
1291 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) in mptcp_alloc_tx_skb() argument
1301 return __mptcp_alloc_tx_skb(sk, ssk, gfp); in mptcp_alloc_tx_skb()
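
Lines 1274-1301 show the transmit skb allocation path: the skb is charged against the subflow's write memory before being entailed on its write queue, and freed if the charge fails. A sketch of the __mptcp_alloc_tx_skb() core (the allocator helper name is an assumption taken from contemporary sources):

    skb = __mptcp_do_alloc_tx_skb(sk, gfp);
    if (!skb)
        return NULL;

    if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
        tcp_skb_entail(ssk, skb); /* append to the subflow write queue */
        return skb;
    }
    kfree_skb(skb);
    return NULL;
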
1316 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, in mptcp_sendmsg_frag() argument
1332 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); in mptcp_sendmsg_frag()
1339 info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); in mptcp_sendmsg_frag()
1342 skb = tcp_write_queue_tail(ssk); in mptcp_sendmsg_frag()
1353 tcp_mark_push(tcp_sk(ssk), skb); in mptcp_sendmsg_frag()
1360 tcp_mark_push(tcp_sk(ssk), skb); in mptcp_sendmsg_frag()
1367 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); in mptcp_sendmsg_frag()
1381 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { in mptcp_sendmsg_frag()
1382 tcp_remove_empty_skb(ssk); in mptcp_sendmsg_frag()
1392 if (!sk_wmem_schedule(ssk, copy)) { in mptcp_sendmsg_frag()
1393 tcp_remove_empty_skb(ssk); in mptcp_sendmsg_frag()
1407 sk_wmem_queued_add(ssk, copy); in mptcp_sendmsg_frag()
1408 sk_mem_charge(ssk, copy); in mptcp_sendmsg_frag()
1410 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); in mptcp_sendmsg_frag()
1423 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; in mptcp_sendmsg_frag()
1433 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; in mptcp_sendmsg_frag()
1437 tcp_push_pending_frames(ssk); in mptcp_sendmsg_frag()
1443 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; in mptcp_sendmsg_frag()
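
In mptcp_sendmsg_frag() (lines 1316-1443) every pushed chunk advances two sequence spaces at once: the subflow's TCP write_seq and the subflow-relative rel_write_seq, while the msk-level data sequence number is recorded in the skb's MPTCP extension. A sketch of the DSS mapping setup, field names per struct mptcp_ext:

    /* describe this chunk in both sequence spaces */
    mpext->data_seq = dfrag->data_seq + info->sent; /* msk-level DSN */
    mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
    mpext->data_len = copy;
    mpext->use_map = 1;
    mpext->dsn64 = 1;

    WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
    mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
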
1454 struct sock *ssk; member
1490 struct sock *ssk; in mptcp_subflow_get_send() local
1513 send_info[i].ssk = NULL; in mptcp_subflow_get_send()
1518 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_send()
1524 if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd) in mptcp_subflow_get_send()
1527 pace = READ_ONCE(ssk->sk_pacing_rate); in mptcp_subflow_get_send()
1531 ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, in mptcp_subflow_get_send()
1534 send_info[subflow->backup].ssk = ssk; in mptcp_subflow_get_send()
1542 send_info[0].ssk = send_info[1].ssk; in mptcp_subflow_get_send()
1544 if (send_info[0].ssk) { in mptcp_subflow_get_send()
1545 msk->last_snd = send_info[0].ssk; in mptcp_subflow_get_send()
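
Lines 1490-1545 are the default packet scheduler: for every sendable subflow it computes queued bytes over pacing rate, scaled by 2^32 to stay in integer math, and keeps the per-class minimum; index 0 tracks active subflows, index 1 backups, and line 1542 promotes the backup only when no active subflow was usable. The selection core (the `ratio` member name is inferred from line 1531):

    pace = READ_ONCE(ssk->sk_pacing_rate);
    if (!pace)
        continue;

    /* smaller ratio == less queued data per unit of pacing rate */
    ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
    if (ratio < send_info[subflow->backup].ratio) {
        send_info[subflow->backup].ssk = ssk;
        send_info[subflow->backup].ratio = ratio;
    }
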
1554 static void mptcp_push_release(struct sock *sk, struct sock *ssk, in mptcp_push_release() argument
1557 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); in mptcp_push_release()
1558 release_sock(ssk); in mptcp_push_release()
1589 struct sock *prev_ssk = NULL, *ssk = NULL; in __mptcp_push_pending() local
1604 prev_ssk = ssk; in __mptcp_push_pending()
1606 ssk = mptcp_subflow_get_send(msk); in __mptcp_push_pending()
1611 if (ssk != prev_ssk && prev_ssk) in __mptcp_push_pending()
1613 if (!ssk) in __mptcp_push_pending()
1620 if (ssk != prev_ssk) in __mptcp_push_pending()
1621 lock_sock(ssk); in __mptcp_push_pending()
1623 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_push_pending()
1625 mptcp_push_release(sk, ssk, &info); in __mptcp_push_pending()
1639 if (ssk) in __mptcp_push_pending()
1640 mptcp_push_release(sk, ssk, &info); in __mptcp_push_pending()
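
__mptcp_push_pending() (lines 1589-1640) re-runs the scheduler for every dfrag, so consecutive fragments may target different subflows; the subflow lock is released, and its queue flushed, only when the chosen socket actually changes. The lock juggling, reconstructed around the visible lines:

    prev_ssk = ssk;
    ssk = mptcp_subflow_get_send(msk);

    /* scheduler picked another subflow: push and release the old one */
    if (ssk != prev_ssk && prev_ssk)
        mptcp_push_release(sk, prev_ssk, &info);
    if (!ssk)
        goto out;

    if (ssk != prev_ssk)
        lock_sock(ssk);
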
1650 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk) in __mptcp_subflow_push_pending() argument
1673 xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk)); in __mptcp_subflow_push_pending()
1676 if (xmit_ssk != ssk) { in __mptcp_subflow_push_pending()
1682 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_subflow_push_pending()
1702 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_subflow_push_pending()
1966 struct sock *ssk; in mptcp_rcv_space_adjust() local
1969 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_rcv_space_adjust()
1970 slow = lock_sock_fast(ssk); in mptcp_rcv_space_adjust()
1971 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); in mptcp_rcv_space_adjust()
1972 tcp_sk(ssk)->window_clamp = window_clamp; in mptcp_rcv_space_adjust()
1973 tcp_cleanup_rbuf(ssk, 1); in mptcp_rcv_space_adjust()
1974 unlock_sock_fast(ssk, slow); in mptcp_rcv_space_adjust()
2012 struct sock *ssk = mptcp_subflow_recv_lookup(msk); in __mptcp_move_skbs() local
2019 if (likely(!ssk)) in __mptcp_move_skbs()
2022 slowpath = lock_sock_fast(ssk); in __mptcp_move_skbs()
2025 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); in __mptcp_move_skbs()
2028 if (unlikely(ssk->sk_err)) in __mptcp_move_skbs()
2030 unlock_sock_fast(ssk, slowpath); in __mptcp_move_skbs()
2202 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_retrans() local
2208 if (!tcp_rtx_and_write_queues_empty(ssk)) { in mptcp_subflow_get_retrans()
2209 mptcp_pm_subflow_chk_stale(msk, ssk); in mptcp_subflow_get_retrans()
2216 backup = ssk; in mptcp_subflow_get_retrans()
2221 pick = ssk; in mptcp_subflow_get_retrans()
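
mptcp_subflow_get_retrans() (lines 2202-2221) prefers an idle active subflow for retransmission: a subflow that still has data sitting in its TCP queues is skipped (after a staleness check), backups are remembered separately, and the backup is used only when every active subflow looks stale. A sketch of the walk:

    mptcp_for_each_subflow(msk, subflow) {
        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

        /* data still in flight at the TCP level: not a candidate */
        if (!tcp_rtx_and_write_queues_empty(ssk)) {
            mptcp_pm_subflow_chk_stale(msk, ssk);
            continue;
        }

        if (subflow->backup) {
            if (!backup)
                backup = ssk;
            continue;
        }

        if (!pick)
            pick = ssk;
    }
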
2285 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, in __mptcp_close_ssk() argument
2293 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); in __mptcp_close_ssk()
2298 if (ssk->sk_socket) in __mptcp_close_ssk()
2299 sock_orphan(ssk); in __mptcp_close_ssk()
2308 if (!inet_csk(ssk)->icsk_ulp_ops) { in __mptcp_close_ssk()
2312 __tcp_close(ssk, 0); in __mptcp_close_ssk()
2315 __sock_put(ssk); in __mptcp_close_ssk()
2317 __mptcp_subflow_error_report(sk, ssk); in __mptcp_close_ssk()
2318 release_sock(ssk); in __mptcp_close_ssk()
2320 sock_put(ssk); in __mptcp_close_ssk()
2322 if (ssk == msk->last_snd) in __mptcp_close_ssk()
2325 if (ssk == msk->first) in __mptcp_close_ssk()
2328 if (msk->subflow && ssk == msk->subflow->sk) in __mptcp_close_ssk()
2335 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, in mptcp_close_ssk() argument
2339 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); in mptcp_close_ssk()
2340 __mptcp_close_ssk(sk, ssk, subflow); in mptcp_close_ssk()
2355 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close_subflow() local
2357 if (inet_sk_state_load(ssk) != TCP_CLOSE) in __mptcp_close_subflow()
2361 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) in __mptcp_close_subflow()
2364 mptcp_close_ssk((struct sock *)msk, ssk, subflow); in __mptcp_close_subflow()
2423 struct sock *ssk; in __mptcp_retrans() local
2442 ssk = mptcp_subflow_get_retrans(msk); in __mptcp_retrans()
2443 if (!ssk) in __mptcp_retrans()
2446 lock_sock(ssk); in __mptcp_retrans()
2452 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_retrans()
2462 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_retrans()
2466 release_sock(ssk); in __mptcp_retrans()
2607 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) in mptcp_subflow_shutdown() argument
2609 lock_sock(ssk); in mptcp_subflow_shutdown()
2611 switch (ssk->sk_state) { in mptcp_subflow_shutdown()
2617 tcp_disconnect(ssk, O_NONBLOCK); in mptcp_subflow_shutdown()
2622 ssk->sk_shutdown |= how; in mptcp_subflow_shutdown()
2623 tcp_shutdown(ssk, how); in mptcp_subflow_shutdown()
2631 pr_debug("Sending DATA_FIN on subflow %p", ssk); in mptcp_subflow_shutdown()
2632 tcp_send_ack(ssk); in mptcp_subflow_shutdown()
2639 release_sock(ssk); in mptcp_subflow_shutdown()
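
mptcp_subflow_shutdown() (lines 2607-2639) splits on connection state and on fallback: sockets not yet established are simply disconnected, a fallen-back connection gets a plain tcp_shutdown(), and a native MPTCP connection signals DATA_FIN by forcing out an ACK carrying the DSS option. A simplified sketch of the switch (the LISTEN handling is condensed here):

    switch (ssk->sk_state) {
    case TCP_LISTEN:
    case TCP_SYN_SENT:
        tcp_disconnect(ssk, O_NONBLOCK);
        break;
    default:
        if (__mptcp_check_fallback(mptcp_sk(sk))) {
            ssk->sk_shutdown |= how;
            tcp_shutdown(ssk, how);
        } else {
            /* the DATA_FIN rides on a bare ACK */
            pr_debug("Sending DATA_FIN on subflow %p", ssk);
            tcp_send_ack(ssk);
        }
        break;
    }
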
2733 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_destroy_sock() local
2734 __mptcp_close_ssk(sk, ssk, subflow); in __mptcp_destroy_sock()
2772 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_close() local
2773 bool slow = lock_sock_fast_nested(ssk); in mptcp_close()
2775 subflows_alive += ssk->sk_state != TCP_CLOSE; in mptcp_close()
2777 sock_orphan(ssk); in mptcp_close()
2778 unlock_sock_fast(ssk, slow); in mptcp_close()
2806 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) in mptcp_copy_inaddrs() argument
2809 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); in mptcp_copy_inaddrs()
2812 msk->sk_v6_daddr = ssk->sk_v6_daddr; in mptcp_copy_inaddrs()
2813 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; in mptcp_copy_inaddrs()
2821 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; in mptcp_copy_inaddrs()
2822 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; in mptcp_copy_inaddrs()
2823 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; in mptcp_copy_inaddrs()
2824 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; in mptcp_copy_inaddrs()
2825 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; in mptcp_copy_inaddrs()
2826 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; in mptcp_copy_inaddrs()
2843 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_disconnect() local
2845 lock_sock(ssk); in mptcp_disconnect()
2846 tcp_disconnect(ssk, flags); in mptcp_disconnect()
2847 release_sock(ssk); in mptcp_disconnect()
2965 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) in mptcp_rcv_space_init() argument
2967 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_rcv_space_init()
2980 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); in mptcp_rcv_space_init()
3064 void __mptcp_check_push(struct sock *sk, struct sock *ssk) in __mptcp_check_push() argument
3072 if (xmit_ssk == ssk) in __mptcp_check_push()
3073 __mptcp_subflow_push_pending(sk, ssk); in __mptcp_check_push()
3133 static void schedule_3rdack_retransmission(struct sock *ssk) in schedule_3rdack_retransmission() argument
3135 struct inet_connection_sock *icsk = inet_csk(ssk); in schedule_3rdack_retransmission()
3136 struct tcp_sock *tp = tcp_sk(ssk); in schedule_3rdack_retransmission()
3139 if (mptcp_subflow_ctx(ssk)->fully_established) in schedule_3rdack_retransmission()
3152 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); in schedule_3rdack_retransmission()
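
schedule_3rdack_retransmission() (lines 3133-3152) re-arms the delayed-ACK timer so that a lost third ACK of the MP_JOIN handshake is eventually retransmitted; once the subflow is fully established there is nothing left to guard. A sketch of the timeout computation (details are an assumption; srtt_us holds 8x the smoothed RTT, so the shift yields roughly two RTTs):

    if (mptcp_subflow_ctx(ssk)->fully_established)
        return;

    /* look only for drops: wait comfortably longer than one RTT */
    if (tp->srtt_us)
        timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
    else
        timeout = TCP_TIMEOUT_INIT;
    timeout += jiffies;

    icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
    icsk->icsk_delack_timer.expires = timeout;
    sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
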
3155 void mptcp_subflow_process_delegated(struct sock *ssk) in mptcp_subflow_process_delegated() argument
3157 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_process_delegated()
3163 __mptcp_subflow_push_pending(sk, ssk); in mptcp_subflow_process_delegated()
3170 schedule_3rdack_retransmission(ssk); in mptcp_subflow_process_delegated()
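
mptcp_subflow_process_delegated() (lines 3155-3170) dispatches work that softirq context deferred to this subflow: a delegated SEND becomes a push of pending data under the msk data lock, a delegated ACK becomes the third-ACK rescheduling above. Sketch, with the flag names assumed from the delegation API of this era:

    if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
        mptcp_data_lock(sk);
        if (!sock_owned_by_user(sk))
            __mptcp_subflow_push_pending(sk, ssk);
        /* else: the owner pushes at release_sock() time (elided) */
        mptcp_data_unlock(sk);
        mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
    }
    if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
        schedule_3rdack_retransmission(ssk);
        mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
    }
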
3202 void mptcp_finish_connect(struct sock *ssk) in mptcp_finish_connect() argument
3209 subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_connect()
3232 mptcp_pm_new_connection(msk, ssk, 0); in mptcp_finish_connect()
3234 mptcp_rcv_space_init(msk, ssk); in mptcp_finish_connect()
3246 bool mptcp_finish_join(struct sock *ssk) in mptcp_finish_join() argument
3248 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_join()
3279 sock_hold(ssk); in mptcp_finish_join()
3291 if (parent_sock && !ssk->sk_socket) in mptcp_finish_join()
3292 mptcp_sock_graft(ssk, parent_sock); in mptcp_finish_join()
3295 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); in mptcp_finish_join()
3501 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_stream_accept() local
3503 if (!ssk->sk_socket) in mptcp_stream_accept()
3504 mptcp_sock_graft(ssk, newsock); in mptcp_stream_accept()
3620 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_napi_poll() local
3622 bh_lock_sock_nested(ssk); in mptcp_napi_poll()
3623 if (!sock_owned_by_user(ssk) && in mptcp_napi_poll()
3625 mptcp_subflow_process_delegated(ssk); in mptcp_napi_poll()
3631 bh_unlock_sock(ssk); in mptcp_napi_poll()
3632 sock_put(ssk); in mptcp_napi_poll()
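
mptcp_napi_poll() (lines 3620-3632) drains the per-CPU list of delegated subflows: each ssk is processed under its bottom-half lock when not owned by user space, otherwise the action is left for release_sock(); the reference taken at delegation time is dropped either way. A sketch of the loop, assuming the usual NAPI budget accounting:

    while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

        bh_lock_sock_nested(ssk);
        if (!sock_owned_by_user(ssk) &&
            mptcp_subflow_has_delegated_action(subflow))
            mptcp_subflow_process_delegated(ssk);
        /* else: the owner runs the delegated action at release time */
        bh_unlock_sock(ssk);
        sock_put(ssk); /* drop the reference taken at delegation */

        if (++work_done == budget)
            return budget;
    }
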