Lines Matching refs:sk
43 static int nos_ivs(struct sock *sk, unsigned int size) in nos_ivs() argument
45 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in nos_ivs()
50 static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb) in set_ivs_imm() argument
52 int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE; in set_ivs_imm()
64 static int max_ivs_size(struct sock *sk, int size) in max_ivs_size() argument
66 return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE; in max_ivs_size()
69 static int ivs_size(struct sock *sk, const struct sk_buff *skb) in ivs_size() argument
71 return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) * in ivs_size()
89 static struct sk_buff *create_flowc_wr_skb(struct sock *sk, in create_flowc_wr_skb() argument
93 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in create_flowc_wr_skb()
106 static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc, in send_flowc_wr() argument
109 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in send_flowc_wr()
110 struct tcp_sock *tp = tcp_sk(sk); in send_flowc_wr()
117 if (csk_flag(sk, CSK_TX_DATA_SENT)) { in send_flowc_wr()
118 skb = create_flowc_wr_skb(sk, flowc, flowclen); in send_flowc_wr()
122 skb_entail(sk, skb, in send_flowc_wr()
132 skb = create_flowc_wr_skb(sk, flowc, flowclen); in send_flowc_wr()
135 send_or_defer(sk, tp, skb, 0); in send_flowc_wr()
159 int send_tx_flowc_wr(struct sock *sk, int compl, in send_tx_flowc_wr() argument
171 csk = rcu_dereference_sk_user_data(sk); in send_tx_flowc_wr()
172 tp = tcp_sk(sk); in send_tx_flowc_wr()
193 FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state)); in send_tx_flowc_wr()
215 return send_flowc_wr(sk, flowc, flowclen); in send_tx_flowc_wr()
219 static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb) in tls_copy_ivs() argument
230 csk = rcu_dereference_sk_user_data(sk); in tls_copy_ivs()
232 number_of_ivs = nos_ivs(sk, skb->len); in tls_copy_ivs()
257 page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0); in tls_copy_ivs()
276 static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb) in tls_copy_tx_key() argument
286 csk = rcu_dereference_sk_user_data(sk); in tls_copy_tx_key()
340 static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb, in tls_tx_data_wr() argument
359 csk = rcu_dereference_sk_user_data(sk); in tls_tx_data_wr()
392 ((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 : in tls_tx_data_wr()
399 TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) && in tls_tx_data_wr()
442 static int chtls_expansion_size(struct sock *sk, int data_len, in chtls_expansion_size() argument
446 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in chtls_expansion_size()
479 static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb, in make_tlstx_data_wr() argument
488 csk = rcu_dereference_sk_user_data(sk); in make_tlstx_data_wr()
491 expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL); in make_tlstx_data_wr()
493 hws->expansion = chtls_expansion_size(sk, in make_tlstx_data_wr()
501 if (tls_copy_ivs(sk, skb)) in make_tlstx_data_wr()
503 tls_copy_tx_key(sk, skb); in make_tlstx_data_wr()
504 tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus); in make_tlstx_data_wr()
508 static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb, in make_tx_data_wr() argument
517 csk = rcu_dereference_sk_user_data(sk); in make_tx_data_wr()
530 ((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 : in make_tx_data_wr()
535 TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) && in make_tx_data_wr()
547 wr_size += ivs_size(csk->sk, skb); in chtls_wr_size()
602 struct sock *sk; in chtls_push_frames() local
606 sk = csk->sk; in chtls_push_frames()
607 tp = tcp_sk(sk); in chtls_push_frames()
609 if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE))) in chtls_push_frames()
612 if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN))) in chtls_push_frames()
643 flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt, in chtls_push_frames()
679 make_tlstx_data_wr(sk, skb, tls_tx_imm, in chtls_push_frames()
682 make_tx_data_wr(sk, skb, immdlen, len, in chtls_push_frames()
709 sk->sk_wmem_queued -= total_size; in chtls_push_frames()
728 static bool should_push(struct sock *sk) in should_push() argument
730 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in should_push()
732 struct tcp_sock *tp = tcp_sk(sk); in should_push()
760 static bool send_should_push(struct sock *sk, int flags) in send_should_push() argument
762 return should_push(sk) && !corked(tcp_sk(sk), flags); in send_should_push()
765 void chtls_tcp_push(struct sock *sk, int flags) in chtls_tcp_push() argument
767 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in chtls_tcp_push()
772 struct tcp_sock *tp = tcp_sk(sk); in chtls_tcp_push()
785 should_push(sk))) in chtls_tcp_push()
798 static int select_size(struct sock *sk, int io_len, int flags, int len) in select_size() argument
813 if (!send_should_push(sk, flags)) in select_size()
819 void skb_entail(struct sock *sk, struct sk_buff *skb, int flags) in skb_entail() argument
821 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in skb_entail()
822 struct tcp_sock *tp = tcp_sk(sk); in skb_entail()
827 sk->sk_wmem_queued += skb->truesize; in skb_entail()
829 if (TCP_PAGE(sk) && TCP_OFF(sk)) { in skb_entail()
830 put_page(TCP_PAGE(sk)); in skb_entail()
831 TCP_PAGE(sk) = NULL; in skb_entail()
832 TCP_OFF(sk) = 0; in skb_entail()
836 static struct sk_buff *get_tx_skb(struct sock *sk, int size) in get_tx_skb() argument
840 skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation); in get_tx_skb()
843 skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR); in get_tx_skb()
849 static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy) in get_record_skb() argument
851 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in get_record_skb()
855 KEY_ON_MEM_SZ + max_ivs_size(sk, size)), in get_record_skb()
856 sk->sk_allocation); in get_record_skb()
859 KEY_ON_MEM_SZ + max_ivs_size(sk, size))); in get_record_skb()
860 skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR); in get_record_skb()
877 static void push_frames_if_head(struct sock *sk) in push_frames_if_head() argument
879 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in push_frames_if_head()
885 static int chtls_skb_copy_to_page_nocache(struct sock *sk, in chtls_skb_copy_to_page_nocache() argument
893 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + in chtls_skb_copy_to_page_nocache()
901 sk->sk_wmem_queued += copy; in chtls_skb_copy_to_page_nocache()
905 static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk) in csk_mem_free() argument
907 return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0); in csk_mem_free()
911 struct sock *sk, long *timeo_p) in csk_wait_memory() argument
921 if (csk_mem_free(cdev, sk)) { in csk_wait_memory()
926 add_wait_queue(sk_sleep(sk), &wait); in csk_wait_memory()
928 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in csk_wait_memory()
930 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in csk_wait_memory()
934 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in csk_wait_memory()
939 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in csk_wait_memory()
940 if (csk_mem_free(cdev, sk) && !vm_wait) in csk_wait_memory()
943 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in csk_wait_memory()
944 sk->sk_write_pending++; in csk_wait_memory()
945 sk_wait_event(sk, &current_timeo, sk->sk_err || in csk_wait_memory()
946 (sk->sk_shutdown & SEND_SHUTDOWN) || in csk_wait_memory()
947 (csk_mem_free(cdev, sk) && !vm_wait), &wait); in csk_wait_memory()
948 sk->sk_write_pending--; in csk_wait_memory()
963 remove_wait_queue(sk_sleep(sk), &wait); in csk_wait_memory()
976 static int chtls_proccess_cmsg(struct sock *sk, struct msghdr *msg, in chtls_proccess_cmsg() argument
1007 int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) in chtls_sendmsg() argument
1009 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in chtls_sendmsg()
1011 struct tcp_sock *tp = tcp_sk(sk); in chtls_sendmsg()
1018 lock_sock(sk); in chtls_sendmsg()
1020 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in chtls_sendmsg()
1022 if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { in chtls_sendmsg()
1023 err = sk_stream_wait_connect(sk, &timeo); in chtls_sendmsg()
1028 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in chtls_sendmsg()
1030 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in chtls_sendmsg()
1044 if (!csk_mem_free(cdev, sk)) in chtls_sendmsg()
1051 err = chtls_proccess_cmsg(sk, msg, in chtls_sendmsg()
1071 push_frames_if_head(sk); in chtls_sendmsg()
1075 skb = get_record_skb(sk, in chtls_sendmsg()
1076 select_size(sk, in chtls_sendmsg()
1082 skb = get_tx_skb(sk, in chtls_sendmsg()
1083 select_size(sk, size, flags, in chtls_sendmsg()
1099 err = skb_add_data_nocache(sk, skb, in chtls_sendmsg()
1105 struct page *page = TCP_PAGE(sk); in chtls_sendmsg()
1107 int off = TCP_OFF(sk); in chtls_sendmsg()
1124 TCP_PAGE(sk) = page = NULL; in chtls_sendmsg()
1129 gfp_t gfp = sk->sk_allocation; in chtls_sendmsg()
1154 err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter, in chtls_sendmsg()
1158 if (!TCP_PAGE(sk)) { in chtls_sendmsg()
1159 TCP_PAGE(sk) = page; in chtls_sendmsg()
1160 TCP_OFF(sk) = 0; in chtls_sendmsg()
1174 TCP_PAGE(sk) = page; in chtls_sendmsg()
1176 TCP_PAGE(sk) = NULL; in chtls_sendmsg()
1179 TCP_OFF(sk) = off + copy; in chtls_sendmsg()
1191 (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))) in chtls_sendmsg()
1198 push_frames_if_head(sk); in chtls_sendmsg()
1201 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in chtls_sendmsg()
1203 err = csk_wait_memory(cdev, sk, &timeo); in chtls_sendmsg()
1210 chtls_tcp_push(sk, flags); in chtls_sendmsg()
1212 release_sock(sk); in chtls_sendmsg()
1217 sk->sk_wmem_queued -= skb->truesize; in chtls_sendmsg()
1226 copied = sk_stream_error(sk, flags, err); in chtls_sendmsg()
1230 int chtls_sendpage(struct sock *sk, struct page *page, in chtls_sendpage() argument
1239 tp = tcp_sk(sk); in chtls_sendpage()
1241 csk = rcu_dereference_sk_user_data(sk); in chtls_sendpage()
1243 lock_sock(sk); in chtls_sendpage()
1244 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in chtls_sendpage()
1246 err = sk_stream_wait_connect(sk, &timeo); in chtls_sendpage()
1247 if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && in chtls_sendpage()
1261 if (!csk_mem_free(cdev, sk)) in chtls_sendpage()
1265 skb = get_record_skb(sk, in chtls_sendpage()
1266 select_size(sk, size, in chtls_sendpage()
1271 skb = get_tx_skb(sk, 0); in chtls_sendpage()
1288 push_frames_if_head(sk); in chtls_sendpage()
1297 sk->sk_wmem_queued += copy; in chtls_sendpage()
1304 (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))) in chtls_sendpage()
1311 push_frames_if_head(sk); in chtls_sendpage()
1314 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in chtls_sendpage()
1316 err = csk_wait_memory(cdev, sk, &timeo); in chtls_sendpage()
1323 chtls_tcp_push(sk, flags); in chtls_sendpage()
1325 release_sock(sk); in chtls_sendpage()
1335 copied = sk_stream_error(sk, flags, err); in chtls_sendpage()
1339 static void chtls_select_window(struct sock *sk) in chtls_select_window() argument
1341 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in chtls_select_window()
1342 struct tcp_sock *tp = tcp_sk(sk); in chtls_select_window()
1345 wnd = max_t(unsigned int, wnd, tcp_full_space(sk)); in chtls_select_window()
1399 static void chtls_cleanup_rbuf(struct sock *sk, int copied) in chtls_cleanup_rbuf() argument
1401 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in chtls_cleanup_rbuf()
1409 if (!sk_in_state(sk, CREDIT_RETURN_STATE)) in chtls_cleanup_rbuf()
1412 chtls_select_window(sk); in chtls_cleanup_rbuf()
1413 tp = tcp_sk(sk); in chtls_cleanup_rbuf()
1428 static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, in chtls_pt_recvmsg() argument
1431 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); in chtls_pt_recvmsg()
1435 struct tcp_sock *tp = tcp_sk(sk); in chtls_pt_recvmsg()
1444 timeo = sock_rcvtimeo(sk, nonblock); in chtls_pt_recvmsg()
1445 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); in chtls_pt_recvmsg()
1447 if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) in chtls_pt_recvmsg()
1448 chtls_cleanup_rbuf(sk, copied); in chtls_pt_recvmsg()
1464 skb = skb_peek(&sk->sk_receive_queue); in chtls_pt_recvmsg()
1471 sk->sk_write_space(sk); in chtls_pt_recvmsg()
1473 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) in chtls_pt_recvmsg()
1477 if (sk->sk_err || sk->sk_state == TCP_CLOSE || in chtls_pt_recvmsg()
1478 (sk->sk_shutdown & RCV_SHUTDOWN) || in chtls_pt_recvmsg()
1485 if (sock_flag(sk, SOCK_DONE)) in chtls_pt_recvmsg()
1487 if (sk->sk_err) { in chtls_pt_recvmsg()
1488 copied = sock_error(sk); in chtls_pt_recvmsg()
1491 if (sk->sk_shutdown & RCV_SHUTDOWN) in chtls_pt_recvmsg()
1493 if (sk->sk_state == TCP_CLOSE) { in chtls_pt_recvmsg()
1506 if (READ_ONCE(sk->sk_backlog.tail)) { in chtls_pt_recvmsg()
1507 release_sock(sk); in chtls_pt_recvmsg()
1508 lock_sock(sk); in chtls_pt_recvmsg()
1509 chtls_cleanup_rbuf(sk, copied); in chtls_pt_recvmsg()
1515 chtls_cleanup_rbuf(sk, copied); in chtls_pt_recvmsg()
1516 sk_wait_data(sk, &timeo, NULL); in chtls_pt_recvmsg()
1521 __skb_unlink(skb, &sk->sk_receive_queue); in chtls_pt_recvmsg()
1530 release_sock(sk); in chtls_pt_recvmsg()
1531 lock_sock(sk); in chtls_pt_recvmsg()
1547 } else if (!sock_flag(sk, SOCK_URGINLINE)) { in chtls_pt_recvmsg()
1598 chtls_free_skb(sk, skb); in chtls_pt_recvmsg()
1601 next_skb = skb_peek(&sk->sk_receive_queue); in chtls_pt_recvmsg()
1610 chtls_cleanup_rbuf(sk, copied); in chtls_pt_recvmsg()
1611 release_sock(sk); in chtls_pt_recvmsg()
1618 static int peekmsg(struct sock *sk, struct msghdr *msg, in peekmsg() argument
1621 struct tcp_sock *tp = tcp_sk(sk); in peekmsg()
1628 lock_sock(sk); in peekmsg()
1629 timeo = sock_rcvtimeo(sk, nonblock); in peekmsg()
1643 skb_queue_walk(&sk->sk_receive_queue, skb) { in peekmsg()
1652 if (sock_flag(sk, SOCK_DONE)) in peekmsg()
1654 if (sk->sk_err) { in peekmsg()
1655 copied = sock_error(sk); in peekmsg()
1658 if (sk->sk_shutdown & RCV_SHUTDOWN) in peekmsg()
1660 if (sk->sk_state == TCP_CLOSE) { in peekmsg()
1673 if (READ_ONCE(sk->sk_backlog.tail)) { in peekmsg()
1675 release_sock(sk); in peekmsg()
1676 lock_sock(sk); in peekmsg()
1678 sk_wait_data(sk, &timeo, NULL); in peekmsg()
1706 if (!sock_flag(sk, SOCK_URGINLINE)) { in peekmsg()
1735 release_sock(sk); in peekmsg()
1739 int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, in chtls_recvmsg() argument
1742 struct tcp_sock *tp = tcp_sk(sk); in chtls_recvmsg()
1753 return tcp_prot.recvmsg(sk, msg, len, nonblock, flags, in chtls_recvmsg()
1757 return peekmsg(sk, msg, len, nonblock, flags); in chtls_recvmsg()
1759 if (sk_can_busy_loop(sk) && in chtls_recvmsg()
1760 skb_queue_empty_lockless(&sk->sk_receive_queue) && in chtls_recvmsg()
1761 sk->sk_state == TCP_ESTABLISHED) in chtls_recvmsg()
1762 sk_busy_loop(sk, nonblock); in chtls_recvmsg()
1764 lock_sock(sk); in chtls_recvmsg()
1765 csk = rcu_dereference_sk_user_data(sk); in chtls_recvmsg()
1768 return chtls_pt_recvmsg(sk, msg, len, nonblock, in chtls_recvmsg()
1771 timeo = sock_rcvtimeo(sk, nonblock); in chtls_recvmsg()
1772 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); in chtls_recvmsg()
1774 if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) in chtls_recvmsg()
1775 chtls_cleanup_rbuf(sk, copied); in chtls_recvmsg()
1791 skb = skb_peek(&sk->sk_receive_queue); in chtls_recvmsg()
1799 sk->sk_write_space(sk); in chtls_recvmsg()
1801 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) in chtls_recvmsg()
1805 if (sk->sk_err || sk->sk_state == TCP_CLOSE || in chtls_recvmsg()
1806 (sk->sk_shutdown & RCV_SHUTDOWN) || in chtls_recvmsg()
1810 if (sock_flag(sk, SOCK_DONE)) in chtls_recvmsg()
1812 if (sk->sk_err) { in chtls_recvmsg()
1813 copied = sock_error(sk); in chtls_recvmsg()
1816 if (sk->sk_shutdown & RCV_SHUTDOWN) in chtls_recvmsg()
1818 if (sk->sk_state == TCP_CLOSE) { in chtls_recvmsg()
1832 if (READ_ONCE(sk->sk_backlog.tail)) { in chtls_recvmsg()
1833 release_sock(sk); in chtls_recvmsg()
1834 lock_sock(sk); in chtls_recvmsg()
1835 chtls_cleanup_rbuf(sk, copied); in chtls_recvmsg()
1841 chtls_cleanup_rbuf(sk, copied); in chtls_recvmsg()
1842 sk_wait_data(sk, &timeo, NULL); in chtls_recvmsg()
1847 chtls_kfree_skb(sk, skb); in chtls_recvmsg()
1870 } else if (!sock_flag(sk, SOCK_URGINLINE)) { in chtls_recvmsg()
1899 chtls_free_skb(sk, skb); in chtls_recvmsg()
1903 !skb_peek(&sk->sk_receive_queue)) in chtls_recvmsg()
1909 chtls_cleanup_rbuf(sk, copied); in chtls_recvmsg()
1911 release_sock(sk); in chtls_recvmsg()