Lines Matching +full:rpc +full:- +full:if in net/sunrpc/xprtsock.c
1 // SPDX-License-Identifier: GPL-2.0
5 * Client-side transport implementation for sockets.
164 * Wait duration for a reply from the RPC portmapper.
169 * Delay if a UDP socket connect error occurs. This is most likely some
181 * increase over time if the server is down or not responding.
186 * TCP idle timeout; client drops the transport socket if it is idle
188 * holding port numbers when there is no RPC traffic.
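The timeout comments above describe a reconnect delay that doubles after each failure and is capped once the server stays unreachable. A minimal userspace sketch of that doubling-with-ceiling policy; the constant values below are invented for illustration, not the ones defined in this file:

/* Illustrative only: exponential reconnect backoff with a ceiling,
 * mirroring the "increase over time if the server is down" behaviour
 * described above. Constants are example values. */
#include <stdio.h>

#define REEST_INIT_MS   3000        /* initial reconnect delay (example) */
#define REEST_MAX_MS  300000        /* upper bound on the delay (example) */

static unsigned long next_reconnect_delay(unsigned long cur_ms)
{
        unsigned long next = cur_ms ? cur_ms << 1 : REEST_INIT_MS;

        return next > REEST_MAX_MS ? REEST_MAX_MS : next;
}

int main(void)
{
        unsigned long delay = 0;
        int i;

        for (i = 0; i < 10; i++) {
                delay = next_reconnect_delay(delay);
                printf("attempt %d: wait %lums\n", i + 1, delay);
        }
        return 0;
}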
192 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
203 dprintk("RPC: %s\n", msg); in xs_pktdump()
205 if (!(j & 31)) { in xs_pktdump()
206 if (j) in xs_pktdump()
224 return (struct rpc_xprt *) sk->sk_user_data; in xprt_from_sock()
229 return (struct sockaddr *) &xprt->addr; in xs_addr()
234 return (struct sockaddr_un *) &xprt->addr; in xs_addr_un()
239 return (struct sockaddr_in *) &xprt->addr; in xs_addr_in()
244 return (struct sockaddr_in6 *) &xprt->addr; in xs_addr_in6()
255 switch (sap->sa_family) { in xs_format_common_peer_addresses()
258 strlcpy(buf, sun->sun_path, sizeof(buf)); in xs_format_common_peer_addresses()
259 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
264 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
267 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); in xs_format_common_peer_addresses()
271 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
274 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); in xs_format_common_peer_addresses()
280 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_addresses()
289 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_ports()
292 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_ports()
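xs_format_common_peer_addresses()/_ports() above cache printable forms of the peer address in xprt->address_strings[]. A rough userspace illustration of producing decimal and hexadecimal display strings from a sockaddr_in; the exact format strings the kernel uses may differ:

/* Userspace illustration (not the kernel code) of building decimal and
 * hex display strings comparable to RPC_DISPLAY_PORT / RPC_DISPLAY_HEX_PORT. */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct sockaddr_in sin;
        char addr[INET_ADDRSTRLEN], port_dec[8], port_hex[8];

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(2049);                    /* NFS port, as an example */
        inet_pton(AF_INET, "192.0.2.7", &sin.sin_addr);

        inet_ntop(AF_INET, &sin.sin_addr, addr, sizeof(addr));
        snprintf(port_dec, sizeof(port_dec), "%u", ntohs(sin.sin_port));
        snprintf(port_hex, sizeof(port_hex), "%04x", ntohs(sin.sin_port));

        printf("ADDR=%s PORT=%s HEX_PORT=%s\n", addr, port_dec, port_hex);
        return 0;
}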
299 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol; in xs_format_peer_addresses()
300 xprt->address_strings[RPC_DISPLAY_NETID] = netid; in xs_format_peer_addresses()
307 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]); in xs_update_peer_port()
308 kfree(xprt->address_strings[RPC_DISPLAY_PORT]); in xs_update_peer_port()
323 kfree(xprt->address_strings[i]); in xs_free_peer_addresses()
332 if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES)) in xs_alloc_sparse_pages()
334 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT; in xs_alloc_sparse_pages()
336 if (buf->pages[i]) in xs_alloc_sparse_pages()
338 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp); in xs_alloc_sparse_pages()
339 if (!buf->pages[i]) { in xs_alloc_sparse_pages()
341 return i > buf->page_base ? i - buf->page_base : 0; in xs_alloc_sparse_pages()
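The page-count expression in xs_alloc_sparse_pages() above is a ceiling division: it counts how many pages are needed to cover `want` bytes starting at offset `page_base` within the first page. A standalone check of that arithmetic:

/* Standalone illustration of the round-up-to-pages arithmetic used above. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long pages_needed(unsigned long page_base, unsigned long want)
{
        return (page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
        printf("%lu\n", pages_needed(0, 4096));      /* 1 page */
        printf("%lu\n", pages_needed(100, 4096));    /* 2 pages: spills into a second page */
        printf("%lu\n", pages_needed(0, 4097));      /* 2 pages */
        return 0;
}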
351 if (seek != 0) in xs_sock_recvmsg()
352 iov_iter_advance(&msg->msg_iter, seek); in xs_sock_recvmsg()
361 iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count); in xs_read_kvec()
370 iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count); in xs_read_bvec()
378 iov_iter_discard(&msg->msg_iter, READ, count); in xs_read_discard()
382 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
409 want = min_t(size_t, count, buf->head[0].iov_len); in xs_read_xdr_buf()
410 if (seek < want) { in xs_read_xdr_buf()
411 ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek); in xs_read_xdr_buf()
412 if (ret <= 0) in xs_read_xdr_buf()
415 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_xdr_buf()
417 if (ret != want) in xs_read_xdr_buf()
421 seek -= want; in xs_read_xdr_buf()
426 min_t(size_t, count - offset, buf->page_len), in xs_read_xdr_buf()
428 if (seek < want) { in xs_read_xdr_buf()
429 ret = xs_read_bvec(sock, msg, flags, buf->bvec, in xs_read_xdr_buf()
431 want + buf->page_base, in xs_read_xdr_buf()
432 seek + buf->page_base); in xs_read_xdr_buf()
433 if (ret <= 0) in xs_read_xdr_buf()
435 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); in xs_read_xdr_buf()
436 ret -= buf->page_base; in xs_read_xdr_buf()
438 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_xdr_buf()
440 if (ret != want) in xs_read_xdr_buf()
444 seek -= want; in xs_read_xdr_buf()
448 want = min_t(size_t, count - offset, buf->tail[0].iov_len); in xs_read_xdr_buf()
449 if (seek < want) { in xs_read_xdr_buf()
450 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek); in xs_read_xdr_buf()
451 if (ret <= 0) in xs_read_xdr_buf()
454 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_xdr_buf()
456 if (ret != want) in xs_read_xdr_buf()
458 } else if (offset < seek_init) in xs_read_xdr_buf()
460 ret = -EMSGSIZE; in xs_read_xdr_buf()
462 *read = offset - seek_init; in xs_read_xdr_buf()
472 if (!transport->recv.copied) { in xs_read_header()
473 if (buf->head[0].iov_len >= transport->recv.offset) in xs_read_header()
474 memcpy(buf->head[0].iov_base, in xs_read_header()
475 &transport->recv.xid, in xs_read_header()
476 transport->recv.offset); in xs_read_header()
477 transport->recv.copied = transport->recv.offset; in xs_read_header()
484 return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT); in xs_read_stream_request_done()
491 if (xs_read_stream_request_done(transport)) in xs_read_stream_check_eor()
492 msg->msg_flags |= MSG_EOR; in xs_read_stream_check_eor()
499 struct xdr_buf *buf = &req->rq_private_buf; in xs_read_stream_request()
505 want = transport->recv.len - transport->recv.offset; in xs_read_stream_request()
506 if (want != 0) { in xs_read_stream_request()
507 ret = xs_read_xdr_buf(transport->sock, msg, flags, buf, in xs_read_stream_request()
508 transport->recv.copied + want, in xs_read_stream_request()
509 transport->recv.copied, in xs_read_stream_request()
511 transport->recv.offset += read; in xs_read_stream_request()
512 transport->recv.copied += read; in xs_read_stream_request()
515 if (transport->recv.offset == transport->recv.len) in xs_read_stream_request()
518 if (want == 0) in xs_read_stream_request()
524 case -EFAULT: in xs_read_stream_request()
525 case -EMSGSIZE: in xs_read_stream_request()
526 msg->msg_flags |= MSG_TRUNC; in xs_read_stream_request()
529 return -ESHUTDOWN; in xs_read_stream_request()
537 if (isfrag) in xs_read_stream_headersize()
547 .iov_base = &transport->recv.fraghdr, in xs_read_stream_header()
550 return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek); in xs_read_stream_header()
553 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
557 struct rpc_xprt *xprt = &transport->xprt; in xs_read_stream_call()
562 req = xprt_lookup_bc_request(xprt, transport->recv.xid); in xs_read_stream_call()
563 if (!req) { in xs_read_stream_call()
565 return -ESHUTDOWN; in xs_read_stream_call()
567 if (transport->recv.copied && !req->rq_private_buf.len) in xs_read_stream_call()
568 return -ESHUTDOWN; in xs_read_stream_call()
571 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_stream_call()
572 xprt_complete_bc_request(req, transport->recv.copied); in xs_read_stream_call()
574 req->rq_private_buf.len = transport->recv.copied; in xs_read_stream_call()
582 return -ESHUTDOWN; in xs_read_stream_call()
589 struct rpc_xprt *xprt = &transport->xprt; in xs_read_stream_reply()
594 spin_lock(&xprt->queue_lock); in xs_read_stream_reply()
595 req = xprt_lookup_rqst(xprt, transport->recv.xid); in xs_read_stream_reply()
596 if (!req || (transport->recv.copied && !req->rq_private_buf.len)) { in xs_read_stream_reply()
597 msg->msg_flags |= MSG_TRUNC; in xs_read_stream_reply()
601 spin_unlock(&xprt->queue_lock); in xs_read_stream_reply()
605 spin_lock(&xprt->queue_lock); in xs_read_stream_reply()
606 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_stream_reply()
607 xprt_complete_rqst(req->rq_task, transport->recv.copied); in xs_read_stream_reply()
609 req->rq_private_buf.len = transport->recv.copied; in xs_read_stream_reply()
612 spin_unlock(&xprt->queue_lock); in xs_read_stream_reply()
623 if (transport->recv.len == 0) { in xs_read_stream()
624 want = xs_read_stream_headersize(transport->recv.copied != 0); in xs_read_stream()
626 transport->recv.offset); in xs_read_stream()
627 if (ret <= 0) in xs_read_stream()
629 transport->recv.offset = ret; in xs_read_stream()
630 if (transport->recv.offset != want) in xs_read_stream()
631 return transport->recv.offset; in xs_read_stream()
632 transport->recv.len = be32_to_cpu(transport->recv.fraghdr) & in xs_read_stream()
634 transport->recv.offset -= sizeof(transport->recv.fraghdr); in xs_read_stream()
638 switch (be32_to_cpu(transport->recv.calldir)) { in xs_read_stream()
648 if (msg.msg_flags & MSG_TRUNC) { in xs_read_stream()
649 transport->recv.calldir = cpu_to_be32(-1); in xs_read_stream()
650 transport->recv.copied = -1; in xs_read_stream()
652 if (ret < 0) in xs_read_stream()
655 if (transport->recv.offset < transport->recv.len) { in xs_read_stream()
656 if (!(msg.msg_flags & MSG_TRUNC)) in xs_read_stream()
659 ret = xs_read_discard(transport->sock, &msg, flags, in xs_read_stream()
660 transport->recv.len - transport->recv.offset); in xs_read_stream()
661 if (ret <= 0) in xs_read_stream()
663 transport->recv.offset += ret; in xs_read_stream()
665 if (transport->recv.offset != transport->recv.len) in xs_read_stream()
668 if (xs_read_stream_request_done(transport)) { in xs_read_stream()
670 transport->recv.copied = 0; in xs_read_stream()
672 transport->recv.offset = 0; in xs_read_stream()
673 transport->recv.len = 0; in xs_read_stream()
676 return ret != 0 ? ret : -ESHUTDOWN; in xs_read_stream()
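xs_read_stream() above drives a small state machine over recv.offset/recv.len/recv.copied: read the 4-byte record marker first, then the fragment body, discard any leftover bytes, and reset once the last fragment completes. A userspace sketch of the same bookkeeping, parsing a flat buffer instead of a socket; RPC_LAST_STREAM_FRAGMENT is approximated by a local constant:

/* Receive-side sketch of RPC record marking: 4-byte marker, then body,
 * repeated until a marker with the last-fragment bit set is consumed. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define LAST_FRAGMENT 0x80000000u   /* same idea as RPC_LAST_STREAM_FRAGMENT */

static void parse_stream(const uint8_t *p, size_t len)
{
        while (len >= 4) {
                uint32_t marker;

                memcpy(&marker, p, 4);
                marker = ntohl(marker);
                uint32_t fraglen = marker & ~LAST_FRAGMENT;
                int last = !!(marker & LAST_FRAGMENT);

                p += 4;
                len -= 4;
                if (fraglen > len) {
                        printf("short read: need %u more bytes\n",
                               (unsigned)(fraglen - len));
                        return;
                }
                printf("fragment of %u bytes%s\n", fraglen,
                       last ? " (last)" : "");
                p += fraglen;
                len -= fraglen;
                if (last)
                        return;
        }
}

int main(void)
{
        /* two fragments: 3 bytes, then a final 2 bytes */
        uint8_t buf[] = { 0x00,0x00,0x00,0x03, 'a','b','c',
                          0x80,0x00,0x00,0x02, 'd','e' };
        parse_stream(buf, sizeof(buf));
        return 0;
}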
681 return transport->sock->ops->poll(transport->file, transport->sock, in xs_poll_socket()
695 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); in xs_poll_check_readable()
696 if (!xs_poll_socket_readable(transport)) in xs_poll_check_readable()
698 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) in xs_poll_check_readable()
699 queue_work(xprtiod_workqueue, &transport->recv_worker); in xs_poll_check_readable()
707 mutex_lock(&transport->recv_mutex); in xs_stream_data_receive()
708 if (transport->sock == NULL) in xs_stream_data_receive()
712 if (ret < 0) in xs_stream_data_receive()
717 if (ret == -ESHUTDOWN) in xs_stream_data_receive()
718 kernel_sock_shutdown(transport->sock, SHUT_RDWR); in xs_stream_data_receive()
722 mutex_unlock(&transport->recv_mutex); in xs_stream_data_receive()
723 trace_xs_stream_read_data(&transport->xprt, ret, read); in xs_stream_data_receive()
739 transport->recv.offset = 0; in xs_stream_reset_connect()
740 transport->recv.len = 0; in xs_stream_reset_connect()
741 transport->recv.copied = 0; in xs_stream_reset_connect()
742 transport->xmit.offset = 0; in xs_stream_reset_connect()
748 transport->xprt.stat.connect_count++; in xs_stream_start_connect()
749 transport->xprt.stat.connect_start = jiffies; in xs_stream_start_connect()
755 * xs_nospace - handle an incomplete transmit in xs_nospace()
756 * @req: pointer to RPC request
761 struct rpc_xprt *xprt = req->rq_xprt; in xs_nospace()
763 struct sock *sk = transport->inet; in xs_nospace()
764 int ret = -EAGAIN; in xs_nospace()
769 spin_lock(&xprt->transport_lock); in xs_nospace()
772 if (xprt_connected(xprt)) { in xs_nospace()
774 sk->sk_write_pending++; in xs_nospace()
777 ret = -ENOTCONN; in xs_nospace()
779 spin_unlock(&xprt->transport_lock); in xs_nospace()
782 if (ret == -EAGAIN) { in xs_nospace()
786 wq = rcu_dereference(sk->sk_wq); in xs_nospace()
787 set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); in xs_nospace()
790 sk->sk_write_space(sk); in xs_nospace()
798 xdr_free_bvec(&req->rq_rcv_buf); in xs_stream_prepare_request()
799 req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL); in xs_stream_prepare_request()
803 * Determine if the previous message in the stream was aborted before it
809 return transport->xmit.offset != 0 && req->rq_bytes_sent == 0; in xs_send_request_was_aborted()
813 * Return the stream record marker field for a record of length < 2^31-1
818 if (!xdr->len) in xs_stream_record_marker()
820 return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len); in xs_stream_record_marker()
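The send side builds the same marker the parser above consumes: the top bit flags the last fragment and the low 31 bits carry the length, which is why the comment restricts records to < 2^31-1 bytes. A hedged userspace sketch using htonl() in place of cpu_to_be32():

/* Build a stream record marker: last-fragment bit plus 31-bit length. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define LAST_FRAGMENT 0x80000000u

static uint32_t stream_record_marker(uint32_t len)
{
        return htonl(LAST_FRAGMENT | len);   /* len must be < 2^31 */
}

int main(void)
{
        uint32_t rm = stream_record_marker(1428);

        printf("marker on the wire: %08x\n", (unsigned)ntohl(rm));
        return 0;
}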
824 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
825 * @req: pointer to RPC request
836 struct rpc_xprt *xprt = req->rq_xprt; in xs_local_send_request()
839 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_local_send_request()
841 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; in xs_local_send_request()
848 /* Close the stream if the previous transmission was incomplete */ in xs_local_send_request()
849 if (xs_send_request_was_aborted(transport, req)) { in xs_local_send_request()
851 return -ENOTCONN; in xs_local_send_request()
855 req->rq_svec->iov_base, req->rq_svec->iov_len); in xs_local_send_request()
857 req->rq_xtime = ktime_get(); in xs_local_send_request()
858 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, in xs_local_send_request()
859 transport->xmit.offset, rm, &sent); in xs_local_send_request()
860 dprintk("RPC: %s(%u) = %d\n", in xs_local_send_request()
861 __func__, xdr->len - transport->xmit.offset, status); in xs_local_send_request()
863 if (status == -EAGAIN && sock_writeable(transport->inet)) in xs_local_send_request()
864 status = -ENOBUFS; in xs_local_send_request()
866 if (likely(sent > 0) || status == 0) { in xs_local_send_request()
867 transport->xmit.offset += sent; in xs_local_send_request()
868 req->rq_bytes_sent = transport->xmit.offset; in xs_local_send_request()
869 if (likely(req->rq_bytes_sent >= msglen)) { in xs_local_send_request()
870 req->rq_xmit_bytes_sent += transport->xmit.offset; in xs_local_send_request()
871 transport->xmit.offset = 0; in xs_local_send_request()
874 status = -EAGAIN; in xs_local_send_request()
878 case -ENOBUFS: in xs_local_send_request()
880 case -EAGAIN: in xs_local_send_request()
884 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_local_send_request()
885 -status); in xs_local_send_request()
887 case -EPIPE: in xs_local_send_request()
889 status = -ENOTCONN; in xs_local_send_request()
896 * xs_udp_send_request - write an RPC request to a UDP socket
897 * @req: pointer to RPC request
908 struct rpc_xprt *xprt = req->rq_xprt; in xs_udp_send_request()
910 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_udp_send_request()
913 .msg_namelen = xprt->addrlen, in xs_udp_send_request()
920 req->rq_svec->iov_base, in xs_udp_send_request()
921 req->rq_svec->iov_len); in xs_udp_send_request()
923 if (!xprt_bound(xprt)) in xs_udp_send_request()
924 return -ENOTCONN; in xs_udp_send_request()
926 if (!xprt_request_get_cong(xprt, req)) in xs_udp_send_request()
927 return -EBADSLT; in xs_udp_send_request()
929 req->rq_xtime = ktime_get(); in xs_udp_send_request()
930 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent); in xs_udp_send_request()
932 dprintk("RPC: xs_udp_send_request(%u) = %d\n", in xs_udp_send_request()
933 xdr->len, status); in xs_udp_send_request()
935 /* firewall is blocking us, don't return -EAGAIN or we end up looping */ in xs_udp_send_request()
936 if (status == -EPERM) in xs_udp_send_request()
939 if (status == -EAGAIN && sock_writeable(transport->inet)) in xs_udp_send_request()
940 status = -ENOBUFS; in xs_udp_send_request()
942 if (sent > 0 || status == 0) { in xs_udp_send_request()
943 req->rq_xmit_bytes_sent += sent; in xs_udp_send_request()
944 if (sent >= req->rq_slen) in xs_udp_send_request()
947 status = -EAGAIN; in xs_udp_send_request()
952 case -ENOTSOCK: in xs_udp_send_request()
953 status = -ENOTCONN; in xs_udp_send_request()
956 case -EAGAIN: in xs_udp_send_request()
959 case -ENETUNREACH: in xs_udp_send_request()
960 case -ENOBUFS: in xs_udp_send_request()
961 case -EPIPE: in xs_udp_send_request()
962 case -ECONNREFUSED: in xs_udp_send_request()
963 case -EPERM: in xs_udp_send_request()
968 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_udp_send_request()
969 -status); in xs_udp_send_request()
976 * xs_tcp_send_request - write an RPC request to a TCP socket
977 * @req: pointer to RPC request
987 * if sendmsg is not able to make progress?
991 struct rpc_xprt *xprt = req->rq_xprt; in xs_tcp_send_request()
993 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_tcp_send_request()
995 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; in xs_tcp_send_request()
1003 /* Close the stream if the previous transmission was incomplete */ in xs_tcp_send_request()
1004 if (xs_send_request_was_aborted(transport, req)) { in xs_tcp_send_request()
1005 if (transport->sock != NULL) in xs_tcp_send_request()
1006 kernel_sock_shutdown(transport->sock, SHUT_RDWR); in xs_tcp_send_request()
1007 return -ENOTCONN; in xs_tcp_send_request()
1011 req->rq_svec->iov_base, in xs_tcp_send_request()
1012 req->rq_svec->iov_len); in xs_tcp_send_request()
1014 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) in xs_tcp_send_request()
1015 xs_tcp_set_socket_timeouts(xprt, transport->sock); in xs_tcp_send_request()
1020 req->rq_xtime = ktime_get(); in xs_tcp_send_request()
1022 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, in xs_tcp_send_request()
1023 transport->xmit.offset, rm, &sent); in xs_tcp_send_request()
1025 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", in xs_tcp_send_request()
1026 xdr->len - transport->xmit.offset, status); in xs_tcp_send_request()
1028 /* If we've sent the entire packet, immediately in xs_tcp_send_request()
1030 transport->xmit.offset += sent; in xs_tcp_send_request()
1031 req->rq_bytes_sent = transport->xmit.offset; in xs_tcp_send_request()
1032 if (likely(req->rq_bytes_sent >= msglen)) { in xs_tcp_send_request()
1033 req->rq_xmit_bytes_sent += transport->xmit.offset; in xs_tcp_send_request()
1034 transport->xmit.offset = 0; in xs_tcp_send_request()
1040 if (status == -EAGAIN) { in xs_tcp_send_request()
1042 * Return EAGAIN if we're sure we're hitting the in xs_tcp_send_request()
1045 if (test_bit(SOCK_NOSPACE, &transport->sock->flags)) in xs_tcp_send_request()
1050 if (sent == 0) { in xs_tcp_send_request()
1051 status = -ENOBUFS; in xs_tcp_send_request()
1052 if (vm_wait) in xs_tcp_send_request()
1061 if (status < 0) in xs_tcp_send_request()
1067 case -ENOTSOCK: in xs_tcp_send_request()
1068 status = -ENOTCONN; in xs_tcp_send_request()
1071 case -EAGAIN: in xs_tcp_send_request()
1074 case -ECONNRESET: in xs_tcp_send_request()
1075 case -ECONNREFUSED: in xs_tcp_send_request()
1076 case -ENOTCONN: in xs_tcp_send_request()
1077 case -EADDRINUSE: in xs_tcp_send_request()
1078 case -ENOBUFS: in xs_tcp_send_request()
1079 case -EPIPE: in xs_tcp_send_request()
1082 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_tcp_send_request()
1083 -status); in xs_tcp_send_request()
1091 transport->old_data_ready = sk->sk_data_ready; in xs_save_old_callbacks()
1092 transport->old_state_change = sk->sk_state_change; in xs_save_old_callbacks()
1093 transport->old_write_space = sk->sk_write_space; in xs_save_old_callbacks()
1094 transport->old_error_report = sk->sk_error_report; in xs_save_old_callbacks()
1099 sk->sk_data_ready = transport->old_data_ready; in xs_restore_old_callbacks()
1100 sk->sk_state_change = transport->old_state_change; in xs_restore_old_callbacks()
1101 sk->sk_write_space = transport->old_write_space; in xs_restore_old_callbacks()
1102 sk->sk_error_report = transport->old_error_report; in xs_restore_old_callbacks()
1109 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); in xs_sock_reset_state_flags()
1110 clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state); in xs_sock_reset_state_flags()
1111 clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state); in xs_sock_reset_state_flags()
1112 clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state); in xs_sock_reset_state_flags()
1117 set_bit(nr, &transport->sock_state); in xs_run_error_worker()
1118 queue_work(xprtiod_workqueue, &transport->error_worker); in xs_run_error_worker()
1124 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_sock_reset_connection_flags()
1125 clear_bit(XPRT_CLOSING, &xprt->state); in xs_sock_reset_connection_flags()
1131 * xs_error_report - callback to handle TCP socket state errors
1135 * using the socket, and so we don't want to clear sk->sk_err.
1142 read_lock_bh(&sk->sk_callback_lock); in xs_error_report()
1143 if (!(xprt = xprt_from_sock(sk))) in xs_error_report()
1147 transport->xprt_err = -sk->sk_err; in xs_error_report()
1148 if (transport->xprt_err == 0) in xs_error_report()
1150 dprintk("RPC: xs_error_report client %p, error=%d...\n", in xs_error_report()
1151 xprt, -transport->xprt_err); in xs_error_report()
1152 trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err); in xs_error_report()
1158 read_unlock_bh(&sk->sk_callback_lock); in xs_error_report()
1163 struct socket *sock = transport->sock; in xs_reset_transport()
1164 struct sock *sk = transport->inet; in xs_reset_transport()
1165 struct rpc_xprt *xprt = &transport->xprt; in xs_reset_transport()
1166 struct file *filp = transport->file; in xs_reset_transport()
1168 if (sk == NULL) in xs_reset_transport()
1175 if (!(current->flags & PF_WQ_WORKER)) { in xs_reset_transport()
1177 set_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_reset_transport()
1181 if (atomic_read(&transport->xprt.swapper)) in xs_reset_transport()
1186 mutex_lock(&transport->recv_mutex); in xs_reset_transport()
1187 write_lock_bh(&sk->sk_callback_lock); in xs_reset_transport()
1188 transport->inet = NULL; in xs_reset_transport()
1189 transport->sock = NULL; in xs_reset_transport()
1190 transport->file = NULL; in xs_reset_transport()
1192 sk->sk_user_data = NULL; in xs_reset_transport()
1196 write_unlock_bh(&sk->sk_callback_lock); in xs_reset_transport()
1200 mutex_unlock(&transport->recv_mutex); in xs_reset_transport()
1209 * xs_close - close a socket
1222 dprintk("RPC: xs_close xprt %p\n", xprt); in xs_close()
1225 xprt->reestablish_timeout = 0; in xs_close()
1230 dprintk("RPC: injecting transport disconnect on xprt=%p\n", in xs_inject_disconnect()
1242 * xs_destroy - prepare to shutdown a transport
1250 dprintk("RPC: xs_destroy xprt %p\n", xprt); in xs_destroy()
1252 cancel_delayed_work_sync(&transport->connect_worker); in xs_destroy()
1254 cancel_work_sync(&transport->recv_worker); in xs_destroy()
1255 cancel_work_sync(&transport->error_worker); in xs_destroy()
1261 * xs_udp_data_read_skb - receive callback for UDP sockets
1277 repsize = skb->len; in xs_udp_data_read_skb()
1278 if (repsize < 4) { in xs_udp_data_read_skb()
1279 dprintk("RPC: impossible RPC reply size %d!\n", repsize); in xs_udp_data_read_skb()
1285 if (xp == NULL) in xs_udp_data_read_skb()
1289 spin_lock(&xprt->queue_lock); in xs_udp_data_read_skb()
1291 if (!rovr) in xs_udp_data_read_skb()
1294 xprt_update_rtt(rovr->rq_task); in xs_udp_data_read_skb()
1295 spin_unlock(&xprt->queue_lock); in xs_udp_data_read_skb()
1296 task = rovr->rq_task; in xs_udp_data_read_skb()
1298 if ((copied = rovr->rq_private_buf.buflen) > repsize) in xs_udp_data_read_skb()
1301 /* Suck it into the iovec, verify checksum if not done by hw. */ in xs_udp_data_read_skb()
1302 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { in xs_udp_data_read_skb()
1303 spin_lock(&xprt->queue_lock); in xs_udp_data_read_skb()
1309 spin_lock(&xprt->transport_lock); in xs_udp_data_read_skb()
1311 spin_unlock(&xprt->transport_lock); in xs_udp_data_read_skb()
1312 spin_lock(&xprt->queue_lock); in xs_udp_data_read_skb()
1318 spin_unlock(&xprt->queue_lock); in xs_udp_data_read_skb()
1327 mutex_lock(&transport->recv_mutex); in xs_udp_data_receive()
1328 sk = transport->inet; in xs_udp_data_receive()
1329 if (sk == NULL) in xs_udp_data_receive()
1333 if (skb == NULL) in xs_udp_data_receive()
1335 xs_udp_data_read_skb(&transport->xprt, sk, skb); in xs_udp_data_receive()
1341 mutex_unlock(&transport->recv_mutex); in xs_udp_data_receive()
1355 * xs_data_ready - "data ready" callback for UDP sockets
1363 read_lock_bh(&sk->sk_callback_lock); in xs_data_ready()
1364 dprintk("RPC: xs_data_ready...\n"); in xs_data_ready()
1366 if (xprt != NULL) { in xs_data_ready()
1369 transport->old_data_ready(sk); in xs_data_ready()
1373 if (xprt->reestablish_timeout) in xs_data_ready()
1374 xprt->reestablish_timeout = 0; in xs_data_ready()
1375 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) in xs_data_ready()
1376 queue_work(xprtiod_workqueue, &transport->recv_worker); in xs_data_ready()
1378 read_unlock_bh(&sk->sk_callback_lock); in xs_data_ready()
1382 * Helper function to force a TCP close if the server is sending
1390 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1398 * xs_tcp_state_change - callback to handle TCP socket state changes
1407 read_lock_bh(&sk->sk_callback_lock); in xs_tcp_state_change()
1408 if (!(xprt = xprt_from_sock(sk))) in xs_tcp_state_change()
1410 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); in xs_tcp_state_change()
1411 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n", in xs_tcp_state_change()
1412 sk->sk_state, xprt_connected(xprt), in xs_tcp_state_change()
1415 sk->sk_shutdown); in xs_tcp_state_change()
1418 trace_rpc_socket_state_change(xprt, sk->sk_socket); in xs_tcp_state_change()
1419 switch (sk->sk_state) { in xs_tcp_state_change()
1421 if (!xprt_test_and_set_connected(xprt)) { in xs_tcp_state_change()
1422 xprt->connect_cookie++; in xs_tcp_state_change()
1423 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); in xs_tcp_state_change()
1426 xprt->stat.connect_count++; in xs_tcp_state_change()
1427 xprt->stat.connect_time += (long)jiffies - in xs_tcp_state_change()
1428 xprt->stat.connect_start; in xs_tcp_state_change()
1434 xprt->connect_cookie++; in xs_tcp_state_change()
1435 xprt->reestablish_timeout = 0; in xs_tcp_state_change()
1436 set_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1438 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1439 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_tcp_state_change()
1444 xprt->connect_cookie++; in xs_tcp_state_change()
1445 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1450 * If the server closed down the connection, make sure that in xs_tcp_state_change()
1453 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) in xs_tcp_state_change()
1454 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_tcp_state_change()
1457 set_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1459 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1463 if (test_and_clear_bit(XPRT_SOCK_CONNECTING, in xs_tcp_state_change()
1464 &transport->sock_state)) in xs_tcp_state_change()
1466 clear_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1471 read_unlock_bh(&sk->sk_callback_lock); in xs_tcp_state_change()
1480 if (!sk->sk_socket) in xs_write_space()
1482 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in xs_write_space()
1484 if (unlikely(!(xprt = xprt_from_sock(sk)))) in xs_write_space()
1488 wq = rcu_dereference(sk->sk_wq); in xs_write_space()
1489 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0) in xs_write_space()
1493 sk->sk_write_pending--; in xs_write_space()
1499 * xs_udp_write_space - callback invoked when socket buffer space
1510 read_lock_bh(&sk->sk_callback_lock); in xs_udp_write_space()
1513 if (sock_writeable(sk)) in xs_udp_write_space()
1516 read_unlock_bh(&sk->sk_callback_lock); in xs_udp_write_space()
1520 * xs_tcp_write_space - callback invoked when socket buffer space
1531 read_lock_bh(&sk->sk_callback_lock); in xs_tcp_write_space()
1534 if (sk_stream_is_writeable(sk)) in xs_tcp_write_space()
1537 read_unlock_bh(&sk->sk_callback_lock); in xs_tcp_write_space()
1543 struct sock *sk = transport->inet; in xs_udp_do_set_buffer_size()
1545 if (transport->rcvsize) { in xs_udp_do_set_buffer_size()
1546 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; in xs_udp_do_set_buffer_size()
1547 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2; in xs_udp_do_set_buffer_size()
1549 if (transport->sndsize) { in xs_udp_do_set_buffer_size()
1550 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; in xs_udp_do_set_buffer_size()
1551 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2; in xs_udp_do_set_buffer_size()
1552 sk->sk_write_space(sk); in xs_udp_do_set_buffer_size()
1557 * xs_udp_set_buffer_size - set send and receive limits
1568 transport->sndsize = 0; in xs_udp_set_buffer_size()
1569 if (sndsize) in xs_udp_set_buffer_size()
1570 transport->sndsize = sndsize + 1024; in xs_udp_set_buffer_size()
1571 transport->rcvsize = 0; in xs_udp_set_buffer_size()
1572 if (rcvsize) in xs_udp_set_buffer_size()
1573 transport->rcvsize = rcvsize + 1024; in xs_udp_set_buffer_size()
1579 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1587 spin_lock(&xprt->transport_lock); in xs_udp_timer()
1588 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); in xs_udp_timer()
1589 spin_unlock(&xprt->transport_lock); in xs_udp_timer()
1598 if (max < min) in xs_get_random_port()
1599 return -EADDRINUSE; in xs_get_random_port()
1600 range = max - min + 1; in xs_get_random_port()
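xs_get_random_port() above picks a source port uniformly from [min, max] after rejecting an inverted range. A userspace approximation; rand() stands in for the kernel's PRNG helpers:

/* Pick a port in [min, max]; an inverted range is rejected, mirroring
 * the -EADDRINUSE check above. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int get_random_port(unsigned int min, unsigned int max)
{
        unsigned int range;

        if (max < min)
                return -1;
        range = max - min + 1;
        return (int)(min + ((unsigned int)rand() % range));
}

int main(void)
{
        srand((unsigned int)time(NULL));
        printf("picked port %d in [665, 1023]\n", get_random_port(665, 1023));
        return 0;
}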
1610 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0) in xs_sock_getport()
1614 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port); in xs_sock_getport()
1617 port = ntohs(((struct sockaddr_in *)&buf)->sin_port); in xs_sock_getport()
1624 * xs_set_port - reset the port number in the remote endpoint address
1631 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); in xs_set_port()
1639 if (transport->srcport == 0 && transport->xprt.reuseport) in xs_set_srcport()
1640 transport->srcport = xs_sock_getport(sock); in xs_set_srcport()
1645 int port = transport->srcport; in xs_get_srcport()
1647 if (port == 0 && transport->xprt.resvport) in xs_get_srcport()
1655 return xs_sock_getport(sock->sock); in get_srcport()
1661 if (transport->srcport != 0) in xs_next_srcport()
1662 transport->srcport = 0; in xs_next_srcport()
1663 if (!transport->xprt.resvport) in xs_next_srcport()
1665 if (port <= xprt_min_resvport || port > xprt_max_resvport) in xs_next_srcport()
1667 return --port; in xs_next_srcport()
1677 * If we are asking for any ephemeral port (i.e. port == 0 && in xs_bind()
1678 * transport->xprt.resvport == 0), don't bind. Let the local in xs_bind()
1687 * If we're asking for any reserved port (i.e. port == 0 && in xs_bind()
1688 * transport->xprt.resvport == 1) xs_get_srcport above will in xs_bind()
1689 * ensure that port is non-zero and we will bind as needed. in xs_bind()
1691 if (port <= 0) in xs_bind()
1694 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); in xs_bind()
1698 transport->xprt.addrlen); in xs_bind()
1699 if (err == 0) { in xs_bind()
1700 if (transport->xprt.reuseport) in xs_bind()
1701 transport->srcport = port; in xs_bind()
1706 if (port > last) in xs_bind()
1708 } while (err == -EADDRINUSE && nloop != 2); in xs_bind()
1710 if (myaddr.ss_family == AF_INET) in xs_bind()
1711 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__, in xs_bind()
1712 &((struct sockaddr_in *)&myaddr)->sin_addr, in xs_bind()
1715 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__, in xs_bind()
1716 &((struct sockaddr_in6 *)&myaddr)->sin6_addr, in xs_bind()
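The xs_bind() fragments above try a candidate source port and, on -EADDRINUSE, move to the next candidate and retry. A simplified userspace version of that bind-and-retry loop, IPv4 only, with an arbitrary example port range and no reserved-port policy:

/* Bind-and-retry sketch: step down through candidate ports until one binds. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in sin;
        unsigned short port;

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        for (port = 50000; port > 49990; port--) {
                sin.sin_port = htons(port);
                if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) == 0) {
                        printf("bound to port %u\n", port);
                        break;
                }
                if (errno != EADDRINUSE) {
                        perror("bind");
                        break;
                }
                /* port in use: fall through and try the next one down */
        }
        close(fd);
        return 0;
}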
1726 xprt_set_bound(task->tk_xprt); in xs_local_rpcbind()
1739 struct sock *sk = sock->sk; in xs_reclassify_socketu()
1741 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", in xs_reclassify_socketu()
1742 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); in xs_reclassify_socketu()
1747 struct sock *sk = sock->sk; in xs_reclassify_socket4()
1749 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", in xs_reclassify_socket4()
1750 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); in xs_reclassify_socket4()
1755 struct sock *sk = sock->sk; in xs_reclassify_socket6()
1757 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", in xs_reclassify_socket6()
1758 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); in xs_reclassify_socket6()
1763 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk))) in xs_reclassify_socket()
1796 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1); in xs_create_sock()
1797 if (err < 0) { in xs_create_sock()
1798 dprintk("RPC: can't create %d transport socket (%d).\n", in xs_create_sock()
1799 protocol, -err); in xs_create_sock()
1804 if (reuseport) in xs_create_sock()
1805 sock_set_reuseport(sock->sk); in xs_create_sock()
1808 if (err) { in xs_create_sock()
1814 if (IS_ERR(filp)) in xs_create_sock()
1816 transport->file = filp; in xs_create_sock()
1829 if (!transport->inet) { in xs_local_finish_connecting()
1830 struct sock *sk = sock->sk; in xs_local_finish_connecting()
1832 write_lock_bh(&sk->sk_callback_lock); in xs_local_finish_connecting()
1836 sk->sk_user_data = xprt; in xs_local_finish_connecting()
1837 sk->sk_data_ready = xs_data_ready; in xs_local_finish_connecting()
1838 sk->sk_write_space = xs_udp_write_space; in xs_local_finish_connecting()
1840 sk->sk_error_report = xs_error_report; in xs_local_finish_connecting()
1845 transport->sock = sock; in xs_local_finish_connecting()
1846 transport->inet = sk; in xs_local_finish_connecting()
1848 write_unlock_bh(&sk->sk_callback_lock); in xs_local_finish_connecting()
1853 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); in xs_local_finish_connecting()
1857 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1862 struct rpc_xprt *xprt = &transport->xprt; in xs_local_setup_socket()
1867 status = __sock_create(xprt->xprt_net, AF_LOCAL, in xs_local_setup_socket()
1869 if (status < 0) { in xs_local_setup_socket()
1870 dprintk("RPC: can't create AF_LOCAL " in xs_local_setup_socket()
1871 "transport socket (%d).\n", -status); in xs_local_setup_socket()
1877 if (IS_ERR(filp)) { in xs_local_setup_socket()
1881 transport->file = filp; in xs_local_setup_socket()
1883 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n", in xs_local_setup_socket()
1884 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1890 dprintk("RPC: xprt %p connected to %s\n", in xs_local_setup_socket()
1891 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1892 xprt->stat.connect_count++; in xs_local_setup_socket()
1893 xprt->stat.connect_time += (long)jiffies - in xs_local_setup_socket()
1894 xprt->stat.connect_start; in xs_local_setup_socket()
1896 case -ENOBUFS: in xs_local_setup_socket()
1898 case -ENOENT: in xs_local_setup_socket()
1899 dprintk("RPC: xprt %p: socket %s does not exist\n", in xs_local_setup_socket()
1900 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1902 case -ECONNREFUSED: in xs_local_setup_socket()
1903 dprintk("RPC: xprt %p: connection refused for %s\n", in xs_local_setup_socket()
1904 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1908 __func__, -status, in xs_local_setup_socket()
1909 xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1923 if (RPC_IS_ASYNC(task)) { in xs_local_connect()
1926 * filesystem namespace of the process making the rpc in xs_local_connect()
1929 * If we want to support asynchronous AF_LOCAL calls, in xs_local_connect()
1933 task->tk_rpc_status = -ENOTCONN; in xs_local_connect()
1934 rpc_exit(task, -ENOTCONN); in xs_local_connect()
1938 if (ret && !RPC_IS_SOFTCONN(task)) in xs_local_connect()
1942 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1954 * If there's no sock, then we have nothing to set. The in xs_set_memalloc()
1957 if (!transport->inet) in xs_set_memalloc()
1959 if (atomic_read(&xprt->swapper)) in xs_set_memalloc()
1960 sk_set_memalloc(transport->inet); in xs_set_memalloc()
1964 * xs_enable_swap - Tag this transport as being used for swap.
1968 * optionally mark it for swapping if it wasn't already.
1975 if (atomic_inc_return(&xprt->swapper) != 1) in xs_enable_swap()
1977 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) in xs_enable_swap()
1978 return -ERESTARTSYS; in xs_enable_swap()
1979 if (xs->inet) in xs_enable_swap()
1980 sk_set_memalloc(xs->inet); in xs_enable_swap()
1986 * xs_disable_swap - Untag this transport as being used for swap.
1989 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
1997 if (!atomic_dec_and_test(&xprt->swapper)) in xs_disable_swap()
1999 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) in xs_disable_swap()
2001 if (xs->inet) in xs_disable_swap()
2002 sk_clear_memalloc(xs->inet); in xs_disable_swap()
2013 return -EINVAL; in xs_enable_swap()
2026 if (!transport->inet) { in xs_udp_finish_connecting()
2027 struct sock *sk = sock->sk; in xs_udp_finish_connecting()
2029 write_lock_bh(&sk->sk_callback_lock); in xs_udp_finish_connecting()
2033 sk->sk_user_data = xprt; in xs_udp_finish_connecting()
2034 sk->sk_data_ready = xs_data_ready; in xs_udp_finish_connecting()
2035 sk->sk_write_space = xs_udp_write_space; in xs_udp_finish_connecting()
2041 transport->sock = sock; in xs_udp_finish_connecting()
2042 transport->inet = sk; in xs_udp_finish_connecting()
2046 write_unlock_bh(&sk->sk_callback_lock); in xs_udp_finish_connecting()
2050 xprt->stat.connect_start = jiffies; in xs_udp_finish_connecting()
2057 struct rpc_xprt *xprt = &transport->xprt; in xs_udp_setup_socket()
2059 int status = -EIO; in xs_udp_setup_socket()
2062 xs_addr(xprt)->sa_family, SOCK_DGRAM, in xs_udp_setup_socket()
2064 if (IS_ERR(sock)) in xs_udp_setup_socket()
2067 dprintk("RPC: worker connecting xprt %p via %s to " in xs_udp_setup_socket()
2069 xprt->address_strings[RPC_DISPLAY_PROTO], in xs_udp_setup_socket()
2070 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_udp_setup_socket()
2071 xprt->address_strings[RPC_DISPLAY_PORT]); in xs_udp_setup_socket()
2083 * xs_tcp_shutdown - gracefully shut down a TCP socket
2092 struct socket *sock = transport->sock; in xs_tcp_shutdown()
2093 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE; in xs_tcp_shutdown()
2095 if (sock == NULL) in xs_tcp_shutdown()
2116 spin_lock(&xprt->transport_lock); in xs_tcp_set_socket_timeouts()
2117 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); in xs_tcp_set_socket_timeouts()
2118 keepcnt = xprt->timeout->to_retries + 1; in xs_tcp_set_socket_timeouts()
2119 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * in xs_tcp_set_socket_timeouts()
2120 (xprt->timeout->to_retries + 1); in xs_tcp_set_socket_timeouts()
2121 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); in xs_tcp_set_socket_timeouts()
2122 spin_unlock(&xprt->transport_lock); in xs_tcp_set_socket_timeouts()
2125 sock_set_keepalive(sock->sk); in xs_tcp_set_socket_timeouts()
2126 tcp_sock_set_keepidle(sock->sk, keepidle); in xs_tcp_set_socket_timeouts()
2127 tcp_sock_set_keepintvl(sock->sk, keepidle); in xs_tcp_set_socket_timeouts()
2128 tcp_sock_set_keepcnt(sock->sk, keepcnt); in xs_tcp_set_socket_timeouts()
2131 tcp_sock_set_user_timeout(sock->sk, timeo); in xs_tcp_set_socket_timeouts()
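xs_tcp_set_socket_timeouts() above derives keepalive settings from the RPC timeout: roughly to_initval for the idle/interval time, to_retries + 1 probes, and a TCP user timeout spanning all retries. A userspace approximation with setsockopt() and example values, not the kernel's:

/* Derive keepalive parameters from an RPC-style (initval, retries) pair. */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        unsigned int to_initval_s = 60;                 /* example: 60s per try   */
        unsigned int to_retries   = 2;                  /* example: 2 retries     */
        int keepidle = (int)to_initval_s;               /* seconds before probing  */
        int keepcnt  = (int)to_retries + 1;             /* probes before giving up */
        unsigned int user_timeout_ms = to_initval_s * 1000 * (to_retries + 1);
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int on = 1;

        if (fd < 0)
                return 1;
        setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepidle, sizeof(keepidle));
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &keepidle, sizeof(keepidle));
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &keepcnt, sizeof(keepcnt));
        setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                   &user_timeout_ms, sizeof(user_timeout_ms));

        printf("keepidle=%ds keepcnt=%d user_timeout=%ums\n",
               keepidle, keepcnt, user_timeout_ms);
        close(fd);
        return 0;
}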
2142 spin_lock(&xprt->transport_lock); in xs_tcp_set_connect_timeout()
2143 if (reconnect_timeout < xprt->max_reconnect_timeout) in xs_tcp_set_connect_timeout()
2144 xprt->max_reconnect_timeout = reconnect_timeout; in xs_tcp_set_connect_timeout()
2145 if (connect_timeout < xprt->connect_timeout) { in xs_tcp_set_connect_timeout()
2146 memcpy(&to, xprt->timeout, sizeof(to)); in xs_tcp_set_connect_timeout()
2149 if (initval < XS_TCP_INIT_REEST_TO << 1) in xs_tcp_set_connect_timeout()
2153 memcpy(&transport->tcp_timeout, &to, in xs_tcp_set_connect_timeout()
2154 sizeof(transport->tcp_timeout)); in xs_tcp_set_connect_timeout()
2155 xprt->timeout = &transport->tcp_timeout; in xs_tcp_set_connect_timeout()
2156 xprt->connect_timeout = connect_timeout; in xs_tcp_set_connect_timeout()
2158 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); in xs_tcp_set_connect_timeout()
2159 spin_unlock(&xprt->transport_lock); in xs_tcp_set_connect_timeout()
2165 int ret = -ENOTCONN; in xs_tcp_finish_connecting()
2167 if (!transport->inet) { in xs_tcp_finish_connecting()
2168 struct sock *sk = sock->sk; in xs_tcp_finish_connecting()
2170 /* Avoid temporary addresses, they are bad for long-lived in xs_tcp_finish_connecting()
2177 if (xs_addr(xprt)->sa_family == PF_INET6) { in xs_tcp_finish_connecting()
2184 write_lock_bh(&sk->sk_callback_lock); in xs_tcp_finish_connecting()
2188 sk->sk_user_data = xprt; in xs_tcp_finish_connecting()
2189 sk->sk_data_ready = xs_data_ready; in xs_tcp_finish_connecting()
2190 sk->sk_state_change = xs_tcp_state_change; in xs_tcp_finish_connecting()
2191 sk->sk_write_space = xs_tcp_write_space; in xs_tcp_finish_connecting()
2193 sk->sk_error_report = xs_error_report; in xs_tcp_finish_connecting()
2197 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; in xs_tcp_finish_connecting()
2202 transport->sock = sock; in xs_tcp_finish_connecting()
2203 transport->inet = sk; in xs_tcp_finish_connecting()
2205 write_unlock_bh(&sk->sk_callback_lock); in xs_tcp_finish_connecting()
2208 if (!xprt_bound(xprt)) in xs_tcp_finish_connecting()
2216 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); in xs_tcp_finish_connecting()
2217 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); in xs_tcp_finish_connecting()
2222 case -EINPROGRESS: in xs_tcp_finish_connecting()
2224 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) in xs_tcp_finish_connecting()
2225 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_tcp_finish_connecting()
2227 case -EADDRNOTAVAIL: in xs_tcp_finish_connecting()
2229 transport->srcport = 0; in xs_tcp_finish_connecting()
2236 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2245 struct socket *sock = transport->sock; in xs_tcp_setup_socket()
2246 struct rpc_xprt *xprt = &transport->xprt; in xs_tcp_setup_socket()
2247 int status = -EIO; in xs_tcp_setup_socket()
2249 if (!sock) { in xs_tcp_setup_socket()
2251 xs_addr(xprt)->sa_family, SOCK_STREAM, in xs_tcp_setup_socket()
2253 if (IS_ERR(sock)) { in xs_tcp_setup_socket()
2259 dprintk("RPC: worker connecting xprt %p via %s to " in xs_tcp_setup_socket()
2261 xprt->address_strings[RPC_DISPLAY_PROTO], in xs_tcp_setup_socket()
2262 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_tcp_setup_socket()
2263 xprt->address_strings[RPC_DISPLAY_PORT]); in xs_tcp_setup_socket()
2267 dprintk("RPC: %p connect status %d connected %d sock state %d\n", in xs_tcp_setup_socket()
2268 xprt, -status, xprt_connected(xprt), in xs_tcp_setup_socket()
2269 sock->sk->sk_state); in xs_tcp_setup_socket()
2275 case -EADDRNOTAVAIL: in xs_tcp_setup_socket()
2282 case -EINPROGRESS: in xs_tcp_setup_socket()
2283 case -EALREADY: in xs_tcp_setup_socket()
2286 case -EINVAL: in xs_tcp_setup_socket()
2287 /* Happens, for instance, if the user specified a link in xs_tcp_setup_socket()
2288 * local IPv6 address without a scope-id. in xs_tcp_setup_socket()
2290 case -ECONNREFUSED: in xs_tcp_setup_socket()
2291 case -ECONNRESET: in xs_tcp_setup_socket()
2292 case -ENETDOWN: in xs_tcp_setup_socket()
2293 case -ENETUNREACH: in xs_tcp_setup_socket()
2294 case -EHOSTUNREACH: in xs_tcp_setup_socket()
2295 case -EADDRINUSE: in xs_tcp_setup_socket()
2296 case -ENOBUFS: in xs_tcp_setup_socket()
2298 * xs_tcp_force_close() wakes tasks with -EIO. in xs_tcp_setup_socket()
2306 status = -EAGAIN; in xs_tcp_setup_socket()
2314 * xs_connect - connect a socket to a remote endpoint
2316 * @task: address of RPC task that manages state of connect request
2318 * TCP: If the remote end dropped the connection, delay reconnecting.
2324 * If a UDP socket connect fails, the delay behavior here prevents
2334 if (transport->sock != NULL) { in xs_connect()
2335 dprintk("RPC: xs_connect delayed xprt %p for %lu " in xs_connect()
2337 xprt, xprt->reestablish_timeout / HZ); in xs_connect()
2346 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); in xs_connect()
2349 &transport->connect_worker, in xs_connect()
2355 if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state)) in xs_wake_disconnect()
2356 xs_tcp_force_close(&transport->xprt); in xs_wake_disconnect()
2361 if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state)) in xs_wake_write()
2362 xprt_write_space(&transport->xprt); in xs_wake_write()
2369 if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) in xs_wake_error()
2371 mutex_lock(&transport->recv_mutex); in xs_wake_error()
2372 if (transport->sock == NULL) in xs_wake_error()
2374 if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) in xs_wake_error()
2376 sockerr = xchg(&transport->xprt_err, 0); in xs_wake_error()
2377 if (sockerr < 0) in xs_wake_error()
2378 xprt_wake_pending_tasks(&transport->xprt, sockerr); in xs_wake_error()
2380 mutex_unlock(&transport->recv_mutex); in xs_wake_error()
2385 if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state)) in xs_wake_pending()
2386 xprt_wake_pending_tasks(&transport->xprt, -EAGAIN); in xs_wake_pending()
2401 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2410 if (xprt_connected(xprt)) in xs_local_print_stats()
2411 idle_time = (long)(jiffies - xprt->last_used) / HZ; in xs_local_print_stats()
2415 xprt->stat.bind_count, in xs_local_print_stats()
2416 xprt->stat.connect_count, in xs_local_print_stats()
2417 xprt->stat.connect_time / HZ, in xs_local_print_stats()
2419 xprt->stat.sends, in xs_local_print_stats()
2420 xprt->stat.recvs, in xs_local_print_stats()
2421 xprt->stat.bad_xids, in xs_local_print_stats()
2422 xprt->stat.req_u, in xs_local_print_stats()
2423 xprt->stat.bklog_u, in xs_local_print_stats()
2424 xprt->stat.max_slots, in xs_local_print_stats()
2425 xprt->stat.sending_u, in xs_local_print_stats()
2426 xprt->stat.pending_u); in xs_local_print_stats()
2430 * xs_udp_print_stats - display UDP socket-specific stats
2441 transport->srcport, in xs_udp_print_stats()
2442 xprt->stat.bind_count, in xs_udp_print_stats()
2443 xprt->stat.sends, in xs_udp_print_stats()
2444 xprt->stat.recvs, in xs_udp_print_stats()
2445 xprt->stat.bad_xids, in xs_udp_print_stats()
2446 xprt->stat.req_u, in xs_udp_print_stats()
2447 xprt->stat.bklog_u, in xs_udp_print_stats()
2448 xprt->stat.max_slots, in xs_udp_print_stats()
2449 xprt->stat.sending_u, in xs_udp_print_stats()
2450 xprt->stat.pending_u); in xs_udp_print_stats()
2454 * xs_tcp_print_stats - display TCP socket-specific stats
2464 if (xprt_connected(xprt)) in xs_tcp_print_stats()
2465 idle_time = (long)(jiffies - xprt->last_used) / HZ; in xs_tcp_print_stats()
2469 transport->srcport, in xs_tcp_print_stats()
2470 xprt->stat.bind_count, in xs_tcp_print_stats()
2471 xprt->stat.connect_count, in xs_tcp_print_stats()
2472 xprt->stat.connect_time / HZ, in xs_tcp_print_stats()
2474 xprt->stat.sends, in xs_tcp_print_stats()
2475 xprt->stat.recvs, in xs_tcp_print_stats()
2476 xprt->stat.bad_xids, in xs_tcp_print_stats()
2477 xprt->stat.req_u, in xs_tcp_print_stats()
2478 xprt->stat.bklog_u, in xs_tcp_print_stats()
2479 xprt->stat.max_slots, in xs_tcp_print_stats()
2480 xprt->stat.sending_u, in xs_tcp_print_stats()
2481 xprt->stat.pending_u); in xs_tcp_print_stats()
2485 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2491 struct rpc_rqst *rqst = task->tk_rqstp; in bc_malloc()
2492 size_t size = rqst->rq_callsize; in bc_malloc()
2496 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) { in bc_malloc()
2499 return -EINVAL; in bc_malloc()
2503 if (!page) in bc_malloc()
2504 return -ENOMEM; in bc_malloc()
2507 buf->len = PAGE_SIZE; in bc_malloc()
2509 rqst->rq_buffer = buf->data; in bc_malloc()
2510 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; in bc_malloc()
2519 void *buffer = task->tk_rqstp->rq_buffer; in bc_free()
2528 struct xdr_buf *xdr = &req->rq_snd_buf; in bc_sendto()
2530 container_of(req->rq_xprt, struct sock_xprt, xprt); in bc_sendto()
2535 (u32)xdr->len); in bc_sendto()
2539 req->rq_xtime = ktime_get(); in bc_sendto()
2540 err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent); in bc_sendto()
2542 if (err < 0 || sent != (xdr->len + sizeof(marker))) in bc_sendto()
2543 return -EAGAIN; in bc_sendto()
2548 * bc_send_request - Send a backchannel Call on a TCP socket
2555 * %0 if the message was sent successfully
2556 * %ENOTCONN if the message was not sent
2566 xprt = req->rq_xprt->bc_xprt; in bc_send_request()
2572 mutex_lock(&xprt->xpt_mutex); in bc_send_request()
2573 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) in bc_send_request()
2574 len = -ENOTCONN; in bc_send_request()
2577 mutex_unlock(&xprt->xpt_mutex); in bc_send_request()
2579 if (len > 0) in bc_send_request()
2601 dprintk("RPC: bc_destroy xprt %p\n", xprt); in bc_destroy()
2721 dprintk("RPC: %s: Bad address family\n", __func__); in xs_init_anyaddr()
2722 return -EAFNOSUPPORT; in xs_init_anyaddr()
2734 if (args->addrlen > sizeof(xprt->addr)) { in xs_setup_xprt()
2735 dprintk("RPC: xs_setup_xprt: address too large\n"); in xs_setup_xprt()
2736 return ERR_PTR(-EBADF); in xs_setup_xprt()
2739 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, in xs_setup_xprt()
2741 if (xprt == NULL) { in xs_setup_xprt()
2742 dprintk("RPC: xs_setup_xprt: couldn't allocate " in xs_setup_xprt()
2744 return ERR_PTR(-ENOMEM); in xs_setup_xprt()
2748 mutex_init(&new->recv_mutex); in xs_setup_xprt()
2749 memcpy(&xprt->addr, args->dstaddr, args->addrlen); in xs_setup_xprt()
2750 xprt->addrlen = args->addrlen; in xs_setup_xprt()
2751 if (args->srcaddr) in xs_setup_xprt()
2752 memcpy(&new->srcaddr, args->srcaddr, args->addrlen); in xs_setup_xprt()
2755 err = xs_init_anyaddr(args->dstaddr->sa_family, in xs_setup_xprt()
2756 (struct sockaddr *)&new->srcaddr); in xs_setup_xprt()
2757 if (err != 0) { in xs_setup_xprt()
2773 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2774 * @args: rpc transport creation arguments
2780 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr; in xs_setup_local()
2787 if (IS_ERR(xprt)) in xs_setup_local()
2791 xprt->prot = 0; in xs_setup_local()
2792 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_local()
2794 xprt->bind_timeout = XS_BIND_TO; in xs_setup_local()
2795 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_setup_local()
2796 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_local()
2798 xprt->ops = &xs_local_ops; in xs_setup_local()
2799 xprt->timeout = &xs_local_default_timeout; in xs_setup_local()
2801 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); in xs_setup_local()
2802 INIT_WORK(&transport->error_worker, xs_error_handle); in xs_setup_local()
2803 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket); in xs_setup_local()
2805 switch (sun->sun_family) { in xs_setup_local()
2807 if (sun->sun_path[0] != '/') { in xs_setup_local()
2808 dprintk("RPC: bad AF_LOCAL address: %s\n", in xs_setup_local()
2809 sun->sun_path); in xs_setup_local()
2810 ret = ERR_PTR(-EINVAL); in xs_setup_local()
2816 if (ret) in xs_setup_local()
2820 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_local()
2824 dprintk("RPC: set up xprt to %s via AF_LOCAL\n", in xs_setup_local()
2825 xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_setup_local()
2827 if (try_module_get(THIS_MODULE)) in xs_setup_local()
2829 ret = ERR_PTR(-EINVAL); in xs_setup_local()
2843 * xs_setup_udp - Set up transport to use a UDP socket
2844 * @args: rpc transport creation arguments
2849 struct sockaddr *addr = args->dstaddr; in xs_setup_udp()
2856 if (IS_ERR(xprt)) in xs_setup_udp()
2860 xprt->prot = IPPROTO_UDP; in xs_setup_udp()
2862 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); in xs_setup_udp()
2864 xprt->bind_timeout = XS_BIND_TO; in xs_setup_udp()
2865 xprt->reestablish_timeout = XS_UDP_REEST_TO; in xs_setup_udp()
2866 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_udp()
2868 xprt->ops = &xs_udp_ops; in xs_setup_udp()
2870 xprt->timeout = &xs_udp_default_timeout; in xs_setup_udp()
2872 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn); in xs_setup_udp()
2873 INIT_WORK(&transport->error_worker, xs_error_handle); in xs_setup_udp()
2874 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket); in xs_setup_udp()
2876 switch (addr->sa_family) { in xs_setup_udp()
2878 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) in xs_setup_udp()
2884 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) in xs_setup_udp()
2890 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_udp()
2894 if (xprt_bound(xprt)) in xs_setup_udp()
2895 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_udp()
2896 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_udp()
2897 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_udp()
2898 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_udp()
2900 dprintk("RPC: set up xprt to %s (autobind) via %s\n", in xs_setup_udp()
2901 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_udp()
2902 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_udp()
2904 if (try_module_get(THIS_MODULE)) in xs_setup_udp()
2906 ret = ERR_PTR(-EINVAL); in xs_setup_udp()
2919 * xs_setup_tcp - Set up transport to use a TCP socket
2920 * @args: rpc transport creation arguments
2925 struct sockaddr *addr = args->dstaddr; in xs_setup_tcp()
2931 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) in xs_setup_tcp()
2936 if (IS_ERR(xprt)) in xs_setup_tcp()
2940 xprt->prot = IPPROTO_TCP; in xs_setup_tcp()
2941 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_tcp()
2943 xprt->bind_timeout = XS_BIND_TO; in xs_setup_tcp()
2944 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_setup_tcp()
2945 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_tcp()
2947 xprt->ops = &xs_tcp_ops; in xs_setup_tcp()
2948 xprt->timeout = &xs_tcp_default_timeout; in xs_setup_tcp()
2950 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; in xs_setup_tcp()
2951 xprt->connect_timeout = xprt->timeout->to_initval * in xs_setup_tcp()
2952 (xprt->timeout->to_retries + 1); in xs_setup_tcp()
2954 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); in xs_setup_tcp()
2955 INIT_WORK(&transport->error_worker, xs_error_handle); in xs_setup_tcp()
2956 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); in xs_setup_tcp()
2958 switch (addr->sa_family) { in xs_setup_tcp()
2960 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) in xs_setup_tcp()
2966 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) in xs_setup_tcp()
2972 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_tcp()
2976 if (xprt_bound(xprt)) in xs_setup_tcp()
2977 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_tcp()
2978 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_tcp()
2979 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_tcp()
2980 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_tcp()
2982 dprintk("RPC: set up xprt to %s (autobind) via %s\n", in xs_setup_tcp()
2983 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_tcp()
2984 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_tcp()
2986 if (try_module_get(THIS_MODULE)) in xs_setup_tcp()
2988 ret = ERR_PTR(-EINVAL); in xs_setup_tcp()
2995 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2996 * @args: rpc transport creation arguments
3001 struct sockaddr *addr = args->dstaddr; in xs_setup_bc_tcp()
3009 if (IS_ERR(xprt)) in xs_setup_bc_tcp()
3013 xprt->prot = IPPROTO_TCP; in xs_setup_bc_tcp()
3014 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_bc_tcp()
3015 xprt->timeout = &xs_tcp_default_timeout; in xs_setup_bc_tcp()
3019 xprt->bind_timeout = 0; in xs_setup_bc_tcp()
3020 xprt->reestablish_timeout = 0; in xs_setup_bc_tcp()
3021 xprt->idle_timeout = 0; in xs_setup_bc_tcp()
3023 xprt->ops = &bc_tcp_ops; in xs_setup_bc_tcp()
3025 switch (addr->sa_family) { in xs_setup_bc_tcp()
3035 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_bc_tcp()
3039 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_bc_tcp()
3040 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_bc_tcp()
3041 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_bc_tcp()
3042 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_bc_tcp()
3051 args->bc_xprt->xpt_bc_xprt = xprt; in xs_setup_bc_tcp()
3052 xprt->bc_xprt = args->bc_xprt; in xs_setup_bc_tcp()
3053 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); in xs_setup_bc_tcp()
3054 transport->sock = bc_sock->sk_sock; in xs_setup_bc_tcp()
3055 transport->inet = bc_sock->sk_sk; in xs_setup_bc_tcp()
3063 if (try_module_get(THIS_MODULE)) in xs_setup_bc_tcp()
3066 args->bc_xprt->xpt_bc_xprt = NULL; in xs_setup_bc_tcp()
3067 args->bc_xprt->xpt_bc_xps = NULL; in xs_setup_bc_tcp()
3069 ret = ERR_PTR(-EINVAL); in xs_setup_bc_tcp()
3112 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3117 if (!sunrpc_table_header) in init_socket_xprt()
3129 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3134 if (sunrpc_table_header) { in cleanup_socket_xprt()
3152 if (!val) in param_set_uint_minmax()
3153 return -EINVAL; in param_set_uint_minmax()
3155 if (ret) in param_set_uint_minmax()
3157 if (num < min || num > max) in param_set_uint_minmax()
3158 return -EINVAL; in param_set_uint_minmax()
3159 *((unsigned int *)kp->arg) = num; in param_set_uint_minmax()
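param_set_uint_minmax() above parses a module-parameter string and rejects values outside [min, max]. A standalone userspace version of the same check, with strtoul() standing in for kstrtouint():

/* Bounded unsigned-int parse: accept only values in [min, max]. */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int set_uint_minmax(const char *val, unsigned int min,
                           unsigned int max, unsigned int *out)
{
        char *end;
        unsigned long num;

        if (!val)
                return -EINVAL;
        errno = 0;
        num = strtoul(val, &end, 0);
        if (errno || *end != '\0' || num > UINT_MAX)
                return -EINVAL;
        if (num < min || num > max)
                return -EINVAL;
        *out = (unsigned int)num;
        return 0;
}

int main(void)
{
        unsigned int port;

        if (set_uint_minmax("665", 1, 65535, &port) == 0)
                printf("accepted %u\n", port);
        if (set_uint_minmax("70000", 1, 65535, &port) != 0)
                printf("rejected out-of-range value\n");
        return 0;
}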