Lines Matching +full:rpc +full:- +full:if

1 // SPDX-License-Identifier: GPL-2.0
5 * Client-side transport implementation for sockets.
71 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
165 * Wait duration for a reply from the RPC portmapper.
170 * Delay if a UDP socket connect error occurs. This is most likely some
182 * increase over time if the server is down or not responding.
187 * TCP idle timeout; client drops the transport socket if it is idle
189 * holding port numbers when there is no RPC traffic.
193 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
204 dprintk("RPC: %s\n", msg); in xs_pktdump()
206 if (!(j & 31)) { in xs_pktdump()
207 if (j) in xs_pktdump()
225 return (struct rpc_xprt *) sk->sk_user_data; in xprt_from_sock()
230 return (struct sockaddr *) &xprt->addr; in xs_addr()
235 return (struct sockaddr_un *) &xprt->addr; in xs_addr_un()
240 return (struct sockaddr_in *) &xprt->addr; in xs_addr_in()
245 return (struct sockaddr_in6 *) &xprt->addr; in xs_addr_in6()
256 switch (sap->sa_family) { in xs_format_common_peer_addresses()
259 strlcpy(buf, sun->sun_path, sizeof(buf)); in xs_format_common_peer_addresses()
260 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
265 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
268 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); in xs_format_common_peer_addresses()
272 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
275 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); in xs_format_common_peer_addresses()
281 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_addresses()
290 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_ports()
293 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_ports()
300 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol; in xs_format_peer_addresses()
301 xprt->address_strings[RPC_DISPLAY_NETID] = netid; in xs_format_peer_addresses()
308 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]); in xs_update_peer_port()
309 kfree(xprt->address_strings[RPC_DISPLAY_PORT]); in xs_update_peer_port()
324 kfree(xprt->address_strings[i]); in xs_free_peer_addresses()
338 .iov_base = vec->iov_base + base, in xs_send_kvec()
339 .iov_len = vec->iov_len - base, in xs_send_kvec()
342 if (iov.iov_len != 0) in xs_send_kvec()
355 remainder = xdr->page_len - base; in xs_send_pagedata()
356 base += xdr->page_base; in xs_send_pagedata()
357 ppage = xdr->pages + (base >> PAGE_SHIFT); in xs_send_pagedata()
359 do_sendpage = sock->ops->sendpage; in xs_send_pagedata()
360 if (!zerocopy) in xs_send_pagedata()
363 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder); in xs_send_pagedata()
366 remainder -= len; in xs_send_pagedata()
367 if (more) in xs_send_pagedata()
369 if (remainder != 0) in xs_send_pagedata()
372 if (remainder == 0 || err != len) in xs_send_pagedata()
378 if (err > 0) { in xs_send_pagedata()
386 * xs_sendpages - write pages directly to a socket
388 * @addr: UDP only -- address of destination
389 * @addrlen: UDP only -- length of destination address
392 * @zerocopy: true if it is safe to use sendpage()
398 unsigned int remainder = xdr->len - base; in xs_sendpages()
402 if (unlikely(!sock)) in xs_sendpages()
403 return -ENOTSOCK; in xs_sendpages()
405 if (base != 0) { in xs_sendpages()
410 if (base < xdr->head[0].iov_len || addr != NULL) { in xs_sendpages()
411 unsigned int len = xdr->head[0].iov_len - base; in xs_sendpages()
412 remainder -= len; in xs_sendpages()
413 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0); in xs_sendpages()
414 if (remainder == 0 || err != len) in xs_sendpages()
419 base -= xdr->head[0].iov_len; in xs_sendpages()
421 if (base < xdr->page_len) { in xs_sendpages()
422 unsigned int len = xdr->page_len - base; in xs_sendpages()
423 remainder -= len; in xs_sendpages()
426 if (remainder == 0 || sent != len) in xs_sendpages()
430 base -= xdr->page_len; in xs_sendpages()
432 if (base >= xdr->tail[0].iov_len) in xs_sendpages()
434 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0); in xs_sendpages()
436 if (err > 0) { in xs_sendpages()
445 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); in xs_nospace_callback()
447 transport->inet->sk_write_pending--; in xs_nospace_callback()
451 * xs_nospace - place task on wait queue if transmit was incomplete
457 struct rpc_rqst *req = task->tk_rqstp; in xs_nospace()
458 struct rpc_xprt *xprt = req->rq_xprt; in xs_nospace()
460 struct sock *sk = transport->inet; in xs_nospace()
461 int ret = -EAGAIN; in xs_nospace()
463 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", in xs_nospace()
464 task->tk_pid, req->rq_slen - req->rq_bytes_sent, in xs_nospace()
465 req->rq_slen); in xs_nospace()
468 spin_lock_bh(&xprt->transport_lock); in xs_nospace()
471 if (xprt_connected(xprt)) { in xs_nospace()
473 sk->sk_write_pending++; in xs_nospace()
476 ret = -ENOTCONN; in xs_nospace()
478 spin_unlock_bh(&xprt->transport_lock); in xs_nospace()
481 if (ret == -EAGAIN) { in xs_nospace()
485 wq = rcu_dereference(sk->sk_wq); in xs_nospace()
486 set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); in xs_nospace()
489 sk->sk_write_space(sk); in xs_nospace()
499 u32 reclen = buf->len - sizeof(rpc_fraghdr); in xs_encode_stream_record_marker()
500 rpc_fraghdr *base = buf->head[0].iov_base; in xs_encode_stream_record_marker()
505 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
506 * @task: RPC task that manages the state of an RPC request
517 struct rpc_rqst *req = task->tk_rqstp; in xs_local_send_request()
518 struct rpc_xprt *xprt = req->rq_xprt; in xs_local_send_request()
521 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_local_send_request()
525 xs_encode_stream_record_marker(&req->rq_snd_buf); in xs_local_send_request()
528 req->rq_svec->iov_base, req->rq_svec->iov_len); in xs_local_send_request()
530 req->rq_xtime = ktime_get(); in xs_local_send_request()
531 status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent, in xs_local_send_request()
533 dprintk("RPC: %s(%u) = %d\n", in xs_local_send_request()
534 __func__, xdr->len - req->rq_bytes_sent, status); in xs_local_send_request()
536 if (status == -EAGAIN && sock_writeable(transport->inet)) in xs_local_send_request()
537 status = -ENOBUFS; in xs_local_send_request()
539 if (likely(sent > 0) || status == 0) { in xs_local_send_request()
540 req->rq_bytes_sent += sent; in xs_local_send_request()
541 req->rq_xmit_bytes_sent += sent; in xs_local_send_request()
542 if (likely(req->rq_bytes_sent >= req->rq_slen)) { in xs_local_send_request()
543 req->rq_bytes_sent = 0; in xs_local_send_request()
546 status = -EAGAIN; in xs_local_send_request()
550 case -ENOBUFS: in xs_local_send_request()
552 case -EAGAIN: in xs_local_send_request()
556 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_local_send_request()
557 -status); in xs_local_send_request()
559 case -EPIPE: in xs_local_send_request()
561 status = -ENOTCONN; in xs_local_send_request()
568 * xs_udp_send_request - write an RPC request to a UDP socket
569 * @task: address of RPC task that manages the state of an RPC request
580 struct rpc_rqst *req = task->tk_rqstp; in xs_udp_send_request()
581 struct rpc_xprt *xprt = req->rq_xprt; in xs_udp_send_request()
583 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_udp_send_request()
588 req->rq_svec->iov_base, in xs_udp_send_request()
589 req->rq_svec->iov_len); in xs_udp_send_request()
591 if (!xprt_bound(xprt)) in xs_udp_send_request()
592 return -ENOTCONN; in xs_udp_send_request()
593 req->rq_xtime = ktime_get(); in xs_udp_send_request()
594 status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen, in xs_udp_send_request()
595 xdr, req->rq_bytes_sent, true, &sent); in xs_udp_send_request()
597 dprintk("RPC: xs_udp_send_request(%u) = %d\n", in xs_udp_send_request()
598 xdr->len - req->rq_bytes_sent, status); in xs_udp_send_request()
600 /* firewall is blocking us, don't return -EAGAIN or we end up looping */ in xs_udp_send_request()
601 if (status == -EPERM) in xs_udp_send_request()
604 if (status == -EAGAIN && sock_writeable(transport->inet)) in xs_udp_send_request()
605 status = -ENOBUFS; in xs_udp_send_request()
607 if (sent > 0 || status == 0) { in xs_udp_send_request()
608 req->rq_xmit_bytes_sent += sent; in xs_udp_send_request()
609 if (sent >= req->rq_slen) in xs_udp_send_request()
612 status = -EAGAIN; in xs_udp_send_request()
617 case -ENOTSOCK: in xs_udp_send_request()
618 status = -ENOTCONN; in xs_udp_send_request()
621 case -EAGAIN: in xs_udp_send_request()
624 case -ENETUNREACH: in xs_udp_send_request()
625 case -ENOBUFS: in xs_udp_send_request()
626 case -EPIPE: in xs_udp_send_request()
627 case -ECONNREFUSED: in xs_udp_send_request()
628 case -EPERM: in xs_udp_send_request()
633 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_udp_send_request()
634 -status); in xs_udp_send_request()
641 * xs_tcp_send_request - write an RPC request to a TCP socket
642 * @task: address of RPC task that manages the state of an RPC request
652 * if sendmsg is not able to make progress?
656 struct rpc_rqst *req = task->tk_rqstp; in xs_tcp_send_request()
657 struct rpc_xprt *xprt = req->rq_xprt; in xs_tcp_send_request()
659 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_tcp_send_request()
665 xs_encode_stream_record_marker(&req->rq_snd_buf); in xs_tcp_send_request()
668 req->rq_svec->iov_base, in xs_tcp_send_request()
669 req->rq_svec->iov_len); in xs_tcp_send_request()
670 /* Don't use zero copy if this is a resend. If the RPC call in xs_tcp_send_request()
674 if (task->tk_flags & RPC_TASK_SENT) in xs_tcp_send_request()
677 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) in xs_tcp_send_request()
678 xs_tcp_set_socket_timeouts(xprt, transport->sock); in xs_tcp_send_request()
683 req->rq_xtime = ktime_get(); in xs_tcp_send_request()
686 status = xs_sendpages(transport->sock, NULL, 0, xdr, in xs_tcp_send_request()
687 req->rq_bytes_sent, zerocopy, &sent); in xs_tcp_send_request()
689 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", in xs_tcp_send_request()
690 xdr->len - req->rq_bytes_sent, status); in xs_tcp_send_request()
692 /* If we've sent the entire packet, immediately in xs_tcp_send_request()
694 req->rq_bytes_sent += sent; in xs_tcp_send_request()
695 req->rq_xmit_bytes_sent += sent; in xs_tcp_send_request()
696 if (likely(req->rq_bytes_sent >= req->rq_slen)) { in xs_tcp_send_request()
697 req->rq_bytes_sent = 0; in xs_tcp_send_request()
703 if (status == -EAGAIN ) { in xs_tcp_send_request()
705 * Return EAGAIN if we're sure we're hitting the in xs_tcp_send_request()
708 if (test_bit(SOCK_NOSPACE, &transport->sock->flags)) in xs_tcp_send_request()
713 if (sent == 0) { in xs_tcp_send_request()
714 status = -ENOBUFS; in xs_tcp_send_request()
715 if (vm_wait) in xs_tcp_send_request()
724 if (status < 0) in xs_tcp_send_request()
730 case -ENOTSOCK: in xs_tcp_send_request()
731 status = -ENOTCONN; in xs_tcp_send_request()
734 case -EAGAIN: in xs_tcp_send_request()
737 case -ECONNRESET: in xs_tcp_send_request()
738 case -ECONNREFUSED: in xs_tcp_send_request()
739 case -ENOTCONN: in xs_tcp_send_request()
740 case -EADDRINUSE: in xs_tcp_send_request()
741 case -ENOBUFS: in xs_tcp_send_request()
742 case -EPIPE: in xs_tcp_send_request()
745 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_tcp_send_request()
746 -status); in xs_tcp_send_request()
753 * xs_tcp_release_xprt - clean up after a tcp transmission
755 * @task: rpc task
757 * This cleans up if an error causes us to abort the transmission of a request.
765 if (task != xprt->snd_task) in xs_tcp_release_xprt()
767 if (task == NULL) in xs_tcp_release_xprt()
769 req = task->tk_rqstp; in xs_tcp_release_xprt()
770 if (req == NULL) in xs_tcp_release_xprt()
772 if (req->rq_bytes_sent == 0) in xs_tcp_release_xprt()
774 if (req->rq_bytes_sent == req->rq_snd_buf.len) in xs_tcp_release_xprt()
776 set_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_tcp_release_xprt()
783 transport->old_data_ready = sk->sk_data_ready; in xs_save_old_callbacks()
784 transport->old_state_change = sk->sk_state_change; in xs_save_old_callbacks()
785 transport->old_write_space = sk->sk_write_space; in xs_save_old_callbacks()
786 transport->old_error_report = sk->sk_error_report; in xs_save_old_callbacks()
791 sk->sk_data_ready = transport->old_data_ready; in xs_restore_old_callbacks()
792 sk->sk_state_change = transport->old_state_change; in xs_restore_old_callbacks()
793 sk->sk_write_space = transport->old_write_space; in xs_restore_old_callbacks()
794 sk->sk_error_report = transport->old_error_report; in xs_restore_old_callbacks()
801 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); in xs_sock_reset_state_flags()
807 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_sock_reset_connection_flags()
808 clear_bit(XPRT_CLOSING, &xprt->state); in xs_sock_reset_connection_flags()
814 * xs_error_report - callback to handle TCP socket state errors
818 * using the socket, and so we don't want to clear sk->sk_err.
825 read_lock_bh(&sk->sk_callback_lock); in xs_error_report()
826 if (!(xprt = xprt_from_sock(sk))) in xs_error_report()
829 err = -sk->sk_err; in xs_error_report()
830 if (err == 0) in xs_error_report()
832 dprintk("RPC: xs_error_report client %p, error=%d...\n", in xs_error_report()
833 xprt, -err); in xs_error_report()
834 trace_rpc_socket_error(xprt, sk->sk_socket, err); in xs_error_report()
837 read_unlock_bh(&sk->sk_callback_lock); in xs_error_report()
842 struct socket *sock = transport->sock; in xs_reset_transport()
843 struct sock *sk = transport->inet; in xs_reset_transport()
844 struct rpc_xprt *xprt = &transport->xprt; in xs_reset_transport()
846 if (sk == NULL) in xs_reset_transport()
853 if (!(current->flags & PF_WQ_WORKER)) { in xs_reset_transport()
855 set_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_reset_transport()
859 if (atomic_read(&transport->xprt.swapper)) in xs_reset_transport()
864 mutex_lock(&transport->recv_mutex); in xs_reset_transport()
865 write_lock_bh(&sk->sk_callback_lock); in xs_reset_transport()
866 transport->inet = NULL; in xs_reset_transport()
867 transport->sock = NULL; in xs_reset_transport()
869 sk->sk_user_data = NULL; in xs_reset_transport()
873 write_unlock_bh(&sk->sk_callback_lock); in xs_reset_transport()
875 mutex_unlock(&transport->recv_mutex); in xs_reset_transport()
882 * xs_close - close a socket
895 dprintk("RPC: xs_close xprt %p\n", xprt); in xs_close()
898 xprt->reestablish_timeout = 0; in xs_close()
905 dprintk("RPC: injecting transport disconnect on xprt=%p\n", in xs_inject_disconnect()
917 * xs_destroy - prepare to shutdown a transport
925 dprintk("RPC: xs_destroy xprt %p\n", xprt); in xs_destroy()
927 cancel_delayed_work_sync(&transport->connect_worker); in xs_destroy()
929 cancel_work_sync(&transport->recv_worker); in xs_destroy()
939 .count = skb->len - sizeof(rpc_fraghdr), in xs_local_copy_to_xdr()
942 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0) in xs_local_copy_to_xdr()
943 return -1; in xs_local_copy_to_xdr()
944 if (desc.count) in xs_local_copy_to_xdr()
945 return -1; in xs_local_copy_to_xdr()
967 repsize = skb->len - sizeof(rpc_fraghdr); in xs_local_data_read_skb()
968 if (repsize < 4) { in xs_local_data_read_skb()
969 dprintk("RPC: impossible RPC reply size %d\n", repsize); in xs_local_data_read_skb()
975 if (xp == NULL) in xs_local_data_read_skb()
979 spin_lock(&xprt->recv_lock); in xs_local_data_read_skb()
981 if (!rovr) in xs_local_data_read_skb()
984 spin_unlock(&xprt->recv_lock); in xs_local_data_read_skb()
985 task = rovr->rq_task; in xs_local_data_read_skb()
987 copied = rovr->rq_private_buf.buflen; in xs_local_data_read_skb()
988 if (copied > repsize) in xs_local_data_read_skb()
991 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) { in xs_local_data_read_skb()
992 dprintk("RPC: sk_buff copy failed\n"); in xs_local_data_read_skb()
993 spin_lock(&xprt->recv_lock); in xs_local_data_read_skb()
997 spin_lock(&xprt->recv_lock); in xs_local_data_read_skb()
1002 spin_unlock(&xprt->recv_lock); in xs_local_data_read_skb()
1012 mutex_lock(&transport->recv_mutex); in xs_local_data_receive()
1013 sk = transport->inet; in xs_local_data_receive()
1014 if (sk == NULL) in xs_local_data_receive()
1018 if (skb != NULL) { in xs_local_data_receive()
1019 xs_local_data_read_skb(&transport->xprt, sk, skb); in xs_local_data_receive()
1023 if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) in xs_local_data_receive()
1025 if (need_resched()) { in xs_local_data_receive()
1026 mutex_unlock(&transport->recv_mutex); in xs_local_data_receive()
1032 mutex_unlock(&transport->recv_mutex); in xs_local_data_receive()
1043 * xs_udp_data_read_skb - receive callback for UDP sockets
1059 repsize = skb->len; in xs_udp_data_read_skb()
1060 if (repsize < 4) { in xs_udp_data_read_skb()
1061 dprintk("RPC: impossible RPC reply size %d!\n", repsize); in xs_udp_data_read_skb()
1067 if (xp == NULL) in xs_udp_data_read_skb()
1071 spin_lock(&xprt->recv_lock); in xs_udp_data_read_skb()
1073 if (!rovr) in xs_udp_data_read_skb()
1076 xprt_update_rtt(rovr->rq_task); in xs_udp_data_read_skb()
1077 spin_unlock(&xprt->recv_lock); in xs_udp_data_read_skb()
1078 task = rovr->rq_task; in xs_udp_data_read_skb()
1080 if ((copied = rovr->rq_private_buf.buflen) > repsize) in xs_udp_data_read_skb()
1083 /* Suck it into the iovec, verify checksum if not done by hw. */ in xs_udp_data_read_skb()
1084 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { in xs_udp_data_read_skb()
1085 spin_lock(&xprt->recv_lock); in xs_udp_data_read_skb()
1091 spin_lock_bh(&xprt->transport_lock); in xs_udp_data_read_skb()
1093 spin_unlock_bh(&xprt->transport_lock); in xs_udp_data_read_skb()
1094 spin_lock(&xprt->recv_lock); in xs_udp_data_read_skb()
1100 spin_unlock(&xprt->recv_lock); in xs_udp_data_read_skb()
1110 mutex_lock(&transport->recv_mutex); in xs_udp_data_receive()
1111 sk = transport->inet; in xs_udp_data_receive()
1112 if (sk == NULL) in xs_udp_data_receive()
1116 if (skb != NULL) { in xs_udp_data_receive()
1117 xs_udp_data_read_skb(&transport->xprt, sk, skb); in xs_udp_data_receive()
1121 if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) in xs_udp_data_receive()
1123 if (need_resched()) { in xs_udp_data_receive()
1124 mutex_unlock(&transport->recv_mutex); in xs_udp_data_receive()
1130 mutex_unlock(&transport->recv_mutex); in xs_udp_data_receive()
1141 * xs_data_ready - "data ready" callback for UDP sockets
1149 read_lock_bh(&sk->sk_callback_lock); in xs_data_ready()
1150 dprintk("RPC: xs_data_ready...\n"); in xs_data_ready()
1152 if (xprt != NULL) { in xs_data_ready()
1155 transport->old_data_ready(sk); in xs_data_ready()
1159 if (xprt->reestablish_timeout) in xs_data_ready()
1160 xprt->reestablish_timeout = 0; in xs_data_ready()
1161 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) in xs_data_ready()
1162 queue_work(xprtiod_workqueue, &transport->recv_worker); in xs_data_ready()
1164 read_unlock_bh(&sk->sk_callback_lock); in xs_data_ready()
1168 * Helper function to force a TCP close if the server is sending
1182 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset; in xs_tcp_read_fraghdr()
1183 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset; in xs_tcp_read_fraghdr()
1185 transport->tcp_offset += used; in xs_tcp_read_fraghdr()
1186 if (used != len) in xs_tcp_read_fraghdr()
1189 transport->tcp_reclen = ntohl(transport->tcp_fraghdr); in xs_tcp_read_fraghdr()
1190 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) in xs_tcp_read_fraghdr()
1191 transport->tcp_flags |= TCP_RCV_LAST_FRAG; in xs_tcp_read_fraghdr()
1193 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG; in xs_tcp_read_fraghdr()
1194 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; in xs_tcp_read_fraghdr()
1196 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR; in xs_tcp_read_fraghdr()
1197 transport->tcp_offset = 0; in xs_tcp_read_fraghdr()
1200 if (unlikely(transport->tcp_reclen < 8)) { in xs_tcp_read_fraghdr()
1201 dprintk("RPC: invalid TCP record fragment length\n"); in xs_tcp_read_fraghdr()
1205 dprintk("RPC: reading TCP record fragment of length %d\n", in xs_tcp_read_fraghdr()
1206 transport->tcp_reclen); in xs_tcp_read_fraghdr()
1211 if (transport->tcp_offset == transport->tcp_reclen) { in xs_tcp_check_fraghdr()
1212 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR; in xs_tcp_check_fraghdr()
1213 transport->tcp_offset = 0; in xs_tcp_check_fraghdr()
1214 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) { in xs_tcp_check_fraghdr()
1215 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; in xs_tcp_check_fraghdr()
1216 transport->tcp_flags |= TCP_RCV_COPY_XID; in xs_tcp_check_fraghdr()
1217 transport->tcp_copied = 0; in xs_tcp_check_fraghdr()
1227 len = sizeof(transport->tcp_xid) - transport->tcp_offset; in xs_tcp_read_xid()
1228 dprintk("RPC: reading XID (%zu bytes)\n", len); in xs_tcp_read_xid()
1229 p = ((char *) &transport->tcp_xid) + transport->tcp_offset; in xs_tcp_read_xid()
1231 transport->tcp_offset += used; in xs_tcp_read_xid()
1232 if (used != len) in xs_tcp_read_xid()
1234 transport->tcp_flags &= ~TCP_RCV_COPY_XID; in xs_tcp_read_xid()
1235 transport->tcp_flags |= TCP_RCV_READ_CALLDIR; in xs_tcp_read_xid()
1236 transport->tcp_copied = 4; in xs_tcp_read_xid()
1237 dprintk("RPC: reading %s XID %08x\n", in xs_tcp_read_xid()
1238 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for" in xs_tcp_read_xid()
1240 ntohl(transport->tcp_xid)); in xs_tcp_read_xid()
1252 * We want transport->tcp_offset to be 8 at the end of this routine in xs_tcp_read_calldir()
1255 * transport->tcp_offset is 4 (after having already read the xid). in xs_tcp_read_calldir()
1257 offset = transport->tcp_offset - sizeof(transport->tcp_xid); in xs_tcp_read_calldir()
1258 len = sizeof(transport->tcp_calldir) - offset; in xs_tcp_read_calldir()
1259 dprintk("RPC: reading CALL/REPLY flag (%zu bytes)\n", len); in xs_tcp_read_calldir()
1260 p = ((char *) &transport->tcp_calldir) + offset; in xs_tcp_read_calldir()
1262 transport->tcp_offset += used; in xs_tcp_read_calldir()
1263 if (used != len) in xs_tcp_read_calldir()
1265 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR; in xs_tcp_read_calldir()
1270 switch (ntohl(transport->tcp_calldir)) { in xs_tcp_read_calldir()
1272 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR; in xs_tcp_read_calldir()
1273 transport->tcp_flags |= TCP_RCV_COPY_DATA; in xs_tcp_read_calldir()
1274 transport->tcp_flags |= TCP_RPC_REPLY; in xs_tcp_read_calldir()
1277 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR; in xs_tcp_read_calldir()
1278 transport->tcp_flags |= TCP_RCV_COPY_DATA; in xs_tcp_read_calldir()
1279 transport->tcp_flags &= ~TCP_RPC_REPLY; in xs_tcp_read_calldir()
1282 dprintk("RPC: invalid request message type\n"); in xs_tcp_read_calldir()
1283 xs_tcp_force_close(&transport->xprt); in xs_tcp_read_calldir()
1298 rcvbuf = &req->rq_private_buf; in xs_tcp_read_common()
1300 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) { in xs_tcp_read_common()
1302 * Save the RPC direction in the XDR buffer in xs_tcp_read_common()
1304 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied, in xs_tcp_read_common()
1305 &transport->tcp_calldir, in xs_tcp_read_common()
1306 sizeof(transport->tcp_calldir)); in xs_tcp_read_common()
1307 transport->tcp_copied += sizeof(transport->tcp_calldir); in xs_tcp_read_common()
1308 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR; in xs_tcp_read_common()
1311 len = desc->count; in xs_tcp_read_common()
1312 if (len > transport->tcp_reclen - transport->tcp_offset) in xs_tcp_read_common()
1313 desc->count = transport->tcp_reclen - transport->tcp_offset; in xs_tcp_read_common()
1314 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, in xs_tcp_read_common()
1317 if (desc->count) { in xs_tcp_read_common()
1327 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; in xs_tcp_read_common()
1328 dprintk("RPC: XID %08x truncated request\n", in xs_tcp_read_common()
1329 ntohl(transport->tcp_xid)); in xs_tcp_read_common()
1330 dprintk("RPC: xprt = %p, tcp_copied = %lu, " in xs_tcp_read_common()
1332 xprt, transport->tcp_copied, in xs_tcp_read_common()
1333 transport->tcp_offset, transport->tcp_reclen); in xs_tcp_read_common()
1337 transport->tcp_copied += r; in xs_tcp_read_common()
1338 transport->tcp_offset += r; in xs_tcp_read_common()
1339 desc->count = len - r; in xs_tcp_read_common()
1341 dprintk("RPC: XID %08x read %zd bytes\n", in xs_tcp_read_common()
1342 ntohl(transport->tcp_xid), r); in xs_tcp_read_common()
1343 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, " in xs_tcp_read_common()
1344 "tcp_reclen = %u\n", xprt, transport->tcp_copied, in xs_tcp_read_common()
1345 transport->tcp_offset, transport->tcp_reclen); in xs_tcp_read_common()
1347 if (transport->tcp_copied == req->rq_private_buf.buflen) in xs_tcp_read_common()
1348 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; in xs_tcp_read_common()
1349 else if (transport->tcp_offset == transport->tcp_reclen) { in xs_tcp_read_common()
1350 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) in xs_tcp_read_common()
1351 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; in xs_tcp_read_common()
1356 * Finds the request corresponding to the RPC xid and invokes the common
1366 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid)); in xs_tcp_read_reply()
1369 spin_lock(&xprt->recv_lock); in xs_tcp_read_reply()
1370 req = xprt_lookup_rqst(xprt, transport->tcp_xid); in xs_tcp_read_reply()
1371 if (!req) { in xs_tcp_read_reply()
1372 dprintk("RPC: XID %08x request not found!\n", in xs_tcp_read_reply()
1373 ntohl(transport->tcp_xid)); in xs_tcp_read_reply()
1374 spin_unlock(&xprt->recv_lock); in xs_tcp_read_reply()
1375 return -1; in xs_tcp_read_reply()
1378 spin_unlock(&xprt->recv_lock); in xs_tcp_read_reply()
1382 spin_lock(&xprt->recv_lock); in xs_tcp_read_reply()
1383 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) in xs_tcp_read_reply()
1384 xprt_complete_rqst(req->rq_task, transport->tcp_copied); in xs_tcp_read_reply()
1386 spin_unlock(&xprt->recv_lock); in xs_tcp_read_reply()
1390 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1395 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1396 * connection and return -1.
1406 req = xprt_lookup_bc_request(xprt, transport->tcp_xid); in xs_tcp_read_callback()
1407 if (req == NULL) { in xs_tcp_read_callback()
1410 return -1; in xs_tcp_read_callback()
1413 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid)); in xs_tcp_read_callback()
1416 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) in xs_tcp_read_callback()
1417 xprt_complete_bc_request(req, transport->tcp_copied); in xs_tcp_read_callback()
1428 return (transport->tcp_flags & TCP_RPC_REPLY) ? in _xs_tcp_read_data()
1437 ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0, in xs_tcp_bc_up()
1439 if (ret < 0) in xs_tcp_bc_up()
1466 if (_xs_tcp_read_data(xprt, desc) == 0) in xs_tcp_read_data()
1473 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; in xs_tcp_read_data()
1481 len = transport->tcp_reclen - transport->tcp_offset; in xs_tcp_read_discard()
1482 if (len > desc->count) in xs_tcp_read_discard()
1483 len = desc->count; in xs_tcp_read_discard()
1484 desc->count -= len; in xs_tcp_read_discard()
1485 desc->offset += len; in xs_tcp_read_discard()
1486 transport->tcp_offset += len; in xs_tcp_read_discard()
1487 dprintk("RPC: discarded %zu bytes\n", len); in xs_tcp_read_discard()
1493 struct rpc_xprt *xprt = rd_desc->arg.data; in xs_tcp_data_recv()
1502 dprintk("RPC: xs_tcp_data_recv started\n"); in xs_tcp_data_recv()
1505 /* Read in a new fragment marker if necessary */ in xs_tcp_data_recv()
1507 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) { in xs_tcp_data_recv()
1511 /* Read in the xid if necessary */ in xs_tcp_data_recv()
1512 if (transport->tcp_flags & TCP_RCV_COPY_XID) { in xs_tcp_data_recv()
1517 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) { in xs_tcp_data_recv()
1522 if (transport->tcp_flags & TCP_RCV_COPY_DATA) { in xs_tcp_data_recv()
1529 ret = len - desc.count; in xs_tcp_data_recv()
1530 if (ret < rd_desc->count) in xs_tcp_data_recv()
1531 rd_desc->count -= ret; in xs_tcp_data_recv()
1533 rd_desc->count = 0; in xs_tcp_data_recv()
1535 dprintk("RPC: xs_tcp_data_recv done\n"); in xs_tcp_data_recv()
1541 struct rpc_xprt *xprt = &transport->xprt; in xs_tcp_data_receive()
1550 mutex_lock(&transport->recv_mutex); in xs_tcp_data_receive()
1551 sk = transport->inet; in xs_tcp_data_receive()
1552 if (sk == NULL) in xs_tcp_data_receive()
1560 if (rd_desc.count != 0 || read < 0) { in xs_tcp_data_receive()
1561 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); in xs_tcp_data_receive()
1567 if (need_resched()) { in xs_tcp_data_receive()
1568 mutex_unlock(&transport->recv_mutex); in xs_tcp_data_receive()
1573 if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) in xs_tcp_data_receive()
1574 queue_work(xprtiod_workqueue, &transport->recv_worker); in xs_tcp_data_receive()
1576 mutex_unlock(&transport->recv_mutex); in xs_tcp_data_receive()
1588 * xs_tcp_state_change - callback to handle TCP socket state changes
1597 read_lock_bh(&sk->sk_callback_lock); in xs_tcp_state_change()
1598 if (!(xprt = xprt_from_sock(sk))) in xs_tcp_state_change()
1600 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); in xs_tcp_state_change()
1601 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n", in xs_tcp_state_change()
1602 sk->sk_state, xprt_connected(xprt), in xs_tcp_state_change()
1605 sk->sk_shutdown); in xs_tcp_state_change()
1608 trace_rpc_socket_state_change(xprt, sk->sk_socket); in xs_tcp_state_change()
1609 switch (sk->sk_state) { in xs_tcp_state_change()
1611 spin_lock(&xprt->transport_lock); in xs_tcp_state_change()
1612 if (!xprt_test_and_set_connected(xprt)) { in xs_tcp_state_change()
1615 transport->tcp_offset = 0; in xs_tcp_state_change()
1616 transport->tcp_reclen = 0; in xs_tcp_state_change()
1617 transport->tcp_copied = 0; in xs_tcp_state_change()
1618 transport->tcp_flags = in xs_tcp_state_change()
1620 xprt->connect_cookie++; in xs_tcp_state_change()
1621 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); in xs_tcp_state_change()
1624 xprt->stat.connect_count++; in xs_tcp_state_change()
1625 xprt->stat.connect_time += (long)jiffies - in xs_tcp_state_change()
1626 xprt->stat.connect_start; in xs_tcp_state_change()
1627 xprt_wake_pending_tasks(xprt, -EAGAIN); in xs_tcp_state_change()
1629 spin_unlock(&xprt->transport_lock); in xs_tcp_state_change()
1633 xprt->connect_cookie++; in xs_tcp_state_change()
1634 xprt->reestablish_timeout = 0; in xs_tcp_state_change()
1635 set_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1637 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1638 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_tcp_state_change()
1643 xprt->connect_cookie++; in xs_tcp_state_change()
1644 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1649 * If the server closed down the connection, make sure that in xs_tcp_state_change()
1652 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) in xs_tcp_state_change()
1653 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_tcp_state_change()
1656 set_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1658 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1662 if (test_and_clear_bit(XPRT_SOCK_CONNECTING, in xs_tcp_state_change()
1663 &transport->sock_state)) in xs_tcp_state_change()
1665 clear_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1666 if (sk->sk_err) in xs_tcp_state_change()
1667 xprt_wake_pending_tasks(xprt, -sk->sk_err); in xs_tcp_state_change()
1672 read_unlock_bh(&sk->sk_callback_lock); in xs_tcp_state_change()
1680 if (!sk->sk_socket) in xs_write_space()
1682 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in xs_write_space()
1684 if (unlikely(!(xprt = xprt_from_sock(sk)))) in xs_write_space()
1687 wq = rcu_dereference(sk->sk_wq); in xs_write_space()
1688 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0) in xs_write_space()
1697 * xs_udp_write_space - callback invoked when socket buffer space
1708 read_lock_bh(&sk->sk_callback_lock); in xs_udp_write_space()
1711 if (sock_writeable(sk)) in xs_udp_write_space()
1714 read_unlock_bh(&sk->sk_callback_lock); in xs_udp_write_space()
1718 * xs_tcp_write_space - callback invoked when socket buffer space
1729 read_lock_bh(&sk->sk_callback_lock); in xs_tcp_write_space()
1732 if (sk_stream_is_writeable(sk)) in xs_tcp_write_space()
1735 read_unlock_bh(&sk->sk_callback_lock); in xs_tcp_write_space()
1741 struct sock *sk = transport->inet; in xs_udp_do_set_buffer_size()
1743 if (transport->rcvsize) { in xs_udp_do_set_buffer_size()
1744 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; in xs_udp_do_set_buffer_size()
1745 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2; in xs_udp_do_set_buffer_size()
1747 if (transport->sndsize) { in xs_udp_do_set_buffer_size()
1748 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; in xs_udp_do_set_buffer_size()
1749 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2; in xs_udp_do_set_buffer_size()
1750 sk->sk_write_space(sk); in xs_udp_do_set_buffer_size()
1755 * xs_udp_set_buffer_size - set send and receive limits
1766 transport->sndsize = 0; in xs_udp_set_buffer_size()
1767 if (sndsize) in xs_udp_set_buffer_size()
1768 transport->sndsize = sndsize + 1024; in xs_udp_set_buffer_size()
1769 transport->rcvsize = 0; in xs_udp_set_buffer_size()
1770 if (rcvsize) in xs_udp_set_buffer_size()
1771 transport->rcvsize = rcvsize + 1024; in xs_udp_set_buffer_size()
1777 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1784 spin_lock_bh(&xprt->transport_lock); in xs_udp_timer()
1785 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); in xs_udp_timer()
1786 spin_unlock_bh(&xprt->transport_lock); in xs_udp_timer()
1795 if (max < min) in xs_get_random_port()
1796 return -EADDRINUSE; in xs_get_random_port()
1797 range = max - min + 1; in xs_get_random_port()
1803 * xs_set_reuseaddr_port - set the socket's port and address reuse options
1822 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0) in xs_sock_getport()
1826 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port); in xs_sock_getport()
1829 port = ntohs(((struct sockaddr_in *)&buf)->sin_port); in xs_sock_getport()
1836 * xs_set_port - reset the port number in the remote endpoint address
1843 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); in xs_set_port()
1851 if (transport->srcport == 0) in xs_set_srcport()
1852 transport->srcport = xs_sock_getport(sock); in xs_set_srcport()
1857 int port = transport->srcport; in xs_get_srcport()
1859 if (port == 0 && transport->xprt.resvport) in xs_get_srcport()
1866 if (transport->srcport != 0) in xs_next_srcport()
1867 transport->srcport = 0; in xs_next_srcport()
1868 if (!transport->xprt.resvport) in xs_next_srcport()
1870 if (port <= xprt_min_resvport || port > xprt_max_resvport) in xs_next_srcport()
1872 return --port; in xs_next_srcport()
1882 * If we are asking for any ephemeral port (i.e. port == 0 && in xs_bind()
1883 * transport->xprt.resvport == 0), don't bind. Let the local in xs_bind()
1892 * If we're asking for any reserved port (i.e. port == 0 && in xs_bind()
1893 * transport->xprt.resvport == 1) xs_get_srcport above will in xs_bind()
1894 * ensure that port is non-zero and we will bind as needed. in xs_bind()
1896 if (port <= 0) in xs_bind()
1899 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); in xs_bind()
1903 transport->xprt.addrlen); in xs_bind()
1904 if (err == 0) { in xs_bind()
1905 transport->srcport = port; in xs_bind()
1910 if (port > last) in xs_bind()
1912 } while (err == -EADDRINUSE && nloop != 2); in xs_bind()
1914 if (myaddr.ss_family == AF_INET) in xs_bind()
1915 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__, in xs_bind()
1916 &((struct sockaddr_in *)&myaddr)->sin_addr, in xs_bind()
1919 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__, in xs_bind()
1920 &((struct sockaddr_in6 *)&myaddr)->sin6_addr, in xs_bind()
1930 xprt_set_bound(task->tk_xprt); in xs_local_rpcbind()
1943 struct sock *sk = sock->sk; in xs_reclassify_socketu()
1945 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", in xs_reclassify_socketu()
1946 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); in xs_reclassify_socketu()
1951 struct sock *sk = sock->sk; in xs_reclassify_socket4()
1953 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", in xs_reclassify_socket4()
1954 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); in xs_reclassify_socket4()
1959 struct sock *sk = sock->sk; in xs_reclassify_socket6()
1961 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", in xs_reclassify_socket6()
1962 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); in xs_reclassify_socket6()
1967 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk))) in xs_reclassify_socket()
1999 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1); in xs_create_sock()
2000 if (err < 0) { in xs_create_sock()
2001 dprintk("RPC: can't create %d transport socket (%d).\n", in xs_create_sock()
2002 protocol, -err); in xs_create_sock()
2007 if (reuseport) in xs_create_sock()
2011 if (err) { in xs_create_sock()
2027 if (!transport->inet) { in xs_local_finish_connecting()
2028 struct sock *sk = sock->sk; in xs_local_finish_connecting()
2030 write_lock_bh(&sk->sk_callback_lock); in xs_local_finish_connecting()
2034 sk->sk_user_data = xprt; in xs_local_finish_connecting()
2035 sk->sk_data_ready = xs_data_ready; in xs_local_finish_connecting()
2036 sk->sk_write_space = xs_udp_write_space; in xs_local_finish_connecting()
2038 sk->sk_error_report = xs_error_report; in xs_local_finish_connecting()
2039 sk->sk_allocation = GFP_NOIO; in xs_local_finish_connecting()
2044 transport->sock = sock; in xs_local_finish_connecting()
2045 transport->inet = sk; in xs_local_finish_connecting()
2047 write_unlock_bh(&sk->sk_callback_lock); in xs_local_finish_connecting()
2051 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); in xs_local_finish_connecting()
2055 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
2060 struct rpc_xprt *xprt = &transport->xprt; in xs_local_setup_socket()
2062 int status = -EIO; in xs_local_setup_socket()
2064 status = __sock_create(xprt->xprt_net, AF_LOCAL, in xs_local_setup_socket()
2066 if (status < 0) { in xs_local_setup_socket()
2067 dprintk("RPC: can't create AF_LOCAL " in xs_local_setup_socket()
2068 "transport socket (%d).\n", -status); in xs_local_setup_socket()
2073 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n", in xs_local_setup_socket()
2074 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
2080 dprintk("RPC: xprt %p connected to %s\n", in xs_local_setup_socket()
2081 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
2082 xprt->stat.connect_count++; in xs_local_setup_socket()
2083 xprt->stat.connect_time += (long)jiffies - in xs_local_setup_socket()
2084 xprt->stat.connect_start; in xs_local_setup_socket()
2086 case -ENOBUFS: in xs_local_setup_socket()
2088 case -ENOENT: in xs_local_setup_socket()
2089 dprintk("RPC: xprt %p: socket %s does not exist\n", in xs_local_setup_socket()
2090 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
2092 case -ECONNREFUSED: in xs_local_setup_socket()
2093 dprintk("RPC: xprt %p: connection refused for %s\n", in xs_local_setup_socket()
2094 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
2098 __func__, -status, in xs_local_setup_socket()
2099 xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
2113 if (RPC_IS_ASYNC(task)) { in xs_local_connect()
2116 * filesystem namespace of the process making the rpc in xs_local_connect()
2119 * If we want to support asynchronous AF_LOCAL calls, in xs_local_connect()
2123 rpc_exit(task, -ENOTCONN); in xs_local_connect()
2127 if (ret && !RPC_IS_SOFTCONN(task)) in xs_local_connect()
2131 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
2143 * If there's no sock, then we have nothing to set. The in xs_set_memalloc()
2146 if (!transport->inet) in xs_set_memalloc()
2148 if (atomic_read(&xprt->swapper)) in xs_set_memalloc()
2149 sk_set_memalloc(transport->inet); in xs_set_memalloc()
2153 * xs_enable_swap - Tag this transport as being used for swap.
2157 * optionally mark it for swapping if it wasn't already.
2164 if (atomic_inc_return(&xprt->swapper) != 1) in xs_enable_swap()
2166 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) in xs_enable_swap()
2167 return -ERESTARTSYS; in xs_enable_swap()
2168 if (xs->inet) in xs_enable_swap()
2169 sk_set_memalloc(xs->inet); in xs_enable_swap()
2175 * xs_disable_swap - Untag this transport as being used for swap.
2178 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2186 if (!atomic_dec_and_test(&xprt->swapper)) in xs_disable_swap()
2188 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) in xs_disable_swap()
2190 if (xs->inet) in xs_disable_swap()
2191 sk_clear_memalloc(xs->inet); in xs_disable_swap()
2202 return -EINVAL; in xs_enable_swap()
2215 if (!transport->inet) { in xs_udp_finish_connecting()
2216 struct sock *sk = sock->sk; in xs_udp_finish_connecting()
2218 write_lock_bh(&sk->sk_callback_lock); in xs_udp_finish_connecting()
2222 sk->sk_user_data = xprt; in xs_udp_finish_connecting()
2223 sk->sk_data_ready = xs_data_ready; in xs_udp_finish_connecting()
2224 sk->sk_write_space = xs_udp_write_space; in xs_udp_finish_connecting()
2226 sk->sk_allocation = GFP_NOIO; in xs_udp_finish_connecting()
2231 transport->sock = sock; in xs_udp_finish_connecting()
2232 transport->inet = sk; in xs_udp_finish_connecting()
2236 write_unlock_bh(&sk->sk_callback_lock); in xs_udp_finish_connecting()
2240 xprt->stat.connect_start = jiffies; in xs_udp_finish_connecting()
2247 struct rpc_xprt *xprt = &transport->xprt; in xs_udp_setup_socket()
2249 int status = -EIO; in xs_udp_setup_socket()
2252 xs_addr(xprt)->sa_family, SOCK_DGRAM, in xs_udp_setup_socket()
2254 if (IS_ERR(sock)) in xs_udp_setup_socket()
2257 dprintk("RPC: worker connecting xprt %p via %s to " in xs_udp_setup_socket()
2259 xprt->address_strings[RPC_DISPLAY_PROTO], in xs_udp_setup_socket()
2260 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_udp_setup_socket()
2261 xprt->address_strings[RPC_DISPLAY_PORT]); in xs_udp_setup_socket()
2273 * xs_tcp_shutdown - gracefully shut down a TCP socket
2282 struct socket *sock = transport->sock; in xs_tcp_shutdown()
2283 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE; in xs_tcp_shutdown()
2285 if (sock == NULL) in xs_tcp_shutdown()
2307 spin_lock_bh(&xprt->transport_lock); in xs_tcp_set_socket_timeouts()
2308 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); in xs_tcp_set_socket_timeouts()
2309 keepcnt = xprt->timeout->to_retries + 1; in xs_tcp_set_socket_timeouts()
2310 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * in xs_tcp_set_socket_timeouts()
2311 (xprt->timeout->to_retries + 1); in xs_tcp_set_socket_timeouts()
2312 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); in xs_tcp_set_socket_timeouts()
2313 spin_unlock_bh(&xprt->transport_lock); in xs_tcp_set_socket_timeouts()
2338 spin_lock_bh(&xprt->transport_lock); in xs_tcp_set_connect_timeout()
2339 if (reconnect_timeout < xprt->max_reconnect_timeout) in xs_tcp_set_connect_timeout()
2340 xprt->max_reconnect_timeout = reconnect_timeout; in xs_tcp_set_connect_timeout()
2341 if (connect_timeout < xprt->connect_timeout) { in xs_tcp_set_connect_timeout()
2342 memcpy(&to, xprt->timeout, sizeof(to)); in xs_tcp_set_connect_timeout()
2345 if (initval < XS_TCP_INIT_REEST_TO << 1) in xs_tcp_set_connect_timeout()
2349 memcpy(&transport->tcp_timeout, &to, in xs_tcp_set_connect_timeout()
2350 sizeof(transport->tcp_timeout)); in xs_tcp_set_connect_timeout()
2351 xprt->timeout = &transport->tcp_timeout; in xs_tcp_set_connect_timeout()
2352 xprt->connect_timeout = connect_timeout; in xs_tcp_set_connect_timeout()
2354 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); in xs_tcp_set_connect_timeout()
2355 spin_unlock_bh(&xprt->transport_lock); in xs_tcp_set_connect_timeout()
2361 int ret = -ENOTCONN; in xs_tcp_finish_connecting()
2363 if (!transport->inet) { in xs_tcp_finish_connecting()
2364 struct sock *sk = sock->sk; in xs_tcp_finish_connecting()
2367 /* Avoid temporary address, they are bad for long-lived in xs_tcp_finish_connecting()
2379 write_lock_bh(&sk->sk_callback_lock); in xs_tcp_finish_connecting()
2383 sk->sk_user_data = xprt; in xs_tcp_finish_connecting()
2384 sk->sk_data_ready = xs_data_ready; in xs_tcp_finish_connecting()
2385 sk->sk_state_change = xs_tcp_state_change; in xs_tcp_finish_connecting()
2386 sk->sk_write_space = xs_tcp_write_space; in xs_tcp_finish_connecting()
2388 sk->sk_error_report = xs_error_report; in xs_tcp_finish_connecting()
2389 sk->sk_allocation = GFP_NOIO; in xs_tcp_finish_connecting()
2393 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; in xs_tcp_finish_connecting()
2398 transport->sock = sock; in xs_tcp_finish_connecting()
2399 transport->inet = sk; in xs_tcp_finish_connecting()
2401 write_unlock_bh(&sk->sk_callback_lock); in xs_tcp_finish_connecting()
2404 if (!xprt_bound(xprt)) in xs_tcp_finish_connecting()
2410 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); in xs_tcp_finish_connecting()
2411 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); in xs_tcp_finish_connecting()
2416 case -EINPROGRESS: in xs_tcp_finish_connecting()
2418 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) in xs_tcp_finish_connecting()
2419 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_tcp_finish_connecting()
2421 case -EADDRNOTAVAIL: in xs_tcp_finish_connecting()
2423 transport->srcport = 0; in xs_tcp_finish_connecting()
2430 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2438 struct socket *sock = transport->sock; in xs_tcp_setup_socket()
2439 struct rpc_xprt *xprt = &transport->xprt; in xs_tcp_setup_socket()
2440 int status = -EIO; in xs_tcp_setup_socket()
2442 if (!sock) { in xs_tcp_setup_socket()
2444 xs_addr(xprt)->sa_family, SOCK_STREAM, in xs_tcp_setup_socket()
2446 if (IS_ERR(sock)) { in xs_tcp_setup_socket()
2452 dprintk("RPC: worker connecting xprt %p via %s to " in xs_tcp_setup_socket()
2454 xprt->address_strings[RPC_DISPLAY_PROTO], in xs_tcp_setup_socket()
2455 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_tcp_setup_socket()
2456 xprt->address_strings[RPC_DISPLAY_PORT]); in xs_tcp_setup_socket()
2460 dprintk("RPC: %p connect status %d connected %d sock state %d\n", in xs_tcp_setup_socket()
2461 xprt, -status, xprt_connected(xprt), in xs_tcp_setup_socket()
2462 sock->sk->sk_state); in xs_tcp_setup_socket()
2468 case -EADDRNOTAVAIL: in xs_tcp_setup_socket()
2475 case -EINPROGRESS: in xs_tcp_setup_socket()
2476 case -EALREADY: in xs_tcp_setup_socket()
2479 case -EINVAL: in xs_tcp_setup_socket()
2480 /* Happens, for instance, if the user specified a link in xs_tcp_setup_socket()
2481 * local IPv6 address without a scope-id. in xs_tcp_setup_socket()
2483 case -ECONNREFUSED: in xs_tcp_setup_socket()
2484 case -ECONNRESET: in xs_tcp_setup_socket()
2485 case -ENETDOWN: in xs_tcp_setup_socket()
2486 case -ENETUNREACH: in xs_tcp_setup_socket()
2487 case -EHOSTUNREACH: in xs_tcp_setup_socket()
2488 case -EADDRINUSE: in xs_tcp_setup_socket()
2489 case -ENOBUFS: in xs_tcp_setup_socket()
2491 * xs_tcp_force_close() wakes tasks with -EIO. in xs_tcp_setup_socket()
2499 status = -EAGAIN; in xs_tcp_setup_socket()
2510 start = xprt->stat.connect_start + xprt->reestablish_timeout; in xs_reconnect_delay()
2511 if (time_after(start, now)) in xs_reconnect_delay()
2512 return start - now; in xs_reconnect_delay()
2518 xprt->reestablish_timeout <<= 1; in xs_reconnect_backoff()
2519 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) in xs_reconnect_backoff()
2520 xprt->reestablish_timeout = xprt->max_reconnect_timeout; in xs_reconnect_backoff()
2521 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) in xs_reconnect_backoff()
2522 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_reconnect_backoff()
2526 * xs_connect - connect a socket to a remote endpoint
2528 * @task: address of RPC task that manages state of connect request
2530 * TCP: If the remote end dropped the connection, delay reconnecting.
2536 * If a UDP socket connect fails, the delay behavior here prevents
2546 if (transport->sock != NULL) { in xs_connect()
2547 dprintk("RPC: xs_connect delayed xprt %p for %lu " in xs_connect()
2549 xprt, xprt->reestablish_timeout / HZ); in xs_connect()
2558 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); in xs_connect()
2561 &transport->connect_worker, in xs_connect()
2566 * xs_local_print_stats - display AF_LOCAL socket-specifc stats
2575 if (xprt_connected(xprt)) in xs_local_print_stats()
2576 idle_time = (long)(jiffies - xprt->last_used) / HZ; in xs_local_print_stats()
2580 xprt->stat.bind_count, in xs_local_print_stats()
2581 xprt->stat.connect_count, in xs_local_print_stats()
2582 xprt->stat.connect_time, in xs_local_print_stats()
2584 xprt->stat.sends, in xs_local_print_stats()
2585 xprt->stat.recvs, in xs_local_print_stats()
2586 xprt->stat.bad_xids, in xs_local_print_stats()
2587 xprt->stat.req_u, in xs_local_print_stats()
2588 xprt->stat.bklog_u, in xs_local_print_stats()
2589 xprt->stat.max_slots, in xs_local_print_stats()
2590 xprt->stat.sending_u, in xs_local_print_stats()
2591 xprt->stat.pending_u); in xs_local_print_stats()
2595 * xs_udp_print_stats - display UDP socket-specifc stats
2606 transport->srcport, in xs_udp_print_stats()
2607 xprt->stat.bind_count, in xs_udp_print_stats()
2608 xprt->stat.sends, in xs_udp_print_stats()
2609 xprt->stat.recvs, in xs_udp_print_stats()
2610 xprt->stat.bad_xids, in xs_udp_print_stats()
2611 xprt->stat.req_u, in xs_udp_print_stats()
2612 xprt->stat.bklog_u, in xs_udp_print_stats()
2613 xprt->stat.max_slots, in xs_udp_print_stats()
2614 xprt->stat.sending_u, in xs_udp_print_stats()
2615 xprt->stat.pending_u); in xs_udp_print_stats()
2619 * xs_tcp_print_stats - display TCP socket-specifc stats
2629 if (xprt_connected(xprt)) in xs_tcp_print_stats()
2630 idle_time = (long)(jiffies - xprt->last_used) / HZ; in xs_tcp_print_stats()
2634 transport->srcport, in xs_tcp_print_stats()
2635 xprt->stat.bind_count, in xs_tcp_print_stats()
2636 xprt->stat.connect_count, in xs_tcp_print_stats()
2637 xprt->stat.connect_time, in xs_tcp_print_stats()
2639 xprt->stat.sends, in xs_tcp_print_stats()
2640 xprt->stat.recvs, in xs_tcp_print_stats()
2641 xprt->stat.bad_xids, in xs_tcp_print_stats()
2642 xprt->stat.req_u, in xs_tcp_print_stats()
2643 xprt->stat.bklog_u, in xs_tcp_print_stats()
2644 xprt->stat.max_slots, in xs_tcp_print_stats()
2645 xprt->stat.sending_u, in xs_tcp_print_stats()
2646 xprt->stat.pending_u); in xs_tcp_print_stats()
2650 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2656 struct rpc_rqst *rqst = task->tk_rqstp; in bc_malloc()
2657 size_t size = rqst->rq_callsize; in bc_malloc()
2661 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) { in bc_malloc()
2664 return -EINVAL; in bc_malloc()
2668 if (!page) in bc_malloc()
2669 return -ENOMEM; in bc_malloc()
2672 buf->len = PAGE_SIZE; in bc_malloc()
2674 rqst->rq_buffer = buf->data; in bc_malloc()
2675 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; in bc_malloc()
2684 void *buffer = task->tk_rqstp->rq_buffer; in bc_free()
2692 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
2698 struct xdr_buf *xbufp = &req->rq_snd_buf; in bc_sendto()
2699 struct rpc_xprt *xprt = req->rq_xprt; in bc_sendto()
2702 struct socket *sock = transport->sock; in bc_sendto()
2708 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK; in bc_sendto()
2709 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; in bc_sendto()
2711 virt_to_page(xbufp->head[0].iov_base), headoff, in bc_sendto()
2712 xbufp->tail[0].iov_base, tailoff); in bc_sendto()
2714 if (len != xbufp->len) { in bc_sendto()
2716 len = -EAGAIN; in bc_sendto()
2727 struct rpc_rqst *req = task->tk_rqstp; in bc_send_request()
2731 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); in bc_send_request()
2735 xprt = req->rq_xprt->bc_xprt; in bc_send_request()
2741 if (!mutex_trylock(&xprt->xpt_mutex)) { in bc_send_request()
2742 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL); in bc_send_request()
2743 if (!mutex_trylock(&xprt->xpt_mutex)) in bc_send_request()
2744 return -EAGAIN; in bc_send_request()
2745 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task); in bc_send_request()
2747 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) in bc_send_request()
2748 len = -ENOTCONN; in bc_send_request()
2751 mutex_unlock(&xprt->xpt_mutex); in bc_send_request()
2753 if (len > 0) in bc_send_request()
2774 dprintk("RPC: bc_destroy xprt %p\n", xprt); in bc_destroy()
2892 dprintk("RPC: %s: Bad address family\n", __func__); in xs_init_anyaddr()
2893 return -EAFNOSUPPORT; in xs_init_anyaddr()
2905 if (args->addrlen > sizeof(xprt->addr)) { in xs_setup_xprt()
2906 dprintk("RPC: xs_setup_xprt: address too large\n"); in xs_setup_xprt()
2907 return ERR_PTR(-EBADF); in xs_setup_xprt()
2910 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, in xs_setup_xprt()
2912 if (xprt == NULL) { in xs_setup_xprt()
2913 dprintk("RPC: xs_setup_xprt: couldn't allocate " in xs_setup_xprt()
2915 return ERR_PTR(-ENOMEM); in xs_setup_xprt()
2919 mutex_init(&new->recv_mutex); in xs_setup_xprt()
2920 memcpy(&xprt->addr, args->dstaddr, args->addrlen); in xs_setup_xprt()
2921 xprt->addrlen = args->addrlen; in xs_setup_xprt()
2922 if (args->srcaddr) in xs_setup_xprt()
2923 memcpy(&new->srcaddr, args->srcaddr, args->addrlen); in xs_setup_xprt()
2926 err = xs_init_anyaddr(args->dstaddr->sa_family, in xs_setup_xprt()
2927 (struct sockaddr *)&new->srcaddr); in xs_setup_xprt()
2928 if (err != 0) { in xs_setup_xprt()
2944 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2945 * @args: rpc transport creation arguments
2951 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr; in xs_setup_local()
2958 if (IS_ERR(xprt)) in xs_setup_local()
2962 xprt->prot = 0; in xs_setup_local()
2963 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); in xs_setup_local()
2964 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_local()
2966 xprt->bind_timeout = XS_BIND_TO; in xs_setup_local()
2967 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_setup_local()
2968 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_local()
2970 xprt->ops = &xs_local_ops; in xs_setup_local()
2971 xprt->timeout = &xs_local_default_timeout; in xs_setup_local()
2973 INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn); in xs_setup_local()
2974 INIT_DELAYED_WORK(&transport->connect_worker, in xs_setup_local()
2977 switch (sun->sun_family) { in xs_setup_local()
2979 if (sun->sun_path[0] != '/') { in xs_setup_local()
2980 dprintk("RPC: bad AF_LOCAL address: %s\n", in xs_setup_local()
2981 sun->sun_path); in xs_setup_local()
2982 ret = ERR_PTR(-EINVAL); in xs_setup_local()
2988 if (ret) in xs_setup_local()
2992 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_local()
2996 dprintk("RPC: set up xprt to %s via AF_LOCAL\n", in xs_setup_local()
2997 xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_setup_local()
2999 if (try_module_get(THIS_MODULE)) in xs_setup_local()
3001 ret = ERR_PTR(-EINVAL); in xs_setup_local()
3015 * xs_setup_udp - Set up transport to use a UDP socket
3016 * @args: rpc transport creation arguments
3021 struct sockaddr *addr = args->dstaddr; in xs_setup_udp()
3028 if (IS_ERR(xprt)) in xs_setup_udp()
3032 xprt->prot = IPPROTO_UDP; in xs_setup_udp()
3033 xprt->tsh_size = 0; in xs_setup_udp()
3035 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); in xs_setup_udp()
3037 xprt->bind_timeout = XS_BIND_TO; in xs_setup_udp()
3038 xprt->reestablish_timeout = XS_UDP_REEST_TO; in xs_setup_udp()
3039 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_udp()
3041 xprt->ops = &xs_udp_ops; in xs_setup_udp()
3043 xprt->timeout = &xs_udp_default_timeout; in xs_setup_udp()
3045 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn); in xs_setup_udp()
3046 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket); in xs_setup_udp()
3048 switch (addr->sa_family) { in xs_setup_udp()
3050 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) in xs_setup_udp()
3056 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) in xs_setup_udp()
3062 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_udp()
3066 if (xprt_bound(xprt)) in xs_setup_udp()
3067 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_udp()
3068 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_udp()
3069 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_udp()
3070 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_udp()
3072 dprintk("RPC: set up xprt to %s (autobind) via %s\n", in xs_setup_udp()
3073 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_udp()
3074 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_udp()
3076 if (try_module_get(THIS_MODULE)) in xs_setup_udp()
3078 ret = ERR_PTR(-EINVAL); in xs_setup_udp()
3091 * xs_setup_tcp - Set up transport to use a TCP socket
3092 * @args: rpc transport creation arguments
3097 struct sockaddr *addr = args->dstaddr; in xs_setup_tcp()
3103 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) in xs_setup_tcp()
3108 if (IS_ERR(xprt)) in xs_setup_tcp()
3112 xprt->prot = IPPROTO_TCP; in xs_setup_tcp()
3113 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); in xs_setup_tcp()
3114 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_tcp()
3116 xprt->bind_timeout = XS_BIND_TO; in xs_setup_tcp()
3117 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_setup_tcp()
3118 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_tcp()
3120 xprt->ops = &xs_tcp_ops; in xs_setup_tcp()
3121 xprt->timeout = &xs_tcp_default_timeout; in xs_setup_tcp()
3123 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; in xs_setup_tcp()
3124 xprt->connect_timeout = xprt->timeout->to_initval * in xs_setup_tcp()
3125 (xprt->timeout->to_retries + 1); in xs_setup_tcp()
3127 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); in xs_setup_tcp()
3128 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); in xs_setup_tcp()
3130 switch (addr->sa_family) { in xs_setup_tcp()
3132 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) in xs_setup_tcp()
3138 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) in xs_setup_tcp()
3144 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_tcp()
3148 if (xprt_bound(xprt)) in xs_setup_tcp()
3149 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_tcp()
3150 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_tcp()
3151 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_tcp()
3152 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_tcp()
3154 dprintk("RPC: set up xprt to %s (autobind) via %s\n", in xs_setup_tcp()
3155 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_tcp()
3156 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_tcp()
3158 if (try_module_get(THIS_MODULE)) in xs_setup_tcp()
3160 ret = ERR_PTR(-EINVAL); in xs_setup_tcp()
3167 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
3168 * @args: rpc transport creation arguments
3173 struct sockaddr *addr = args->dstaddr; in xs_setup_bc_tcp()
3181 if (IS_ERR(xprt)) in xs_setup_bc_tcp()
3185 xprt->prot = IPPROTO_TCP; in xs_setup_bc_tcp()
3186 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); in xs_setup_bc_tcp()
3187 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_bc_tcp()
3188 xprt->timeout = &xs_tcp_default_timeout; in xs_setup_bc_tcp()
3192 xprt->bind_timeout = 0; in xs_setup_bc_tcp()
3193 xprt->reestablish_timeout = 0; in xs_setup_bc_tcp()
3194 xprt->idle_timeout = 0; in xs_setup_bc_tcp()
3196 xprt->ops = &bc_tcp_ops; in xs_setup_bc_tcp()
3198 switch (addr->sa_family) { in xs_setup_bc_tcp()
3208 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_bc_tcp()
3212 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_bc_tcp()
3213 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_bc_tcp()
3214 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_bc_tcp()
3215 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_bc_tcp()
3224 args->bc_xprt->xpt_bc_xprt = xprt; in xs_setup_bc_tcp()
3225 xprt->bc_xprt = args->bc_xprt; in xs_setup_bc_tcp()
3226 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); in xs_setup_bc_tcp()
3227 transport->sock = bc_sock->sk_sock; in xs_setup_bc_tcp()
3228 transport->inet = bc_sock->sk_sk; in xs_setup_bc_tcp()
3236 if (try_module_get(THIS_MODULE)) in xs_setup_bc_tcp()
3239 args->bc_xprt->xpt_bc_xprt = NULL; in xs_setup_bc_tcp()
3240 args->bc_xprt->xpt_bc_xps = NULL; in xs_setup_bc_tcp()
3242 ret = ERR_PTR(-EINVAL); in xs_setup_bc_tcp()
3281 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3286 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) in init_socket_xprt()
3287 if (!sunrpc_table_header) in init_socket_xprt()
3300 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3305 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) in cleanup_socket_xprt()
3306 if (sunrpc_table_header) { in cleanup_socket_xprt()
3325 if (!val) in param_set_uint_minmax()
3326 return -EINVAL; in param_set_uint_minmax()
3328 if (ret) in param_set_uint_minmax()
3330 if (num < min || num > max) in param_set_uint_minmax()
3331 return -EINVAL; in param_set_uint_minmax()
3332 *((unsigned int *)kp->arg) = num; in param_set_uint_minmax()