Lines matching full:smc — full-word occurrences of "smc" in the Linux SMC (AF_SMC) socket layer source

2  *  Shared Memory Communications over RDMA (SMC-R) and RoCE
18 #define KMSG_COMPONENT "smc"
30 #include <net/smc.h>
33 #include "smc.h"
54 struct smc_sock *smc = smc_sk(sk); in smc_set_keepalive() local
56 smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val); in smc_set_keepalive()
95 .name = "SMC",
121 struct smc_sock *smc; in smc_release() local
127 smc = smc_sk(sk); in smc_release()
130 flush_work(&smc->connect_work); in smc_release()
131 kfree(smc->connect_info); in smc_release()
132 smc->connect_info = NULL; in smc_release()
142 if (!smc->use_fallback) { in smc_release()
143 rc = smc_close_active(smc); in smc_release()
150 if (smc->clcsock) { in smc_release()
151 if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { in smc_release()
153 rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); in smc_release()
155 mutex_lock(&smc->clcsock_release_lock); in smc_release()
156 sock_release(smc->clcsock); in smc_release()
157 smc->clcsock = NULL; in smc_release()
158 mutex_unlock(&smc->clcsock_release_lock); in smc_release()
160 if (smc->use_fallback) { in smc_release()
170 if (!smc->use_fallback && sk->sk_state == SMC_CLOSED) in smc_release()
171 smc_conn_free(&smc->conn); in smc_release()
192 struct smc_sock *smc; in smc_sock_alloc() local
205 smc = smc_sk(sk); in smc_sock_alloc()
206 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); in smc_sock_alloc()
207 INIT_WORK(&smc->connect_work, smc_connect_work); in smc_sock_alloc()
208 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); in smc_sock_alloc()
209 INIT_LIST_HEAD(&smc->accept_q); in smc_sock_alloc()
210 spin_lock_init(&smc->accept_q_lock); in smc_sock_alloc()
211 spin_lock_init(&smc->conn.send_lock); in smc_sock_alloc()
214 mutex_init(&smc->clcsock_release_lock); in smc_sock_alloc()
224 struct smc_sock *smc; in smc_bind() local
227 smc = smc_sk(sk); in smc_bind()
251 smc->clcsock->sk->sk_reuse = sk->sk_reuse; in smc_bind()
252 rc = kernel_bind(smc->clcsock, uaddr, addr_len); in smc_bind()
293 /* copy only relevant settings and flags of SOL_SOCKET level from smc to
294 * clc socket (since smc is not called for these options from net/core)
296 static void smc_copy_sock_settings_to_clc(struct smc_sock *smc) in smc_copy_sock_settings_to_clc() argument
298 smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC); in smc_copy_sock_settings_to_clc()
305 /* copy only settings and flags relevant for smc from clc to smc socket */
306 static void smc_copy_sock_settings_to_smc(struct smc_sock *smc) in smc_copy_sock_settings_to_smc() argument
308 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); in smc_copy_sock_settings_to_smc()
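
The two helpers above copy socket settings between the SMC socket and its internal TCP "clcsock", one direction each, selected by the SK_FLAGS_SMC_TO_CLC and SK_FLAGS_CLC_TO_SMC masks. The shared smc_copy_sock_settings() body is not part of the matched lines; the stand-alone sketch below only illustrates the masked flag copy that the mask names suggest, with made-up values:

    #include <stdio.h>

    /* keep dst's bits outside the mask, take src's bits inside it */
    static unsigned long copy_flags_masked(unsigned long dst, unsigned long src,
                                           unsigned long mask)
    {
        return (dst & ~mask) | (src & mask);
    }

    int main(void)
    {
        unsigned long clc_flags = 0x0f0f; /* stand-in for clcsock->sk flags */
        unsigned long smc_flags = 0xf0f0; /* stand-in for smc->sk flags */
        unsigned long mask      = 0x00ff; /* stand-in for SK_FLAGS_SMC_TO_CLC */

        /* prints 0xff0: low byte taken from smc_flags, the rest kept */
        printf("%#lx\n", copy_flags_masked(clc_flags, smc_flags, mask));
        return 0;
    }
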
330 static int smc_clnt_conf_first_link(struct smc_sock *smc) in smc_clnt_conf_first_link() argument
332 struct net *net = sock_net(smc->clcsock->sk); in smc_clnt_conf_first_link()
333 struct smc_link_group *lgr = smc->conn.lgr; in smc_clnt_conf_first_link()
346 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smc_clnt_conf_first_link()
360 if (smc_reg_rmb(link, smc->conn.rmb_desc, false)) in smc_clnt_conf_first_link()
374 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smc_clnt_conf_first_link()
391 static void smcr_conn_save_peer_info(struct smc_sock *smc, in smcr_conn_save_peer_info() argument
396 smc->conn.peer_rmbe_idx = clc->rmbe_idx; in smcr_conn_save_peer_info()
397 smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token); in smcr_conn_save_peer_info()
398 smc->conn.peer_rmbe_size = bufsize; in smcr_conn_save_peer_info()
399 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); in smcr_conn_save_peer_info()
400 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1); in smcr_conn_save_peer_info()
403 static void smcd_conn_save_peer_info(struct smc_sock *smc, in smcd_conn_save_peer_info() argument
408 smc->conn.peer_rmbe_idx = clc->dmbe_idx; in smcd_conn_save_peer_info()
409 smc->conn.peer_token = clc->token; in smcd_conn_save_peer_info()
411 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg); in smcd_conn_save_peer_info()
412 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); in smcd_conn_save_peer_info()
413 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx; in smcd_conn_save_peer_info()
416 static void smc_conn_save_peer_info(struct smc_sock *smc, in smc_conn_save_peer_info() argument
419 if (smc->conn.lgr->is_smcd) in smc_conn_save_peer_info()
420 smcd_conn_save_peer_info(smc, clc); in smc_conn_save_peer_info()
422 smcr_conn_save_peer_info(smc, clc); in smc_conn_save_peer_info()
436 static int smc_connect_fallback(struct smc_sock *smc, int reason_code) in smc_connect_fallback() argument
438 smc->use_fallback = true; in smc_connect_fallback()
439 smc->fallback_rsn = reason_code; in smc_connect_fallback()
440 smc_copy_sock_settings_to_clc(smc); in smc_connect_fallback()
441 if (smc->sk.sk_state == SMC_INIT) in smc_connect_fallback()
442 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_fallback()
447 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code) in smc_connect_decline_fallback() argument
452 if (smc->sk.sk_state == SMC_INIT) in smc_connect_decline_fallback()
453 sock_put(&smc->sk); /* passive closing */ in smc_connect_decline_fallback()
457 rc = smc_clc_send_decline(smc, reason_code); in smc_connect_decline_fallback()
459 if (smc->sk.sk_state == SMC_INIT) in smc_connect_decline_fallback()
460 sock_put(&smc->sk); /* passive closing */ in smc_connect_decline_fallback()
464 return smc_connect_fallback(smc, reason_code); in smc_connect_decline_fallback()
468 static int smc_connect_abort(struct smc_sock *smc, int reason_code, in smc_connect_abort() argument
472 smc_lgr_forget(smc->conn.lgr); in smc_connect_abort()
474 smc_conn_free(&smc->conn); in smc_connect_abort()
480 static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev, in smc_check_rdma() argument
489 smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport, vlan_id, in smc_check_rdma()
499 static int smc_check_ism(struct smc_sock *smc, struct smcd_dev **ismdev) in smc_check_ism() argument
502 smc_pnet_find_ism_resource(smc->clcsock->sk, ismdev); in smc_check_ism()
509 static int smc_connect_ism_vlan_setup(struct smc_sock *smc, in smc_connect_ism_vlan_setup() argument
521 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd, in smc_connect_ism_vlan_cleanup() argument
533 static int smc_connect_clc(struct smc_sock *smc, int smc_type, in smc_connect_clc() argument
541 rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, gid, ismdev); in smc_connect_clc()
544 /* receive SMC Accept CLC message */ in smc_connect_clc()
545 return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT); in smc_connect_clc()
549 static int smc_connect_rdma(struct smc_sock *smc, in smc_connect_rdma() argument
558 local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, in smc_connect_rdma()
567 return smc_connect_abort(smc, reason_code, 0); in smc_connect_rdma()
569 link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK]; in smc_connect_rdma()
571 smc_conn_save_peer_info(smc, aclc); in smc_connect_rdma()
574 if (smc_buf_create(smc, false)) in smc_connect_rdma()
575 return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact); in smc_connect_rdma()
580 if (smc_rmb_rtoken_handling(&smc->conn, aclc)) in smc_connect_rdma()
581 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK, in smc_connect_rdma()
584 smc_close_init(smc); in smc_connect_rdma()
585 smc_rx_init(smc); in smc_connect_rdma()
589 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK, in smc_connect_rdma()
592 if (!smc->conn.rmb_desc->reused && in smc_connect_rdma()
593 smc_reg_rmb(link, smc->conn.rmb_desc, true)) in smc_connect_rdma()
594 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB, in smc_connect_rdma()
597 smc_rmb_sync_sg_for_device(&smc->conn); in smc_connect_rdma()
599 reason_code = smc_clc_send_confirm(smc); in smc_connect_rdma()
601 return smc_connect_abort(smc, reason_code, local_contact); in smc_connect_rdma()
603 smc_tx_init(smc); in smc_connect_rdma()
607 reason_code = smc_clnt_conf_first_link(smc); in smc_connect_rdma()
609 return smc_connect_abort(smc, reason_code, in smc_connect_rdma()
614 smc_copy_sock_settings_to_clc(smc); in smc_connect_rdma()
615 if (smc->sk.sk_state == SMC_INIT) in smc_connect_rdma()
616 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_rdma()
622 static int smc_connect_ism(struct smc_sock *smc, in smc_connect_ism() argument
630 local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, in smc_connect_ism()
633 return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); in smc_connect_ism()
636 if (smc_buf_create(smc, true)) in smc_connect_ism()
637 return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact); in smc_connect_ism()
639 smc_conn_save_peer_info(smc, aclc); in smc_connect_ism()
640 smc_close_init(smc); in smc_connect_ism()
641 smc_rx_init(smc); in smc_connect_ism()
642 smc_tx_init(smc); in smc_connect_ism()
644 rc = smc_clc_send_confirm(smc); in smc_connect_ism()
646 return smc_connect_abort(smc, rc, local_contact); in smc_connect_ism()
649 smc_copy_sock_settings_to_clc(smc); in smc_connect_ism()
650 if (smc->sk.sk_state == SMC_INIT) in smc_connect_ism()
651 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_ism()
657 static int __smc_connect(struct smc_sock *smc) in __smc_connect() argument
669 sock_hold(&smc->sk); /* sock put in passive closing */ in __smc_connect()
671 if (smc->use_fallback) in __smc_connect()
672 return smc_connect_fallback(smc, smc->fallback_rsn); in __smc_connect()
674 /* if peer has not signalled SMC-capability, fall back */ in __smc_connect()
675 if (!tcp_sk(smc->clcsock->sk)->syn_smc) in __smc_connect()
676 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC); in __smc_connect()
678 /* IPSec connections opt out of SMC-R optimizations */ in __smc_connect()
679 if (using_ipsec(smc)) in __smc_connect()
680 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC); in __smc_connect()
683 if (smc_vlan_by_tcpsk(smc->clcsock, &vlan)) in __smc_connect()
684 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR); in __smc_connect()
687 if (!smc_check_ism(smc, &ismdev) && in __smc_connect()
688 !smc_connect_ism_vlan_setup(smc, ismdev, vlan)) { in __smc_connect()
695 if (!smc_check_rdma(smc, &ibdev, &ibport, vlan, gid)) { in __smc_connect()
706 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV); in __smc_connect()
709 rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, gid, ismdev); in __smc_connect()
711 smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); in __smc_connect()
712 return smc_connect_decline_fallback(smc, rc); in __smc_connect()
717 rc = smc_connect_rdma(smc, &aclc, ibdev, ibport); in __smc_connect()
719 rc = smc_connect_ism(smc, &aclc, ismdev); in __smc_connect()
723 smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); in __smc_connect()
724 return smc_connect_decline_fallback(smc, rc); in __smc_connect()
727 smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); in __smc_connect()
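
The __smc_connect() fragments show the client-side ordering: stay on the TCP fallback if the socket is already in fallback mode or the peer sent no SMC indication, decline for IPsec or when neither an ISM nor a RoCE device is found, otherwise send the CLC proposal and finish either SMC-R or SMC-D setup depending on the accept. The following self-contained toy model is not kernel code; it merely restates that ordering with illustrative strings:

    #include <stdbool.h>
    #include <stdio.h>

    static const char *clc_proposal_decision(bool peer_is_smc, bool ipsec_in_use,
                                             bool have_ism, bool have_rdma)
    {
        if (!peer_is_smc)
            return "fall back to TCP (SMC_CLC_DECL_PEERNOSMC)";
        if (ipsec_in_use)
            return "decline and fall back (SMC_CLC_DECL_IPSEC)";
        if (!have_ism && !have_rdma)
            return "decline and fall back (SMC_CLC_DECL_NOSMCDEV)";
        return "send CLC proposal, then SMC-R or SMC-D setup per the accept";
    }

    int main(void)
    {
        /* peer is SMC-capable, no IPsec, RoCE device available */
        printf("%s\n", clc_proposal_decision(true, false, false, true));
        return 0;
    }
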
733 struct smc_sock *smc = container_of(work, struct smc_sock, in smc_connect_work() local
737 lock_sock(&smc->sk); in smc_connect_work()
738 rc = kernel_connect(smc->clcsock, &smc->connect_info->addr, in smc_connect_work()
739 smc->connect_info->alen, smc->connect_info->flags); in smc_connect_work()
740 if (smc->clcsock->sk->sk_err) { in smc_connect_work()
741 smc->sk.sk_err = smc->clcsock->sk->sk_err; in smc_connect_work()
745 smc->sk.sk_err = -rc; in smc_connect_work()
749 rc = __smc_connect(smc); in smc_connect_work()
751 smc->sk.sk_err = -rc; in smc_connect_work()
754 if (smc->sk.sk_err) in smc_connect_work()
755 smc->sk.sk_state_change(&smc->sk); in smc_connect_work()
757 smc->sk.sk_write_space(&smc->sk); in smc_connect_work()
758 kfree(smc->connect_info); in smc_connect_work()
759 smc->connect_info = NULL; in smc_connect_work()
760 release_sock(&smc->sk); in smc_connect_work()
767 struct smc_sock *smc; in smc_connect() local
770 smc = smc_sk(sk); in smc_connect()
772 /* separate smc parameter checking to be safe */ in smc_connect()
790 smc_copy_sock_settings_to_clc(smc); in smc_connect()
791 tcp_sk(smc->clcsock->sk)->syn_smc = 1; in smc_connect()
793 if (smc->connect_info) { in smc_connect()
797 smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL); in smc_connect()
798 if (!smc->connect_info) { in smc_connect()
802 smc->connect_info->alen = alen; in smc_connect()
803 smc->connect_info->flags = flags ^ O_NONBLOCK; in smc_connect()
804 memcpy(&smc->connect_info->addr, addr, alen); in smc_connect()
805 schedule_work(&smc->connect_work); in smc_connect()
808 rc = kernel_connect(smc->clcsock, addr, alen, flags); in smc_connect()
812 rc = __smc_connect(smc); in smc_connect()
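
For context, a minimal userspace client is sketched below; it is not taken from the kernel tree. The address and port are placeholders, and AF_SMC (value 43 in the kernel) is defined locally in case the libc headers lack it. An SMC socket is created like a TCP socket but with family AF_SMC; if the peer or the path is not SMC-capable, the connect path above falls back to the internal TCP socket transparently, so the application code is unchanged:

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_SMC
    #define AF_SMC 43          /* kernel value; older libc headers may lack it */
    #endif

    int main(void)
    {
        struct sockaddr_in sa;
        int fd = socket(AF_SMC, SOCK_STREAM, 0);  /* protocol 0: SMC over IPv4 */

        if (fd < 0) {
            perror("socket(AF_SMC)");   /* e.g. kernel built without CONFIG_SMC */
            return 1;
        }
        memset(&sa, 0, sizeof(sa));
        sa.sin_family = AF_INET;        /* SMC sockets take INET addresses */
        sa.sin_port = htons(12345);     /* placeholder port */
        sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
            perror("connect");
            close(fd);
            return 1;
        }
        /* data travels over SMC if the handshake succeeded, over TCP otherwise */
        write(fd, "hello\n", 6);
        close(fd);
        return 0;
    }
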
924 struct smc_sock *smc = smc_sk(sk); in smc_close_non_accepted() local
930 if (!smc->use_fallback) { in smc_close_non_accepted()
931 smc_close_active(smc); in smc_close_non_accepted()
936 if (smc->clcsock) { in smc_close_non_accepted()
939 tcp = smc->clcsock; in smc_close_non_accepted()
940 smc->clcsock = NULL; in smc_close_non_accepted()
943 if (smc->use_fallback) { in smc_close_non_accepted()
948 smc_conn_free(&smc->conn); in smc_close_non_accepted()
954 static int smc_serv_conf_first_link(struct smc_sock *smc) in smc_serv_conf_first_link() argument
956 struct net *net = sock_net(smc->clcsock->sk); in smc_serv_conf_first_link()
957 struct smc_link_group *lgr = smc->conn.lgr; in smc_serv_conf_first_link()
964 if (smc_reg_rmb(link, smc->conn.rmb_desc, false)) in smc_serv_conf_first_link()
979 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smc_serv_conf_first_link()
1000 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smc_serv_conf_first_link()
1113 /* listen worker: initialize connection and buffers for SMC-D */
1224 /* check if peer is smc capable */ in smc_listen_work()
1233 * wait for and receive SMC Proposal CLC message in smc_listen_work()
1243 /* IPSec connections opt out of SMC-R optimizations */ in smc_listen_work()
1270 /* SMC not supported, decline */ in smc_listen_work()
1277 /* send SMC Accept CLC message */ in smc_listen_work()
1285 /* receive SMC Confirm CLC message */ in smc_listen_work()
1341 struct smc_sock *smc; in smc_listen() local
1344 smc = smc_sk(sk); in smc_listen()
1357 * them to the clc socket -- copy smc socket options to clc socket in smc_listen()
1359 smc_copy_sock_settings_to_clc(smc); in smc_listen()
1360 if (!smc->use_fallback) in smc_listen()
1361 tcp_sk(smc->clcsock->sk)->syn_smc = 1; in smc_listen()
1363 rc = kernel_listen(smc->clcsock, backlog); in smc_listen()
1369 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); in smc_listen()
1371 if (!schedule_work(&smc->tcp_listen_work)) in smc_listen()
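
On the passive side, smc_listen() copies the socket options to the clcsock, marks it SMC-capable (syn_smc) and starts the TCP listen plus the tcp_listen_work worker. A hedged userspace counterpart, again with a placeholder port and a local AF_SMC definition, looks like an ordinary stream server:

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_SMC
    #define AF_SMC 43
    #endif

    int main(void)
    {
        struct sockaddr_in sa;
        char buf[128];
        int lfd, cfd;
        ssize_t n;

        lfd = socket(AF_SMC, SOCK_STREAM, 0);
        if (lfd < 0) {
            perror("socket(AF_SMC)");
            return 1;
        }
        memset(&sa, 0, sizeof(sa));
        sa.sin_family = AF_INET;
        sa.sin_port = htons(12345);          /* placeholder port */
        sa.sin_addr.s_addr = htonl(INADDR_ANY);

        if (bind(lfd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
            listen(lfd, 16) < 0) {
            perror("bind/listen");
            return 1;
        }
        cfd = accept(lfd, NULL, NULL);       /* peers arrive via the TCP listen */
        if (cfd >= 0) {
            n = read(cfd, buf, sizeof(buf));
            if (n > 0)
                write(1, buf, (size_t)n);    /* echo to stdout */
            close(cfd);
        }
        close(lfd);
        return 0;
    }
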
1452 struct smc_sock *smc; in smc_getname() local
1458 smc = smc_sk(sock->sk); in smc_getname()
1460 return smc->clcsock->ops->getname(smc->clcsock, addr, peer); in smc_getname()
1466 struct smc_sock *smc; in smc_sendmsg() local
1469 smc = smc_sk(sk); in smc_sendmsg()
1478 smc->use_fallback = true; in smc_sendmsg()
1479 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; in smc_sendmsg()
1486 if (smc->use_fallback) in smc_sendmsg()
1487 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); in smc_sendmsg()
1489 rc = smc_tx_sendmsg(smc, msg, len); in smc_sendmsg()
1499 struct smc_sock *smc; in smc_recvmsg() local
1502 smc = smc_sk(sk); in smc_recvmsg()
1514 if (smc->use_fallback) { in smc_recvmsg()
1515 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); in smc_recvmsg()
1518 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags); in smc_recvmsg()
1544 struct smc_sock *smc; in smc_poll() local
1549 smc = smc_sk(sock->sk); in smc_poll()
1550 if (smc->use_fallback) { in smc_poll()
1552 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); in smc_poll()
1553 sk->sk_err = smc->clcsock->sk->sk_err; in smc_poll()
1568 if (atomic_read(&smc->conn.sndbuf_space) || in smc_poll()
1575 if (atomic_read(&smc->conn.bytes_to_rcv)) in smc_poll()
1581 if (smc->conn.urg_state == SMC_URG_VALID) in smc_poll()
1592 struct smc_sock *smc; in smc_shutdown() local
1596 smc = smc_sk(sk); in smc_shutdown()
1611 if (smc->use_fallback) { in smc_shutdown()
1612 rc = kernel_sock_shutdown(smc->clcsock, how); in smc_shutdown()
1613 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown; in smc_shutdown()
1620 rc = smc_close_active(smc); in smc_shutdown()
1623 rc = smc_close_shutdown_write(smc); in smc_shutdown()
1630 if (smc->clcsock) in smc_shutdown()
1631 rc1 = kernel_sock_shutdown(smc->clcsock, how); in smc_shutdown()
1644 struct smc_sock *smc; in smc_setsockopt() local
1647 smc = smc_sk(sk); in smc_setsockopt()
1652 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, in smc_setsockopt()
1654 if (smc->clcsock->sk->sk_err) { in smc_setsockopt()
1655 sk->sk_err = smc->clcsock->sk->sk_err; in smc_setsockopt()
1673 /* option not supported by SMC */ in smc_setsockopt()
1675 smc->use_fallback = true; in smc_setsockopt()
1676 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; in smc_setsockopt()
1678 if (!smc->use_fallback) in smc_setsockopt()
1686 if (val && !smc->use_fallback) in smc_setsockopt()
1687 mod_delayed_work(system_wq, &smc->conn.tx_work, in smc_setsockopt()
1695 if (!val && !smc->use_fallback) in smc_setsockopt()
1696 mod_delayed_work(system_wq, &smc->conn.tx_work, in smc_setsockopt()
1701 smc->sockopt_defer_accept = val; in smc_setsockopt()
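
smc_setsockopt() first forwards every option to the internal TCP clcsock (line 1652) and then handles a few TCP-level options itself; the val/!val branches that kick conn.tx_work and the sockopt_defer_accept assignment are consistent with TCP_NODELAY, TCP_CORK and TCP_DEFER_ACCEPT handling, though the case labels are outside the matched lines, so treat that mapping as an assumption. A small userspace sketch on an unconnected placeholder socket:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_SMC
    #define AF_SMC 43
    #endif

    int main(void)
    {
        int one = 1;
        int fd = socket(AF_SMC, SOCK_STREAM, 0);

        if (fd < 0) {
            perror("socket(AF_SMC)");
            return 1;
        }
        /* applied to the internal TCP socket first, then to the SMC side */
        if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
            perror("setsockopt(TCP_NODELAY)");
        close(fd);
        return 0;
    }
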
1714 struct smc_sock *smc; in smc_getsockopt() local
1716 smc = smc_sk(sock->sk); in smc_getsockopt()
1718 return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, in smc_getsockopt()
1727 struct smc_sock *smc; in smc_ioctl() local
1730 smc = smc_sk(sock->sk); in smc_ioctl()
1731 conn = &smc->conn; in smc_ioctl()
1732 lock_sock(&smc->sk); in smc_ioctl()
1733 if (smc->use_fallback) { in smc_ioctl()
1734 if (!smc->clcsock) { in smc_ioctl()
1735 release_sock(&smc->sk); in smc_ioctl()
1738 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); in smc_ioctl()
1739 release_sock(&smc->sk); in smc_ioctl()
1744 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
1745 release_sock(&smc->sk); in smc_ioctl()
1748 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
1749 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
1752 answ = atomic_read(&smc->conn.bytes_to_rcv); in smc_ioctl()
1756 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
1757 release_sock(&smc->sk); in smc_ioctl()
1760 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
1761 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
1764 answ = smc->conn.sndbuf_desc->len - in smc_ioctl()
1765 atomic_read(&smc->conn.sndbuf_space); in smc_ioctl()
1769 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
1770 release_sock(&smc->sk); in smc_ioctl()
1773 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
1774 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
1777 answ = smc_tx_prepared_sends(&smc->conn); in smc_ioctl()
1780 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
1781 release_sock(&smc->sk); in smc_ioctl()
1784 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
1785 smc->sk.sk_state == SMC_CLOSED) { in smc_ioctl()
1795 release_sock(&smc->sk); in smc_ioctl()
1798 release_sock(&smc->sk); in smc_ioctl()
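
The smc_ioctl() cases above appear to implement the usual byte-count queries: conn.bytes_to_rcv for a SIOCINQ-style read and the send-buffer fill (sndbuf_desc->len minus sndbuf_space) for a SIOCOUTQ-style read, with -EINVAL on listening sockets and 0 while the connection is in the INIT or CLOSED state. The case labels are not part of the matched lines, so the mapping is an assumption. A small userspace sketch:

    #include <linux/sockios.h>   /* SIOCINQ, SIOCOUTQ */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_SMC
    #define AF_SMC 43
    #endif

    int main(void)
    {
        int inq = 0, outq = 0;
        int fd = socket(AF_SMC, SOCK_STREAM, 0);

        if (fd < 0) {
            perror("socket(AF_SMC)");
            return 1;
        }
        /* 0 while unconnected; on an established connection these reflect the
         * SMC receive-buffer fill and the not-yet-sent send-buffer bytes */
        if (ioctl(fd, SIOCINQ, &inq) == 0 && ioctl(fd, SIOCOUTQ, &outq) == 0)
            printf("inq=%d outq=%d\n", inq, outq);
        close(fd);
        return 0;
    }
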
1807 struct smc_sock *smc; in smc_sendpage() local
1810 smc = smc_sk(sk); in smc_sendpage()
1817 if (smc->use_fallback) in smc_sendpage()
1818 rc = kernel_sendpage(smc->clcsock, page, offset, in smc_sendpage()
1838 struct smc_sock *smc; in smc_splice_read() local
1841 smc = smc_sk(sk); in smc_splice_read()
1854 if (smc->use_fallback) { in smc_splice_read()
1855 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos, in smc_splice_read()
1866 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags); in smc_splice_read()
1901 struct smc_sock *smc; in smc_create() local
1920 smc = smc_sk(sk); in smc_create()
1921 smc->use_fallback = false; /* assume rdma capability first */ in smc_create()
1922 smc->fallback_rsn = 0; in smc_create()
1924 &smc->clcsock); in smc_create()
1929 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); in smc_create()
1930 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); in smc_create()
2017 MODULE_DESCRIPTION("smc socket address family");