
Lines matching "smc" (full-word match) in the Linux kernel SMC socket layer, net/smc/af_smc.c

3  *  Shared Memory Communications over RDMA (SMC-R) and RoCE
19 #define KMSG_COMPONENT "smc"
33 #include <net/smc.h>
40 #include "smc.h"
67 struct smc_sock *smc = smc_sk(sk); in smc_set_keepalive() local
69 smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val); in smc_set_keepalive()
108 .name = "SMC",
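
The proto entry above (.name = "SMC", line 108) backs the AF_SMC address family this file implements. As a quick illustration that is not part of the matched source, a userspace program can open such a socket directly; the fallback defines for AF_SMC (43) and SMCPROTO_SMC (0) below are assumptions for C libraries whose headers predate SMC support.

#include <stdio.h>
#include <sys/socket.h>

#ifndef AF_SMC
#define AF_SMC 43            /* assumed value if the libc headers lack it */
#endif
#ifndef SMCPROTO_SMC
#define SMCPROTO_SMC 0       /* IPv4 flavor; SMCPROTO_SMC6 (1) would be IPv6 */
#endif

int main(void)
{
    /* stream socket in the SMC address family */
    int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);

    if (fd < 0) {
        perror("socket(AF_SMC)");   /* e.g. kernel built without CONFIG_SMC */
        return 1;
    }
    printf("AF_SMC socket created, fd=%d\n", fd);
    return 0;
}
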
131 static void smc_restore_fallback_changes(struct smc_sock *smc) in smc_restore_fallback_changes() argument
133 if (smc->clcsock->file) { /* non-accepted sockets have no file yet */ in smc_restore_fallback_changes()
134 smc->clcsock->file->private_data = smc->sk.sk_socket; in smc_restore_fallback_changes()
135 smc->clcsock->file = NULL; in smc_restore_fallback_changes()
139 static int __smc_release(struct smc_sock *smc) in __smc_release() argument
141 struct sock *sk = &smc->sk; in __smc_release()
144 if (!smc->use_fallback) { in __smc_release()
145 rc = smc_close_active(smc); in __smc_release()
155 rc = kernel_sock_shutdown(smc->clcsock, in __smc_release()
161 smc_restore_fallback_changes(smc); in __smc_release()
167 if (smc->clcsock) { in __smc_release()
169 smc_clcsock_release(smc); in __smc_release()
172 if (!smc->use_fallback) in __smc_release()
173 smc_conn_free(&smc->conn); in __smc_release()
182 struct smc_sock *smc; in smc_release() local
189 smc = smc_sk(sk); in smc_release()
194 if (smc->connect_nonblock && old_state == SMC_INIT) in smc_release()
195 tcp_abort(smc->clcsock->sk, ECONNABORTED); in smc_release()
197 if (cancel_work_sync(&smc->connect_work)) in smc_release()
198 sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */ in smc_release()
209 !smc->use_fallback) in smc_release()
210 smc_close_active_abort(smc); in smc_release()
212 rc = __smc_release(smc); in smc_release()
238 struct smc_sock *smc; in smc_sock_alloc() local
251 smc = smc_sk(sk); in smc_sock_alloc()
252 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); in smc_sock_alloc()
253 INIT_WORK(&smc->connect_work, smc_connect_work); in smc_sock_alloc()
254 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); in smc_sock_alloc()
255 INIT_LIST_HEAD(&smc->accept_q); in smc_sock_alloc()
256 spin_lock_init(&smc->accept_q_lock); in smc_sock_alloc()
257 spin_lock_init(&smc->conn.send_lock); in smc_sock_alloc()
260 mutex_init(&smc->clcsock_release_lock); in smc_sock_alloc()
270 struct smc_sock *smc; in smc_bind() local
273 smc = smc_sk(sk); in smc_bind()
294 if (sk->sk_state != SMC_INIT || smc->connect_nonblock) in smc_bind()
297 smc->clcsock->sk->sk_reuse = sk->sk_reuse; in smc_bind()
298 rc = kernel_bind(smc->clcsock, uaddr, addr_len); in smc_bind()
340 /* copy only relevant settings and flags of SOL_SOCKET level from smc to
341 * clc socket (since smc is not called for these options from net/core)
343 static void smc_copy_sock_settings_to_clc(struct smc_sock *smc) in smc_copy_sock_settings_to_clc() argument
345 smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC); in smc_copy_sock_settings_to_clc()
352 /* copy only settings and flags relevant for smc from clc to smc socket */
353 static void smc_copy_sock_settings_to_smc(struct smc_sock *smc) in smc_copy_sock_settings_to_smc() argument
355 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); in smc_copy_sock_settings_to_smc()
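
smc_copy_sock_settings(), called in both directions above with the SK_FLAGS_SMC_TO_CLC and SK_FLAGS_CLC_TO_SMC masks, transfers only a selected subset of socket flags between the SMC socket and its internal TCP "clcsock". A minimal sketch of that masked-copy idea, with illustrative names rather than the kernel's:

#include <stdio.h>

/* Copy only the flag bits selected by 'mask' from src to dst, preserving
 * every other dst bit.  Purely illustrative; not the kernel helper.
 */
static unsigned long copy_masked_flags(unsigned long dst, unsigned long src,
                                       unsigned long mask)
{
    dst &= ~mask;                  /* clear the bits we are about to overwrite */
    return dst | (src & mask);     /* take exactly those bits from src */
}

int main(void)
{
    /* keeps 0xf0 from dst, takes bits 0-1 from src: prints 0xf3 */
    printf("0x%lx\n", copy_masked_flags(0xf0, 0x0f, 0x03));
    return 0;
}
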
393 static int smcr_clnt_conf_first_link(struct smc_sock *smc) in smcr_clnt_conf_first_link() argument
395 struct smc_link *link = smc->conn.lnk; in smcr_clnt_conf_first_link()
409 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smcr_clnt_conf_first_link()
425 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc)) in smcr_clnt_conf_first_link()
429 smc->conn.rmb_desc->is_conf_rkey = true; in smcr_clnt_conf_first_link()
445 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smcr_clnt_conf_first_link()
456 static void smcr_conn_save_peer_info(struct smc_sock *smc, in smcr_conn_save_peer_info() argument
461 smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx; in smcr_conn_save_peer_info()
462 smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token); in smcr_conn_save_peer_info()
463 smc->conn.peer_rmbe_size = bufsize; in smcr_conn_save_peer_info()
464 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); in smcr_conn_save_peer_info()
465 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1); in smcr_conn_save_peer_info()
478 static void smcd_conn_save_peer_info(struct smc_sock *smc, in smcd_conn_save_peer_info() argument
483 smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx; in smcd_conn_save_peer_info()
484 smc->conn.peer_token = clc->d0.token; in smcd_conn_save_peer_info()
486 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg); in smcd_conn_save_peer_info()
487 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); in smcd_conn_save_peer_info()
488 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx; in smcd_conn_save_peer_info()
497 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid, in smcd_conn_save_peer_info()
499 smc->conn.lgr->peer_os = fce->os_type; in smcd_conn_save_peer_info()
500 smc->conn.lgr->peer_smc_release = fce->release; in smcd_conn_save_peer_info()
502 memcpy(smc->conn.lgr->peer_hostname, fce->hostname, in smcd_conn_save_peer_info()
507 static void smc_conn_save_peer_info(struct smc_sock *smc, in smc_conn_save_peer_info() argument
510 if (smc->conn.lgr->is_smcd) in smc_conn_save_peer_info()
511 smcd_conn_save_peer_info(smc, clc); in smc_conn_save_peer_info()
513 smcr_conn_save_peer_info(smc, clc); in smc_conn_save_peer_info()
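
The two *_conn_save_peer_info() variants above derive the send offset into the peer's RMB differently: SMC-R uses bufsize * (peer_rmbe_idx - 1), while SMC-D uses bufsize * peer_rmbe_idx and additionally shrinks peer_rmbe_size by sizeof(struct smcd_cdc_msg), presumably to reserve space for the CDC header. A small worked example of that arithmetic, with made-up sample values:

#include <stdio.h>

/* Mirrors the offset math visible above; bufsize and idx are sample
 * values, not taken from a real CLC accept message.
 */
static unsigned long smcr_tx_off(unsigned long bufsize, unsigned int idx)
{
    return bufsize * (idx - 1);    /* SMC-R: index is effectively 1-based here */
}

static unsigned long smcd_tx_off(unsigned long bufsize, unsigned int idx)
{
    return bufsize * idx;          /* SMC-D: index used as-is */
}

int main(void)
{
    unsigned long bufsize = 64 * 1024;  /* assumed 64 KiB RMB element */

    printf("SMC-R, idx 2: tx_off=%lu\n", smcr_tx_off(bufsize, 2));  /* 65536  */
    printf("SMC-D, idx 2: tx_off=%lu\n", smcd_tx_off(bufsize, 2));  /* 131072 */
    return 0;
}
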
526 static void smc_switch_to_fallback(struct smc_sock *smc) in smc_switch_to_fallback() argument
528 wait_queue_head_t *smc_wait = sk_sleep(&smc->sk); in smc_switch_to_fallback()
529 wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk); in smc_switch_to_fallback()
532 smc->use_fallback = true; in smc_switch_to_fallback()
533 if (smc->sk.sk_socket && smc->sk.sk_socket->file) { in smc_switch_to_fallback()
534 smc->clcsock->file = smc->sk.sk_socket->file; in smc_switch_to_fallback()
535 smc->clcsock->file->private_data = smc->clcsock; in smc_switch_to_fallback()
536 smc->clcsock->wq.fasync_list = in smc_switch_to_fallback()
537 smc->sk.sk_socket->wq.fasync_list; in smc_switch_to_fallback()
540 * smc socket->wq, which should be removed in smc_switch_to_fallback()
552 static int smc_connect_fallback(struct smc_sock *smc, int reason_code) in smc_connect_fallback() argument
554 smc_switch_to_fallback(smc); in smc_connect_fallback()
555 smc->fallback_rsn = reason_code; in smc_connect_fallback()
556 smc_copy_sock_settings_to_clc(smc); in smc_connect_fallback()
557 smc->connect_nonblock = 0; in smc_connect_fallback()
558 if (smc->sk.sk_state == SMC_INIT) in smc_connect_fallback()
559 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_fallback()
564 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code, in smc_connect_decline_fallback() argument
570 if (smc->sk.sk_state == SMC_INIT) in smc_connect_decline_fallback()
571 sock_put(&smc->sk); /* passive closing */ in smc_connect_decline_fallback()
575 rc = smc_clc_send_decline(smc, reason_code, version); in smc_connect_decline_fallback()
577 if (smc->sk.sk_state == SMC_INIT) in smc_connect_decline_fallback()
578 sock_put(&smc->sk); /* passive closing */ in smc_connect_decline_fallback()
582 return smc_connect_fallback(smc, reason_code); in smc_connect_decline_fallback()
586 static void smc_connect_abort(struct smc_sock *smc, int local_first) in smc_connect_abort() argument
589 smc_lgr_cleanup_early(&smc->conn); in smc_connect_abort()
591 smc_conn_free(&smc->conn); in smc_connect_abort()
596 static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini) in smc_find_rdma_device() argument
602 smc_pnet_find_roce_resource(smc->clcsock->sk, ini); in smc_find_rdma_device()
610 static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini) in smc_find_ism_device() argument
613 smc_pnet_find_ism_resource(smc->clcsock->sk, ini); in smc_find_ism_device()
636 static int smc_find_ism_v2_device_clnt(struct smc_sock *smc, in smc_find_ism_v2_device_clnt() argument
654 smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) { in smc_find_ism_v2_device_clnt()
673 static int smc_connect_ism_vlan_setup(struct smc_sock *smc, in smc_connect_ism_vlan_setup() argument
681 static int smc_find_proposal_devices(struct smc_sock *smc, in smc_find_proposal_devices() argument
688 if (smc_find_ism_device(smc, ini) || in smc_find_proposal_devices()
689 smc_connect_ism_vlan_setup(smc, ini)) { in smc_find_proposal_devices()
695 if (smc_find_rdma_device(smc, ini)) { in smc_find_proposal_devices()
702 if (smc_ism_v2_capable && smc_find_ism_v2_device_clnt(smc, ini)) in smc_find_proposal_devices()
716 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, in smc_connect_ism_vlan_cleanup() argument
732 static int smc_connect_clc(struct smc_sock *smc, in smc_connect_clc() argument
739 rc = smc_clc_send_proposal(smc, ini); in smc_connect_clc()
742 /* receive SMC Accept CLC message */ in smc_connect_clc()
743 return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN, in smc_connect_clc()
748 static int smc_connect_rdma(struct smc_sock *smc, in smc_connect_rdma() argument
761 reason_code = smc_conn_create(smc, ini); in smc_connect_rdma()
767 smc_conn_save_peer_info(smc, aclc); in smc_connect_rdma()
770 link = smc->conn.lnk; in smc_connect_rdma()
775 struct smc_link *l = &smc->conn.lgr->lnk[i]; in smc_connect_rdma()
790 smc->conn.lnk = link; in smc_connect_rdma()
794 if (smc_buf_create(smc, false)) { in smc_connect_rdma()
802 if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) { in smc_connect_rdma()
807 smc_close_init(smc); in smc_connect_rdma()
808 smc_rx_init(smc); in smc_connect_rdma()
816 if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) { in smc_connect_rdma()
821 smc_rmb_sync_sg_for_device(&smc->conn); in smc_connect_rdma()
823 reason_code = smc_clc_send_confirm(smc, ini->first_contact_local, in smc_connect_rdma()
828 smc_tx_init(smc); in smc_connect_rdma()
833 reason_code = smcr_clnt_conf_first_link(smc); in smc_connect_rdma()
840 smc_copy_sock_settings_to_clc(smc); in smc_connect_rdma()
841 smc->connect_nonblock = 0; in smc_connect_rdma()
842 if (smc->sk.sk_state == SMC_INIT) in smc_connect_rdma()
843 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_rdma()
847 smc_connect_abort(smc, ini->first_contact_local); in smc_connect_rdma()
849 smc->connect_nonblock = 0; in smc_connect_rdma()
874 static int smc_connect_ism(struct smc_sock *smc, in smc_connect_ism() argument
893 /* there is only one lgr role for SMC-D; use server lock */ in smc_connect_ism()
895 rc = smc_conn_create(smc, ini); in smc_connect_ism()
902 rc = smc_buf_create(smc, true); in smc_connect_ism()
908 smc_conn_save_peer_info(smc, aclc); in smc_connect_ism()
909 smc_close_init(smc); in smc_connect_ism()
910 smc_rx_init(smc); in smc_connect_ism()
911 smc_tx_init(smc); in smc_connect_ism()
913 rc = smc_clc_send_confirm(smc, ini->first_contact_local, in smc_connect_ism()
919 smc_copy_sock_settings_to_clc(smc); in smc_connect_ism()
920 smc->connect_nonblock = 0; in smc_connect_ism()
921 if (smc->sk.sk_state == SMC_INIT) in smc_connect_ism()
922 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_ism()
926 smc_connect_abort(smc, ini->first_contact_local); in smc_connect_ism()
928 smc->connect_nonblock = 0; in smc_connect_ism()
952 static int __smc_connect(struct smc_sock *smc) in __smc_connect() argument
961 if (smc->use_fallback) in __smc_connect()
962 return smc_connect_fallback(smc, smc->fallback_rsn); in __smc_connect()
964 /* if peer has not signalled SMC-capability, fall back */ in __smc_connect()
965 if (!tcp_sk(smc->clcsock->sk)->syn_smc) in __smc_connect()
966 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC); in __smc_connect()
968 /* IPSec connections opt out of SMC optimizations */ in __smc_connect()
969 if (using_ipsec(smc)) in __smc_connect()
970 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC, in __smc_connect()
975 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM, in __smc_connect()
984 if (smc_vlan_by_tcpsk(smc->clcsock, ini)) { in __smc_connect()
993 rc = smc_find_proposal_devices(smc, ini); in __smc_connect()
1006 rc = smc_connect_clc(smc, aclc2, ini); in __smc_connect()
1010 /* check if smc modes and versions of CLC proposal and accept match */ in __smc_connect()
1019 rc = smc_connect_rdma(smc, aclc, ini); in __smc_connect()
1021 rc = smc_connect_ism(smc, aclc, ini); in __smc_connect()
1025 smc_connect_ism_vlan_cleanup(smc, ini); in __smc_connect()
1031 smc_connect_ism_vlan_cleanup(smc, ini); in __smc_connect()
1035 return smc_connect_decline_fallback(smc, rc, version); in __smc_connect()
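
Taken together, the __smc_connect(), smc_connect_clc() and smc_connect_rdma()/smc_connect_ism() fragments above show the client side of the CLC handshake: send a Proposal, wait for an Accept, send a Confirm, and on failure send a Decline before falling back to TCP. A compact model of that ordering, for illustration only (the enum and helper below are not kernel code):

#include <stdio.h>

/* Client-side CLC flow as suggested by the listing above. */
enum clc_step {
    CLC_SEND_PROPOSAL,   /* smc_clc_send_proposal() */
    CLC_WAIT_ACCEPT,     /* smc_clc_wait_msg() for the accept */
    CLC_SEND_CONFIRM,    /* smc_clc_send_confirm() */
    CLC_DONE,
    CLC_SEND_DECLINE,    /* smc_clc_send_decline(), then fall back to TCP */
};

static enum clc_step clc_next(enum clc_step cur, int ok)
{
    if (!ok)
        return CLC_SEND_DECLINE;
    switch (cur) {
    case CLC_SEND_PROPOSAL: return CLC_WAIT_ACCEPT;
    case CLC_WAIT_ACCEPT:   return CLC_SEND_CONFIRM;
    case CLC_SEND_CONFIRM:  return CLC_DONE;
    default:                return cur;
    }
}

int main(void)
{
    enum clc_step s = CLC_SEND_PROPOSAL;

    while (s != CLC_DONE && s != CLC_SEND_DECLINE)
        s = clc_next(s, 1);    /* happy path: every step succeeds */
    printf("reached step %d (CLC_DONE=%d)\n", s, CLC_DONE);
    return 0;
}
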
1040 struct smc_sock *smc = container_of(work, struct smc_sock, in smc_connect_work() local
1042 long timeo = smc->sk.sk_sndtimeo; in smc_connect_work()
1047 lock_sock(smc->clcsock->sk); in smc_connect_work()
1048 if (smc->clcsock->sk->sk_err) { in smc_connect_work()
1049 smc->sk.sk_err = smc->clcsock->sk->sk_err; in smc_connect_work()
1050 } else if ((1 << smc->clcsock->sk->sk_state) & in smc_connect_work()
1052 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo); in smc_connect_work()
1054 ((1 << smc->clcsock->sk->sk_state) & in smc_connect_work()
1058 release_sock(smc->clcsock->sk); in smc_connect_work()
1059 lock_sock(&smc->sk); in smc_connect_work()
1060 if (rc != 0 || smc->sk.sk_err) { in smc_connect_work()
1061 smc->sk.sk_state = SMC_CLOSED; in smc_connect_work()
1063 smc->sk.sk_err = EPIPE; in smc_connect_work()
1065 smc->sk.sk_err = ECONNREFUSED; in smc_connect_work()
1067 smc->sk.sk_err = -sock_intr_errno(timeo); in smc_connect_work()
1068 sock_put(&smc->sk); /* passive closing */ in smc_connect_work()
1072 rc = __smc_connect(smc); in smc_connect_work()
1074 smc->sk.sk_err = -rc; in smc_connect_work()
1077 if (!sock_flag(&smc->sk, SOCK_DEAD)) { in smc_connect_work()
1078 if (smc->sk.sk_err) { in smc_connect_work()
1079 smc->sk.sk_state_change(&smc->sk); in smc_connect_work()
1081 smc->clcsock->sk->sk_write_space(smc->clcsock->sk); in smc_connect_work()
1082 smc->sk.sk_write_space(&smc->sk); in smc_connect_work()
1085 release_sock(&smc->sk); in smc_connect_work()
1092 struct smc_sock *smc; in smc_connect() local
1095 smc = smc_sk(sk); in smc_connect()
1097 /* separate smc parameter checking to be safe */ in smc_connect()
1115 smc_copy_sock_settings_to_clc(smc); in smc_connect()
1116 tcp_sk(smc->clcsock->sk)->syn_smc = 1; in smc_connect()
1117 if (smc->connect_nonblock) { in smc_connect()
1121 rc = kernel_connect(smc->clcsock, addr, alen, flags); in smc_connect()
1125 if (smc->use_fallback) in smc_connect()
1127 sock_hold(&smc->sk); /* sock put in passive closing */ in smc_connect()
1129 if (queue_work(smc_hs_wq, &smc->connect_work)) in smc_connect()
1130 smc->connect_nonblock = 1; in smc_connect()
1133 rc = __smc_connect(smc); in smc_connect()
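
For nonblocking callers, smc_connect() above queues connect_work and sets connect_nonblock rather than blocking, so the usual nonblocking-connect pattern works unchanged on an AF_SMC socket: connect(), poll for writability, then read SO_ERROR. Hedged userspace sketch (same assumed AF_SMC/SMCPROTO_SMC fallbacks as earlier; 127.0.0.1:12345 is an arbitrary example address):

#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef AF_SMC
#define AF_SMC 43
#endif
#ifndef SMCPROTO_SMC
#define SMCPROTO_SMC 0
#endif

int main(void)
{
    struct sockaddr_in sa = { .sin_family = AF_INET, .sin_port = htons(12345) };
    int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
    socklen_t elen = sizeof(int);
    struct pollfd pfd;
    int err = 0;

    if (fd < 0) {
        perror("socket(AF_SMC)");
        return 1;
    }
    inet_pton(AF_INET, "127.0.0.1", &sa.sin_addr);
    fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

    /* EINPROGRESS is expected: the SMC handshake worker runs asynchronously */
    if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
        errno != EINPROGRESS) {
        perror("connect");
        return 1;
    }

    pfd.fd = fd;
    pfd.events = POLLOUT;
    poll(&pfd, 1, 5000);                           /* wait up to 5 s */
    getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen);
    printf("connect result: %s\n", err ? strerror(err) : "established");
    return 0;
}
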
1182 /* new clcsock has inherited the smc listen-specific sk_data_ready in smc_clcsock_accept()
1254 struct smc_sock *smc = smc_sk(sk); in smc_close_non_accepted() local
1261 __smc_release(smc); in smc_close_non_accepted()
1267 static int smcr_serv_conf_first_link(struct smc_sock *smc) in smcr_serv_conf_first_link() argument
1269 struct smc_link *link = smc->conn.lnk; in smcr_serv_conf_first_link()
1273 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc)) in smcr_serv_conf_first_link()
1287 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smcr_serv_conf_first_link()
1298 smc->conn.rmb_desc->is_conf_rkey = true; in smcr_serv_conf_first_link()
1449 /* listen worker: initialize connection and buffers for SMC-D */
1717 /* check if peer is smc capable */ in smc_listen_work()
1726 * wait for and receive SMC Proposal CLC message in smc_listen_work()
1740 /* IPSec connections opt out of SMC optimizations */ in smc_listen_work()
1767 /* send SMC Accept CLC message */ in smc_listen_work()
1773 /* SMC-D does not need this lock any more */ in smc_listen_work()
1777 /* receive SMC Confirm CLC message */ in smc_listen_work()
1863 struct smc_sock *smc; in smc_listen() local
1866 smc = smc_sk(sk); in smc_listen()
1871 smc->connect_nonblock) in smc_listen()
1880 * them to the clc socket -- copy smc socket options to clc socket in smc_listen()
1882 smc_copy_sock_settings_to_clc(smc); in smc_listen()
1883 if (!smc->use_fallback) in smc_listen()
1884 tcp_sk(smc->clcsock->sk)->syn_smc = 1; in smc_listen()
1887 * smc-specific sk_data_ready function in smc_listen()
1889 smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready; in smc_listen()
1890 smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready; in smc_listen()
1891 smc->clcsock->sk->sk_user_data = in smc_listen()
1892 (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); in smc_listen()
1893 rc = kernel_listen(smc->clcsock, backlog); in smc_listen()
1895 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready; in smc_listen()
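
On the passive side, smc_listen() above copies the SMC socket options to the clcsock, sets syn_smc on the underlying TCP socket and swaps in its own sk_data_ready before calling kernel_listen(), so a server is written exactly as it would be for TCP, only with the SMC family. Minimal sketch (same assumed fallback defines; the port is arbitrary):

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef AF_SMC
#define AF_SMC 43
#endif
#ifndef SMCPROTO_SMC
#define SMCPROTO_SMC 0
#endif

int main(void)
{
    static const char msg[] = "hello from SMC (or TCP fallback)\n";
    struct sockaddr_in sa = { .sin_family = AF_INET,
                              .sin_port = htons(12345),
                              .sin_addr.s_addr = htonl(INADDR_ANY) };
    int one = 1;
    int lfd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);

    if (lfd < 0) {
        perror("socket(AF_SMC)");
        return 1;
    }
    /* smc_bind() copies sk_reuse to the clcsock, so SO_REUSEADDR behaves as usual */
    setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

    if (bind(lfd, (struct sockaddr *)&sa, sizeof(sa)) < 0 || listen(lfd, 16) < 0) {
        perror("bind/listen");
        return 1;
    }
    for (;;) {
        int cfd = accept(lfd, NULL, NULL);  /* SMC if both peers support it */

        if (cfd < 0)
            break;
        write(cfd, msg, sizeof(msg) - 1);
        close(cfd);
    }
    return 0;
}
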
1980 struct smc_sock *smc; in smc_getname() local
1986 smc = smc_sk(sock->sk); in smc_getname()
1988 return smc->clcsock->ops->getname(smc->clcsock, addr, peer); in smc_getname()
1994 struct smc_sock *smc; in smc_sendmsg() local
1997 smc = smc_sk(sk); in smc_sendmsg()
2000 /* SMC does not support connect with fastopen */ in smc_sendmsg()
2003 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { in smc_sendmsg()
2004 smc_switch_to_fallback(smc); in smc_sendmsg()
2005 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; in smc_sendmsg()
2017 if (smc->use_fallback) in smc_sendmsg()
2018 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); in smc_sendmsg()
2020 rc = smc_tx_sendmsg(smc, msg, len); in smc_sendmsg()
2030 struct smc_sock *smc; in smc_recvmsg() local
2033 smc = smc_sk(sk); in smc_recvmsg()
2050 if (smc->use_fallback) { in smc_recvmsg()
2051 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); in smc_recvmsg()
2054 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags); in smc_recvmsg()
2079 struct smc_sock *smc; in smc_poll() local
2085 smc = smc_sk(sock->sk); in smc_poll()
2086 if (smc->use_fallback) { in smc_poll()
2088 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); in smc_poll()
2089 sk->sk_err = smc->clcsock->sk->sk_err; in smc_poll()
2101 } else if (smc->use_fallback) { /* as result of connect_work()*/ in smc_poll()
2102 mask |= smc->clcsock->ops->poll(file, smc->clcsock, in smc_poll()
2104 sk->sk_err = smc->clcsock->sk->sk_err; in smc_poll()
2107 atomic_read(&smc->conn.sndbuf_space)) || in smc_poll()
2114 if (atomic_read(&smc->conn.bytes_to_rcv)) in smc_poll()
2120 if (smc->conn.urg_state == SMC_URG_VALID) in smc_poll()
2132 struct smc_sock *smc; in smc_shutdown() local
2137 smc = smc_sk(sk); in smc_shutdown()
2152 if (smc->use_fallback) { in smc_shutdown()
2153 rc = kernel_sock_shutdown(smc->clcsock, how); in smc_shutdown()
2154 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown; in smc_shutdown()
2164 rc = smc_close_active(smc); in smc_shutdown()
2170 rc = smc_close_shutdown_write(smc); in smc_shutdown()
2177 if (do_shutdown && smc->clcsock) in smc_shutdown()
2178 rc1 = kernel_sock_shutdown(smc->clcsock, how); in smc_shutdown()
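
smc_shutdown() above maps shutdown(2) onto either the clcsock (fallback case) or smc_close_active()/smc_close_shutdown_write(), so half-closing the write side looks no different from TCP. Hedged fragment; fd is assumed to be an already connected AF_SMC socket:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* fd is assumed to be a connected AF_SMC socket descriptor */
void finish_sending(int fd)
{
    char buf[256];

    /* stop the write direction: the peer sees EOF, reads keep working */
    if (shutdown(fd, SHUT_WR) < 0)
        perror("shutdown(SHUT_WR)");

    /* drain whatever the peer still sends, then close fully */
    while (read(fd, buf, sizeof(buf)) > 0)
        ;
    close(fd);
}
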
2191 struct smc_sock *smc; in smc_setsockopt() local
2197 smc = smc_sk(sk); in smc_setsockopt()
2202 if (unlikely(!smc->clcsock->ops->setsockopt)) in smc_setsockopt()
2205 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, in smc_setsockopt()
2207 if (smc->clcsock->sk->sk_err) { in smc_setsockopt()
2208 sk->sk_err = smc->clcsock->sk->sk_err; in smc_setsockopt()
2218 if (rc || smc->use_fallback) in smc_setsockopt()
2225 /* option not supported by SMC */ in smc_setsockopt()
2226 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { in smc_setsockopt()
2227 smc_switch_to_fallback(smc); in smc_setsockopt()
2228 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; in smc_setsockopt()
2238 mod_delayed_work(smc->conn.lgr->tx_wq, in smc_setsockopt()
2239 &smc->conn.tx_work, 0); in smc_setsockopt()
2247 mod_delayed_work(smc->conn.lgr->tx_wq, in smc_setsockopt()
2248 &smc->conn.tx_work, 0); in smc_setsockopt()
2252 smc->sockopt_defer_accept = val; in smc_setsockopt()
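
The smc_setsockopt() fragments above first forward the option to the clcsock and then, for latency-related options (the case labels, presumably TCP_NODELAY and TCP_CORK, fall outside the matched lines), also schedule the connection's tx_work so queued SMC data is flushed under the new setting; the value stored in sockopt_defer_accept likewise looks like TCP_DEFER_ACCEPT handling. From userspace it is the familiar call. Sketch, assuming a connected AF_SMC descriptor fd:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

/* fd is assumed to be a connected AF_SMC socket descriptor */
void disable_nagle(int fd)
{
    int one = 1;

    /* forwarded to the internal TCP clcsock; on the SMC side the
     * tx worker is kicked so pending data goes out immediately
     */
    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
        perror("setsockopt(TCP_NODELAY)");
}
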
2266 struct smc_sock *smc; in smc_getsockopt() local
2268 smc = smc_sk(sock->sk); in smc_getsockopt()
2270 if (unlikely(!smc->clcsock->ops->getsockopt)) in smc_getsockopt()
2272 return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, in smc_getsockopt()
2281 struct smc_sock *smc; in smc_ioctl() local
2284 smc = smc_sk(sock->sk); in smc_ioctl()
2285 conn = &smc->conn; in smc_ioctl()
2286 lock_sock(&smc->sk); in smc_ioctl()
2287 if (smc->use_fallback) { in smc_ioctl()
2288 if (!smc->clcsock) { in smc_ioctl()
2289 release_sock(&smc->sk); in smc_ioctl()
2292 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); in smc_ioctl()
2293 release_sock(&smc->sk); in smc_ioctl()
2298 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
2299 release_sock(&smc->sk); in smc_ioctl()
2302 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
2303 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
2306 answ = atomic_read(&smc->conn.bytes_to_rcv); in smc_ioctl()
2310 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
2311 release_sock(&smc->sk); in smc_ioctl()
2314 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
2315 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
2318 answ = smc->conn.sndbuf_desc->len - in smc_ioctl()
2319 atomic_read(&smc->conn.sndbuf_space); in smc_ioctl()
2323 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
2324 release_sock(&smc->sk); in smc_ioctl()
2327 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
2328 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
2331 answ = smc_tx_prepared_sends(&smc->conn); in smc_ioctl()
2334 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
2335 release_sock(&smc->sk); in smc_ioctl()
2338 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
2339 smc->sk.sk_state == SMC_CLOSED) { in smc_ioctl()
2349 release_sock(&smc->sk); in smc_ioctl()
2352 release_sock(&smc->sk); in smc_ioctl()
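
The smc_ioctl() branches above answer the classic TCP queue queries from SMC connection state: bytes_to_rcv for unread inbound data, sndbuf_desc->len minus sndbuf_space for outbound queued bytes, and smc_tx_prepared_sends() for data not yet sent; the case labels (presumably SIOCINQ, SIOCOUTQ, SIOCOUTQNSD and SIOCATMARK) are outside the matched lines. Userspace sketch for the first query, assuming a connected AF_SMC descriptor fd:

#include <stdio.h>
#include <sys/ioctl.h>

/* fd is assumed to be a connected AF_SMC socket descriptor */
void show_unread_bytes(int fd)
{
    int unread = 0;

    /* FIONREAD (alias SIOCINQ): bytes readable without blocking */
    if (ioctl(fd, FIONREAD, &unread) < 0)
        perror("ioctl(FIONREAD)");
    else
        printf("unread bytes: %d\n", unread);
}
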
2361 struct smc_sock *smc; in smc_sendpage() local
2364 smc = smc_sk(sk); in smc_sendpage()
2371 if (smc->use_fallback) in smc_sendpage()
2372 rc = kernel_sendpage(smc->clcsock, page, offset, in smc_sendpage()
2392 struct smc_sock *smc; in smc_splice_read() local
2395 smc = smc_sk(sk); in smc_splice_read()
2412 if (smc->use_fallback) { in smc_splice_read()
2413 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos, in smc_splice_read()
2424 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags); in smc_splice_read()
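
smc_splice_read() above serves the receive half of splice(2): fallback connections delegate to the clcsock, while native SMC connections go through smc_rx_recvmsg() with a pipe as the target. Userspace sketch that relays received data through a pipe, assuming a connected AF_SMC descriptor fd:

#define _GNU_SOURCE          /* for splice() */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* fd is assumed to be a connected AF_SMC socket descriptor */
void relay_to_stdout(int fd)
{
    char buf[4096];
    int pfd[2];
    ssize_t n;

    if (pipe(pfd) < 0)
        return;
    /* move received socket data into the pipe without a userspace copy */
    while ((n = splice(fd, NULL, pfd[1], NULL, sizeof(buf), 0)) > 0) {
        /* drain the pipe with a plain read for simplicity */
        ssize_t got = read(pfd[0], buf, sizeof(buf));

        if (got > 0)
            fwrite(buf, 1, (size_t)got, stdout);
    }
    close(pfd[0]);
    close(pfd[1]);
}
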
2459 struct smc_sock *smc; in smc_create() local
2478 smc = smc_sk(sk); in smc_create()
2479 smc->use_fallback = false; /* assume rdma capability first */ in smc_create()
2480 smc->fallback_rsn = 0; in smc_create()
2482 &smc->clcsock); in smc_create()
2487 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); in smc_create()
2488 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); in smc_create()
2629 MODULE_DESCRIPTION("smc socket address family");