/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>			/* wait_queue_head_t, etc */
#include <linux/spinlock.h>		/* spinlock_t, etc */
#include <linux/atomic.h>		/* atomic_t, etc */
#include <linux/workqueue.h>		/* struct work_struct */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h>		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h>	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h>	/* xprt parameters */
#include <linux/sunrpc/svc.h>		/* RPCSVC_MAXPAYLOAD */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	rwlock_t		ri_qplock;
	struct rdma_cm_id	*ri_id;
	struct ib_pd		*ri_pd;
	struct ib_mr		*ri_bind_mem;
	u32			ri_dma_lkey;
	int			ri_have_dma_lkey;
	struct completion	ri_done;
	int			ri_async_rc;
	enum rpcrdma_memreg	ri_memreg_strategy;
	unsigned int		ri_max_frmr_depth;
	struct ib_device_attr	ri_devattr;
};

/*
 * RDMA Endpoint -- one per transport instance
 */

#define RPCRDMA_WC_BUDGET	(128)
#define RPCRDMA_POLLSIZE	(16)

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct rpcrdma_ia	*rep_ia;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t	rep_connect_wait;
	struct ib_sge		rep_pad;	/* holds zeroed pad */
	struct ib_mr		*rep_pad_mr;	/* holds zeroed pad */
	void			(*rep_func)(struct rpcrdma_ep *);
	struct rpc_xprt		*rep_xprt;	/* for rep_func */
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
	struct delayed_work	rep_connect_worker;
	struct ib_wc		rep_send_wcs[RPCRDMA_POLLSIZE];
	struct ib_wc		rep_recv_wcs[RPCRDMA_POLLSIZE];
};

#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
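
/*
 * Hedged usage sketch (not part of the original interface; this helper
 * name is hypothetical).  It illustrates how a send path can consume
 * the completion-count budget above, in the manner of the post path in
 * xprtrdma/verbs.c: sends are posted unsignaled while budget remains,
 * then one signaled send re-arms the counter.  This bounds the rate of
 * send completions the provider must deliver.
 */
static inline int
rpcrdma_example_send_flags(struct rpcrdma_ep *ep)
{
	if (DECR_CQCOUNT(ep) > 0)
		return 0;		/* budget remains: unsignaled send */
	INIT_CQCOUNT(ep);		/* budget spent: reset the counter */
	return IB_SEND_SIGNALED;	/* and request a completion */
}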

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state to be run by a tasklet (list, etc)
 *
 * These are allocated during initialization, per-transport instance;
 * however, the tasklet execution list itself is global, as it should
 * always be pretty short.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

/* temporary static scatter/gather max */
#define RPCRDMA_MAX_DATA_SEGS	(64)	/* max scatter/gather */
#define RPCRDMA_MAX_SEGS	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
#define MAX_RPCRDMAHDR	(\
	/* max supported RPC/RDMA header */ \
	sizeof(struct rpcrdma_msg) + (2 * sizeof(u32)) + \
	(sizeof(struct rpcrdma_read_chunk) * RPCRDMA_MAX_SEGS) + sizeof(u32))

struct rpcrdma_buffer;

struct rpcrdma_rep {
	unsigned int	rr_len;		/* actual received reply length */
	struct rpcrdma_buffer *rr_buffer; /* home base for this structure */
	struct rpc_xprt	*rr_xprt;	/* needed for request/reply matching */
	void (*rr_func)(struct rpcrdma_rep *);/* called by tasklet in softint */
	struct list_head rr_list;	/* tasklet list */
	struct ib_sge	rr_iov;		/* for posting */
	struct ib_mr	*rr_handle;	/* handle for mem in rr_iov */
	char	rr_base[MAX_RPCRDMAHDR]; /* minimal inline receive buffer */
};

/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (i.e., not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
 * track of registration metadata while each RPC is pending.
 * rpcrdma_deregister_external() uses this metadata to unmap and
 * release these resources when an RPC is complete.
 */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
	FRMR_IS_STALE,		/* failed completion */
};

struct rpcrdma_frmr {
	struct ib_fast_reg_page_list	*fr_pgl;
	struct ib_mr			*fr_mr;
	enum rpcrdma_frmr_state		fr_state;
};

struct rpcrdma_mw {
	union {
		struct ib_fmr		*fmr;
		struct rpcrdma_frmr	frmr;
	} r;
	struct list_head	mw_list;
	struct list_head	mw_all;
};
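
/*
 * Hedged sketch (hypothetical helper, not declared elsewhere in this
 * tree): detaching a free rpcrdma_mw from the rb_mws free list, as the
 * comment above describes for rpcrdma_buffer_get().  The caller is
 * assumed to hold the owning rpcrdma_buffer's rb_lock; error handling
 * and per-registration-strategy details are omitted.
 */
static inline struct rpcrdma_mw *
rpcrdma_example_pop_mw(struct list_head *mws)
{
	struct rpcrdma_mw *mw;

	if (list_empty(mws))
		return NULL;	/* free list exhausted */
	mw = list_first_entry(mws, struct rpcrdma_mw, mw_list);
	list_del(&mw->mw_list);	/* mw_all keeps it visible for teardown */
	return mw;
}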

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 *
 * NOTES:
 *   o RPCRDMA_MAX_SEGS is the max number of addressable chunk elements we
 *     marshal. The number needed varies depending on the iov lists that
 *     are passed to us, the memory registration mode we are in, and if
 *     physical addressing is used, the layout.
 */

struct rpcrdma_mr_seg {			/* chunk descriptors */
	union {				/* chunk memory handles */
		struct ib_mr	*rl_mr;		/* if registered directly */
		struct rpcrdma_mw *rl_mw;	/* if registered from region */
	} mr_chunk;
	u64		mr_base;	/* registration result */
	u32		mr_rkey;	/* registration result */
	u32		mr_len;		/* length of chunk or segment */
	int		mr_nsegs;	/* number of segments in chunk or 0 */
	enum dma_data_direction	mr_dir;	/* segment mapping direction */
	dma_addr_t	mr_dma;		/* segment mapping address */
	size_t		mr_dmalen;	/* segment mapping length */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

struct rpcrdma_req {
	size_t		rl_size;	/* actual length of buffer */
	unsigned int	rl_niovs;	/* 0, 2 or 4 */
	unsigned int	rl_nchunks;	/* non-zero if chunks */
	unsigned int	rl_connect_cookie;	/* retry detection */
	enum rpcrdma_chunktype	rl_rtype, rl_wtype;
	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
	struct ib_sge	rl_send_iov[4];	/* for active requests */
	struct ib_sge	rl_iov;		/* for posting */
	struct ib_mr	*rl_handle;	/* handle for mem in rl_iov */
	char		rl_base[MAX_RPCRDMAHDR]; /* start of actual buffer */
	__u32		rl_xdr_buf[0];	/* start of returned rpc rq_buffer */
};
#define rpcr_to_rdmar(r) \
	container_of((r)->rq_buffer, struct rpcrdma_req, rl_xdr_buf[0])
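
/*
 * Hedged sketch of the macro above (hypothetical helper): an RPC's
 * rq_buffer points at rl_xdr_buf[], the zero-length array at the end
 * of struct rpcrdma_req, so container_of() recovers the whole
 * transport-private request, and with it the reply buffer holder.
 */
static inline struct rpcrdma_rep *
rpcrdma_example_reply_of(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	return req->rl_reply;	/* see rl_reply above */
}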

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance.
 */
struct rpcrdma_buffer {
	spinlock_t	rb_lock;	/* protects indexes */
	atomic_t	rb_credits;	/* most recent server credits */
	int		rb_max_requests;/* client max requests */
	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
	struct list_head rb_all;
	int		rb_send_index;
	struct rpcrdma_req	**rb_send_bufs;
	int		rb_recv_index;
	struct rpcrdma_rep	**rb_recv_bufs;
	char		*rb_pool;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
	(rpcx_to_rdmad(rq->rq_xprt).inline_rsize)

#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
	(rpcx_to_rdmad(rq->rq_xprt).inline_wsize)

#define RPCRDMA_INLINE_PAD_VALUE(rq)\
	rpcx_to_rdmad(rq->rq_xprt).padding

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
};

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rdma_connect;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
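
/*
 * Hedged sketch (hypothetical helper): how the inline thresholds
 * defined earlier can feed the chunk-type decision made while
 * marshaling in xprtrdma/rpc_rdma.c.  A call message that fits under
 * the inline write threshold needs no chunking; a larger payload has
 * to be advertised as read chunks for the server to pull.
 */
static inline enum rpcrdma_chunktype
rpcrdma_example_pick_rtype(struct rpc_rqst *rqst)
{
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		return rpcrdma_noch;	/* whole call fits inline */
	return rpcrdma_readch;		/* payload moved via read chunks */
}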

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
int rpcrdma_buffer_create(struct rpcrdma_buffer *, struct rpcrdma_ep *,
				struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

int rpcrdma_register_internal(struct rpcrdma_ia *, void *, int,
				struct ib_mr **, struct ib_sge *);
int rpcrdma_deregister_internal(struct rpcrdma_ia *,
				struct ib_mr *, struct ib_sge *);

int rpcrdma_register_external(struct rpcrdma_mr_seg *,
				int, int, struct rpcrdma_xprt *);
int rpcrdma_deregister_external(struct rpcrdma_mr_seg *,
				struct rpcrdma_xprt *);

/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_connect_worker(struct work_struct *);
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t);
int rpcrdma_marshal_req(struct rpc_rqst *);
size_t rpcrdma_max_payload(struct rpcrdma_xprt *);

/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;
/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
/* Workqueue created in svc_rdma.c */
extern struct workqueue_struct *svc_rdma_wq;

#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT)
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
#else
#define RPCSVC_MAXPAYLOAD_RDMA (RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT)
#endif

#endif	/* _LINUX_SUNRPC_XPRT_RDMA_H */