/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};
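
/*
 * Illustrative sketch (not part of the original source): the two views
 * of a GID alias the same 16 bytes, so a port GID can be inspected
 * either as raw bytes or as its subnet prefix and interface ID.  The
 * device/port_num variables here are hypothetical; ib_query_gid() is
 * declared later in this file.
 *
 *	union ib_gid gid;
 *
 *	if (!ib_query_gid(device, port_num, 0, &gid))
 *		pr_info("subnet_prefix=%llx interface_id=%llx\n",
 *			(unsigned long long) be64_to_cpu(gid.global.subnet_prefix),
 *			(unsigned long long) be64_to_cpu(gid.global.interface_id));
 */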

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA 	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24)
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: 	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default: 	  return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
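
/*
 * Illustrative sketch (not part of the original source): a client that
 * wants asynchronous events initializes a handler with the macro above
 * and registers it with ib_register_event_handler(), declared later in
 * this file.  my_event_handler and the device variable are hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event %d on %s\n",
 *			event->event, event->device->name);
 *	}
 *
 *	static struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *	ib_register_event_handler(&eh);
 */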

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
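
/*
 * Worked example (illustrative, not part of the original source): with
 * a base rate of 2.5 Gbit/sec, ib_rate_to_mult(IB_RATE_20_GBPS) returns
 * 8 (20 = 8 * 2.5) and mult_to_ib_rate(8) returns IB_RATE_20_GBPS,
 * while ib_rate_to_mbps(IB_RATE_20_GBPS) returns 20000.
 */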

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int     hlen;
			int     mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
		struct {
			struct ib_mw            *mw;
			/* The new rkey for the memory window. */
			u32                      rkey;
			struct ib_mw_bind_info   bind_info;
		} bind_mw;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};
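
/*
 * Illustrative sketch (not part of the original source): building a
 * signaled RDMA WRITE work request over a single scatter/gather entry
 * and posting it with ib_post_send(), defined later in this file.  The
 * local_dma_addr, len, mr, remote_addr, remote_rkey and MY_COOKIE
 * values are hypothetical; the keys would come from registered memory
 * regions.
 *
 *	struct ib_sge sge = {
 *		.addr   = local_dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = MY_COOKIE,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.wr.rdma.remote_addr = remote_addr,
 *		.wr.rdma.rkey	     = remote_rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		... bad_wr points at the request that failed to post ...
 */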

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5)
};

struct ib_phys_buf {
	u64      addr;
	u64      size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64                    wr_id;
	int                    send_flags;
	struct ib_mw_bind_info bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t          	usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	atomic_t		usecnt; /* count times opened, mcast attaches */
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
					       int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
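
/*
 * Illustrative sketch (not part of the original source): a driver's
 * verb handler would typically pull a command structure out of udata
 * and push a response back; my_cmd and my_resp are hypothetical
 * driver-private types.
 *
 *	struct my_cmd cmd;
 *	struct my_resp resp;
 *
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return ERR_PTR(-EFAULT);
 *	... process cmd, fill in resp ...
 *	if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		return ERR_PTR(-EFAULT);
 */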

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					       u8 port_num);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
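
/*
 * Illustrative sketch (not part of the original source): creating an RC
 * QP on an existing PD with previously created send/recv CQs; the
 * capacity numbers are arbitrary placeholders.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap     = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		... handle PTR_ERR(qp) ...
 */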

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
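
/*
 * Illustrative sketch (not part of the original source): moving a
 * freshly created RC QP from RESET to INIT.  Per the IB spec this
 * transition requires the state, pkey index, port and access-flag
 * attributes; the values below are placeholders.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */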

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd - XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
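
/*
 * Illustrative sketch (not part of the original source): posting a
 * single receive buffer.  rx_dma_addr, RX_BUF_SIZE, mr and rx_cookie
 * are hypothetical; the DMA address and lkey would come from
 * ib_dma_map_single() and a registered MR respectively.
 *
 *	struct ib_sge sge = {
 *		.addr   = rx_dma_addr,
 *		.length = RX_BUF_SIZE,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = rx_cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */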

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector - Completion vector used to signal completion events.
 *     Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 *
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq:the CQ being polled
 * @num_entries:maximum number of completions to return
 * @wc:array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
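
/*
 * Illustrative sketch (not part of the original source): draining a CQ
 * in batches and checking each completion's status.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_err("wr_id %llu failed: %d\n",
 *				       (unsigned long long) wc[i].wr_id,
 *				       wc[i].status);
 */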

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case is it guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
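
/*
 * Illustrative sketch (not part of the original source): the race-free
 * arm-then-repoll pattern the return values above are designed for;
 * handle_completion() is a hypothetical consumer function.
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */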

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
1779 
1780 /**
1781  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1782  * @dev: The device for which the DMA address was created
1783  * @addr: The DMA address
1784  * @size: The size of the region in bytes
1785  * @direction: The direction of the DMA
1786  */
ib_dma_unmap_single(struct ib_device * dev,u64 addr,size_t size,enum dma_data_direction direction)1787 static inline void ib_dma_unmap_single(struct ib_device *dev,
1788 				       u64 addr, size_t size,
1789 				       enum dma_data_direction direction)
1790 {
1791 	if (dev->dma_ops)
1792 		dev->dma_ops->unmap_single(dev, addr, size, direction);
1793 	else
1794 		dma_unmap_single(dev->dma_device, addr, size, direction);
1795 }
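
/*
 * Illustrative sketch (not part of the original header): map a kernel
 * buffer for device access, check the result before use, and unmap it
 * again.  The helper name and the placement of the work-request post
 * are assumptions of the example.
 */
static inline int ib_example_map_buf(struct ib_device *dev,
				     void *buf, size_t len)
{
	u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, addr))
		return -ENOMEM;
	/* ... hand addr to a send work request here ... */
	ib_dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}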

/**
 * ib_dma_map_single_attrs - Map a kernel virtual address to a DMA
 *   address, applying extra DMA attributes
 * @attrs: The DMA attributes to apply to the mapping
 *
 * Other parameters are as for ib_dma_map_single().
 */
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

/**
 * ib_dma_unmap_single_attrs - Destroy a mapping created by
 *   ib_dma_map_single_attrs()
 * @attrs: The DMA attributes used for the mapping
 *
 * Other parameters are as for ib_dma_unmap_single().
 */
static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	dma_unmap_single_attrs(dev->dma_device, addr, size,
			       direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}
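
/*
 * Illustrative sketch (not part of the original header): map a
 * caller-built scatterlist, bail out on failure, and unmap it again.
 * @sg/@nents are assumed to describe already-pinned pages; the helper
 * name is hypothetical.
 */
static inline int ib_example_map_sg(struct ib_device *dev,
				    struct scatterlist *sg, int nents)
{
	int mapped = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);

	if (!mapped)
		return -ENOMEM;	/* 0 mapped entries means failure */
	/* ... build work requests from the mapped entries here ... */
	ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	return 0;
}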

/**
 * ib_dma_map_sg_attrs - Map a scatter/gather list to DMA addresses,
 *   applying extra DMA attributes
 * @attrs: The DMA attributes to apply to the mapping
 *
 * Other parameters are as for ib_dma_map_sg().
 */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_dma_unmap_sg_attrs - Unmap a scatter/gather list mapped with
 *   ib_dma_map_sg_attrs()
 * @attrs: The DMA attributes used for the mapping
 *
 * Other parameters are as for ib_dma_unmap_sg().
 */
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}
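
/*
 * Illustrative sketch (not part of the original header): walk a mapped
 * scatterlist and hand each DMA segment to a caller-supplied callback.
 * The callback type and helper name are assumptions of the example.
 */
static inline void ib_example_for_each_seg(struct ib_device *dev,
					   struct scatterlist *sg, int nents,
					   void (*fn)(u64 addr, unsigned int len))
{
	int i;

	for (i = 0; i < nents; ++i, sg = sg_next(sg))
		fn(ib_sg_dma_address(dev, sg), ib_sg_dma_len(dev, sg));
}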

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
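
/*
 * Illustrative sketch (not part of the original header): give the CPU
 * a coherent view of a streaming mapping before reading a just-received
 * buffer, then hand ownership back to the device.  @addr/@len are
 * assumed to describe a mapping created with ib_dma_map_single().
 */
static inline void ib_example_cpu_peek(struct ib_device *dev,
				       u64 addr, size_t len)
{
	ib_dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... the CPU may safely read the buffer contents here ... */
	ib_dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}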

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
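
/*
 * Illustrative sketch (not part of the original header): allocate a
 * coherent ring buffer and release it again.  The 4096-byte size and
 * the helper name are example assumptions.
 */
static inline int ib_example_coherent_ring(struct ib_device *dev)
{
	u64 dma_handle;
	void *ring = ib_dma_alloc_coherent(dev, 4096, &dma_handle,
					   GFP_KERNEL);

	if (!ring)
		return -ENOMEM;
	/* ... tell the device about dma_handle, use ring from the CPU ... */
	ib_dma_free_coherent(dev, 4096, ring, dma_handle);
	return 0;
}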

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain associated with the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister memory region operation
 *   followed by a register physical memory region operation.  Where
 *   possible, resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound
 * to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WR_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
				struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
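
/*
 * Illustrative sketch (not part of the original header): allocate the
 * pair of objects needed for fast registration and fill the page list
 * with caller-supplied DMA addresses.  Error unwinding is abbreviated;
 * the caller is assumed to keep @pl for the later work-request post and
 * to release it with ib_free_fast_reg_page_list().
 */
static inline struct ib_mr *ib_example_fast_reg_setup(struct ib_pd *pd,
						      struct ib_device *dev,
						      u64 *dma_pages, int npages)
{
	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, npages);
	struct ib_fast_reg_page_list *pl;
	int i;

	if (IS_ERR(mr))
		return mr;
	pl = ib_alloc_fast_reg_page_list(dev, npages);
	if (IS_ERR(pl)) {
		ib_dereg_mr(mr);
		return ERR_CAST(pl);
	}
	for (i = 0; i < npages; ++i)
		pl->page_list[i] = dma_pages[i];
	/* ... post an IB_WR_FAST_REG_MR work request referencing mr and pl ... */
	return mr;
}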

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 *   for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;
	return ((rkey + 1) & mask) | (rkey & ~mask);
}
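
/*
 * Illustrative sketch (not part of the original header): derive the
 * rkey a consumer would place in a type 2 memory window bind request.
 * Only the low 8 "key" bits change; the upper 24-bit index is
 * preserved.  The helper name is hypothetical.
 */
static inline u32 ib_example_next_mw_rkey(struct ib_mw *mw)
{
	return ib_inc_rkey(mw->rkey);
}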

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 * @type: The type of the memory window (1 or 2).
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 *
 * If there is no immediate error, the function will update the rkey member
 * of the mw parameter to its new value. The bind operation can still fail
 * asynchronously.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}
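
/*
 * Illustrative sketch (not part of the original header): post a bind
 * and remember the rkey remote peers should use.  @mw_bind is assumed
 * to have been fully initialized by the caller; the helper name is
 * hypothetical.
 */
static inline int ib_example_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
				     struct ib_mw_bind *mw_bind, u32 *rkey)
{
	int ret = ib_bind_mw(qp, mw, mw_bind);

	if (!ret)
		*rkey = mw->rkey;	/* updated on successful post */
	return ret;
}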

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
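
/*
 * Illustrative sketch (not part of the original header): map a page
 * array into an FMR before exposing fmr->rkey to the peer.  Page
 * alignment of the entries, per the FMR's page_shift, is assumed; the
 * helper name is hypothetical.
 */
static inline int ib_example_map_fmr(struct ib_fmr *fmr, u64 *pages,
				     int npages, u64 io_virt)
{
	int ret = ib_map_phys_fmr(fmr, pages, npages, io_virt);

	if (ret)
		return ret;
	/* ... advertise fmr->rkey and io_virt to the remote peer ... */
	return 0;
}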

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
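
/*
 * Illustrative sketch (not part of the original header): join a UD QP
 * to a multicast group for the duration of a transfer.  The GID/LID
 * pair is assumed to have been obtained from the subnet administrator;
 * the helper name is hypothetical.
 */
static inline int ib_example_mcast_txn(struct ib_qp *qp,
				       union ib_gid *gid, u16 lid)
{
	int ret = ib_attach_mcast(qp, gid, lid);

	if (ret)
		return ret;
	/* ... send and receive multicast traffic here ... */
	return ib_detach_mcast(qp, gid, lid);
}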

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

#endif /* IB_VERBS_H */