/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_QP_H
#define MLX5_QP_H

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

#define MLX5_INVALID_LKEY	0x100
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE		8
#define MLX5_STRIDE_BLOCK_OP	0x400
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2

#define MLX5_QPN_BITS		24
#define MLX5_QPN_MASK		((1 << MLX5_QPN_BITS) - 1)

enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};

enum mlx5_qp_state {
	MLX5_QP_STATE_RST			= 0,
	MLX5_QP_STATE_INIT			= 1,
	MLX5_QP_STATE_RTR			= 2,
	MLX5_QP_STATE_RTS			= 3,
	MLX5_QP_STATE_SQER			= 4,
	MLX5_QP_STATE_SQD			= 5,
	MLX5_QP_STATE_ERR			= 6,
	MLX5_QP_STATE_SQ_DRAINING		= 7,
	MLX5_QP_STATE_SUSPENDED			= 9,
	MLX5_QP_NUM_STATE
};

enum {
	MLX5_QP_ST_RC				= 0x0,
	MLX5_QP_ST_UC				= 0x1,
	MLX5_QP_ST_UD				= 0x2,
	MLX5_QP_ST_XRC				= 0x3,
	MLX5_QP_ST_MLX				= 0x4,
	MLX5_QP_ST_DCI				= 0x5,
	MLX5_QP_ST_DCT				= 0x6,
	MLX5_QP_ST_QP0				= 0x7,
	MLX5_QP_ST_QP1				= 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE		= 0x9,
	MLX5_QP_ST_RAW_IPV6			= 0xa,
	MLX5_QP_ST_SNIFFER			= 0xb,
	MLX5_QP_ST_SYNC_UMR			= 0xe,
	MLX5_QP_ST_PTP_1588			= 0xd,
	MLX5_QP_ST_REG_UMR			= 0xc,
	MLX5_QP_ST_MAX
};

enum {
	MLX5_QP_PM_MIGRATED			= 0x3,
	MLX5_QP_PM_ARMED			= 0x0,
	MLX5_QP_PM_REARM			= 0x1
};

enum {
	MLX5_NON_ZERO_RQ	= 0 << 24,
	MLX5_SRQ_RQ		= 1 << 24,
	MLX5_CRQ_RQ		= 2 << 24,
	MLX5_ZERO_LEN_RQ	= 3 << 24
};

enum {
	/* params1 */
	MLX5_QP_BIT_SRE				= 1 << 15,
	MLX5_QP_BIT_SWE				= 1 << 14,
	MLX5_QP_BIT_SAE				= 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE				= 1 << 15,
	MLX5_QP_BIT_RWE				= 1 << 14,
	MLX5_QP_BIT_RAE				= 1 << 13,
	MLX5_QP_BIT_RIC				= 1 << 4,
};

enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};

enum {
	MLX5_SEND_WQE_DS	= 16,
	MLX5_SEND_WQE_BB	= 64,
};

#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)

enum {
	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
};

enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31
};

enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_FENCE			= 2 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};

enum {
	MLX5_QP_LAT_SENSITIVE	= 1 << 28,
	MLX5_QP_BLOCK_MCAST	= 1 << 30,
	MLX5_QP_ENABLE_SIG	= 1 << 31,
};

enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};

enum {
	MLX5_FLAGS_INLINE	= 1 << 7,
	MLX5_FLAGS_CHECK_FREE	= 1 << 5,
};

struct mlx5_wqe_fmr_seg {
	__be32			flags;
	__be32			mem_key;
	__be64			buf_list;
	__be64			start_addr;
	__be64			reg_len;
	__be32			offset;
	__be32			page_size;
	u32			reserved[2];
};

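/*
 * Send WQE control segment (16 bytes); begins every send WQE. qpn_ds packs
 * the QP number with the WQE size in 16-byte data segments (see the
 * MLX5_WQE_CTRL_* masks below), and fm_ce_se carries the fence,
 * completion-event and solicited-event flags.
 */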
struct mlx5_wqe_ctrl_seg {
	__be32			opmod_idx_opcode;
	__be32			qpn_ds;
	u8			signature;
	u8			rsvd[2];
	u8			fm_ce_se;
	__be32			imm;
};

#define MLX5_WQE_CTRL_DS_MASK 0x3f
#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
#define MLX5_WQE_CTRL_QPN_SHIFT 8
#define MLX5_WQE_DS_UNITS 16
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8

enum {
	MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM      = 1 << 5,
	MLX5_ETH_WQE_L3_CSUM            = 1 << 6,
	MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
};

struct mlx5_wqe_eth_seg {
	u8              rsvd0[4];
	u8              cs_flags;
	u8              rsvd1;
	__be16          mss;
	__be32          rsvd2;
	__be16          inline_hdr_sz;
	u8              inline_hdr_start[2];
};

struct mlx5_wqe_xrc_seg {
	__be32			xrc_srqn;
	u8			rsvd[12];
};

struct mlx5_wqe_masked_atomic_seg {
	__be64			swap_add;
	__be64			compare;
	__be64			swap_add_mask;
	__be64			compare_mask;
};

struct mlx5_av {
	union {
		struct {
			__be32	qkey;
			__be32	reserved;
		} qkey;
		__be64	dc_key;
	} key;
	__be32	dqp_dct;
	u8	stat_rate_sl;
	u8	fl_mlid;
	__be16	rlid;
	u8	reserved0[10];
	u8	tclass;
	u8	hop_limit;
	__be32	grh_gid_fl;
	u8	rgid[16];
};

struct mlx5_wqe_datagram_seg {
	struct mlx5_av	av;
};

struct mlx5_wqe_raddr_seg {
	__be64			raddr;
	__be32			rkey;
	u32			reserved;
};

struct mlx5_wqe_atomic_seg {
	__be64			swap_add;
	__be64			compare;
};

struct mlx5_wqe_data_seg {
	__be32			byte_count;
	__be32			lkey;
	__be64			addr;
};

struct mlx5_wqe_umr_ctrl_seg {
	u8		flags;
	u8		rsvd0[3];
	__be16		klm_octowords;
	__be16		bsf_octowords;
	__be64		mkey_mask;
	u8		rsvd1[32];
};

struct mlx5_seg_set_psv {
	__be32		psv_num;
	__be16		syndrome;
	__be16		status;
	__be32		transient_sig;
	__be32		ref_tag;
};

struct mlx5_seg_get_psv {
	u8		rsvd[19];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

struct mlx5_seg_check_psv {
	u8		rsvd0[2];
	__be16		err_coalescing_op;
	u8		rsvd1[2];
	__be16		xport_err_op;
	u8		rsvd2[2];
	__be16		xport_err_mask;
	u8		rsvd3[7];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

struct mlx5_rwqe_sig {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

struct mlx5_wqe_signature_seg {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff

struct mlx5_wqe_inline_seg {
	__be32	byte_count;
};

enum mlx5_sig_type {
	MLX5_DIF_CRC = 0x1,
	MLX5_DIF_IPCS = 0x2,
};

struct mlx5_bsf_inl {
	__be16		vld_refresh;
	__be16		dif_apptag;
	__be32		dif_reftag;
	u8		sig_type;
	u8		rp_inv_seed;
	u8		rsvd[3];
	u8		dif_inc_ref_guard_check;
	__be16		dif_app_bitmask_check;
};

struct mlx5_bsf {
	struct mlx5_bsf_basic {
		u8		bsf_size_sbs;
		u8		check_byte_mask;
		union {
			u8	copy_byte_mask;
			u8	bs_selector;
			u8	rsvd_wflags;
		} wire;
		union {
			u8	bs_selector;
			u8	rsvd_mflags;
		} mem;
		__be32		raw_data_size;
		__be32		w_bfs_psv;
		__be32		m_bfs_psv;
	} basic;
	struct mlx5_bsf_ext {
		__be32		t_init_gen_pro_size;
		__be32		rsvd_epi_size;
		__be32		w_tfs_psv;
		__be32		m_tfs_psv;
	} ext;
	struct mlx5_bsf_inl	w_inl;
	struct mlx5_bsf_inl	m_inl;
};

struct mlx5_klm {
	__be32		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_entry {
	__be16		stride;
	__be16		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_ctrl_seg {
	__be32		bcount_per_cycle;
	__be32		op;
	__be32		repeat_count;
	u16		rsvd;
	__be16		num_entries;
};

enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u8			event_subtype;
	enum mlx5_pagefault_flags flags;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};
};

struct mlx5_core_qp {
	struct mlx5_core_rsc_common	common; /* must be first */
	void (*event)		(struct mlx5_core_qp *, int);
	void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
	int			qpn;
	struct mlx5_rsc_debug	*dbg;
	int			pid;
};

struct mlx5_qp_path {
	u8			fl;
	u8			rsvd3;
	u8			free_ar;
	u8			pkey_index;
	u8			rsvd0;
	u8			grh_mlid;
	__be16			rlid;
	u8			ackto_lt;
	u8			mgid_index;
	u8			static_rate;
	u8			hop_limit;
	__be32			tclass_flowlabel;
	u8			rgid[16];
	u8			rsvd1[4];
	u8			sl;
	u8			port;
	u8			rsvd2[6];
};

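/* Hardware QP context, as carried in the create/modify/query QP mailboxes below. */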
struct mlx5_qp_context {
	__be32			flags;
	__be32			flags_pd;
	u8			mtu_msgmax;
	u8			rq_size_stride;
	__be16			sq_crq_size;
	__be32			qp_counter_set_usr_page;
	__be32			wire_qpn;
	__be32			log_pg_sz_remote_qpn;
	struct			mlx5_qp_path pri_path;
	struct			mlx5_qp_path alt_path;
	__be32			params1;
	u8			reserved2[4];
	__be32			next_send_psn;
	__be32			cqn_send;
	u8			reserved3[8];
	__be32			last_acked_psn;
	__be32			ssn;
	__be32			params2;
	__be32			rnr_nextrecvpsn;
	__be32			xrcd;
	__be32			cqn_recv;
	__be64			db_rec_addr;
	__be32			qkey;
	__be32			rq_type_srqn;
	__be32			rmsn;
	__be16			hw_sq_wqe_counter;
	__be16			sw_sq_wqe_counter;
	__be16			hw_rcyclic_byte_counter;
	__be16			hw_rq_counter;
	__be16			sw_rcyclic_byte_counter;
	__be16			sw_rq_counter;
	u8			rsvd0[5];
	u8			cgs;
	u8			cs_req;
	u8			cs_res;
	__be64			dc_access_key;
	u8			rsvd1[24];
};

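/* Mailbox layouts for the QP, special-QP and XRCD firmware commands. */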
struct mlx5_create_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_qpn;
	u8			rsvd0[4];
	__be32			opt_param_mask;
	u8			rsvd1[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd3[16];
	__be64			pas[0];
};

struct mlx5_create_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
};

struct mlx5_destroy_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
};

struct mlx5_destroy_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

struct mlx5_modify_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd0[4];
	__be32			optparam;
	u8			rsvd1[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd2[16];
};

struct mlx5_modify_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

struct mlx5_query_qp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd[4];
};

struct mlx5_query_qp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd1[8];
	__be32			optparam;
	u8			rsvd0[4];
	struct mlx5_qp_context	ctx;
	u8			rsvd2[16];
	__be64			pas[0];
};

struct mlx5_conf_sqp_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;
	u8			rsvd[3];
	u8			type;
};

struct mlx5_conf_sqp_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			xrcdn;
	u8			rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			xrcdn;
	u8			rsvd[4];
};

struct mlx5_dealloc_xrcd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

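/*
 * Raw radix-tree lookups of a QP by number and an MR by key. No reference
 * is taken on the returned object, so callers are expected to synchronize
 * against table updates themselves.
 */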
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}

static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
	return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}

struct mlx5_page_fault_resume_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			flags_qpn;
	u8			reserved[4];
};

struct mlx5_page_fault_resume_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

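/* QP, XRCD and ODP page-fault handling API. */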
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			struct mlx5_create_qp_mbox_in *in,
			int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
			enum mlx5_qp_state new_state,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       struct mlx5_query_qp_mbox_out *out, int outlen);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 context, int error);
#endif

static inline const char *mlx5_qp_type_str(int type)
{
	switch (type) {
	case MLX5_QP_ST_RC: return "RC";
	case MLX5_QP_ST_UC: return "UC";
	case MLX5_QP_ST_UD: return "UD";
	case MLX5_QP_ST_XRC: return "XRC";
	case MLX5_QP_ST_MLX: return "MLX";
	case MLX5_QP_ST_QP0: return "QP0";
	case MLX5_QP_ST_QP1: return "QP1";
	case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
	case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
	case MLX5_QP_ST_SNIFFER: return "SNIFFER";
	case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
	case MLX5_QP_ST_PTP_1588: return "PTP_1588";
	case MLX5_QP_ST_REG_UMR: return "REG_UMR";
	default: return "Invalid transport type";
	}
}

static inline const char *mlx5_qp_state_str(int state)
{
	switch (state) {
	case MLX5_QP_STATE_RST:
		return "RST";
	case MLX5_QP_STATE_INIT:
		return "INIT";
	case MLX5_QP_STATE_RTR:
		return "RTR";
	case MLX5_QP_STATE_RTS:
		return "RTS";
	case MLX5_QP_STATE_SQER:
		return "SQER";
	case MLX5_QP_STATE_SQD:
		return "SQD";
	case MLX5_QP_STATE_ERR:
		return "ERR";
	case MLX5_QP_STATE_SQ_DRAINING:
		return "SQ_DRAINING";
	case MLX5_QP_STATE_SUSPENDED:
		return "SUSPENDED";
	default: return "Invalid QP state";
	}
}

#endif /* MLX5_QP_H */