1 /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2 /* Copyright (c) 2015 - 2020 Intel Corporation */
3 #ifndef IRDMA_USER_H
4 #define IRDMA_USER_H
5 
/*
 * Opaque-handle and scalar type abstractions shared between the user-space
 * provider and the kernel driver.  These are macros rather than typedefs,
 * presumably for historical compatibility with the i40iw lineage —
 * NOTE(review): confirm before converting to typedefs.
 */
#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
20 
/* Largest supported memory region: 0x2000_0000_0000 = 2^45 bytes (32 TB) */
#define	IRDMA_MAX_MR_SIZE       0x200000000000ULL

/*
 * Memory access privilege bits for MRs and MWs.  The non-_ONLY remote
 * flags are composites that also grant the matching local right:
 * REMOTEREAD = REMOTEREAD_ONLY | LOCALREAD, REMOTEWRITE = REMOTEWRITE_ONLY
 * | LOCALWRITE.
 */
#define IRDMA_ACCESS_FLAGS_LOCALREAD		0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE		0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY	0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD		0x05	/* 0x04 | 0x01 */
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY	0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE		0x0a	/* 0x08 | 0x02 */
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW		0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED		0x20
#define IRDMA_ACCESS_FLAGS_ALL			0x3f	/* union of all bits above */
32 
/*
 * Work-request opcodes; the same numbering appears in the op_type fields of
 * struct irdma_post_sq_info and struct irdma_cq_poll_info below.
 * NOTE(review): values look hardware-defined and non-contiguous (0x02 and
 * 0x07 are unused, RDMA_WRITE_SOL is out of sequence) — do not renumber.
 */
#define IRDMA_OP_TYPE_RDMA_WRITE		0x00
#define IRDMA_OP_TYPE_RDMA_READ			0x01
#define IRDMA_OP_TYPE_SEND			0x03
#define IRDMA_OP_TYPE_SEND_INV			0x04
#define IRDMA_OP_TYPE_SEND_SOL			0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV		0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL		0x0d
#define IRDMA_OP_TYPE_BIND_MW			0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR		0x09
#define IRDMA_OP_TYPE_INV_STAG			0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG	0x0b
#define IRDMA_OP_TYPE_NOP			0x0c
#define IRDMA_OP_TYPE_REC	0x3e	/* receive completion */
#define IRDMA_OP_TYPE_REC_IMM	0x3f	/* receive completion with immediate data */

/* major_err value reported for flushed completions — see irdma_cq_poll_info */
#define IRDMA_FLUSH_MAJOR_ERR	1
49 
/*
 * Device limits and object sizes shared with user space.  The *_SIZE
 * constants that dimension __le64 arrays in this header (IRDMA_WQE_SIZE,
 * IRDMA_CQE_SIZE, IRDMA_EXTENDED_CQE_SIZE) are counts of 8-byte words,
 * not bytes.
 */
enum irdma_device_caps_const {
	IRDMA_WQE_SIZE =			4,	/* one SQ/RQ quantum: 4 * 8 = 32 bytes */
	IRDMA_CQP_WQE_SIZE =			8,
	IRDMA_CQE_SIZE =			4,	/* 32-byte CQE (struct irdma_cqe) */
	IRDMA_EXTENDED_CQE_SIZE =		8,	/* 64-byte CQE (struct irdma_extended_cqe) */
	IRDMA_AEQE_SIZE =			2,
	IRDMA_CEQE_SIZE =			1,
	IRDMA_CQP_CTX_SIZE =			8,
	IRDMA_SHADOW_AREA_SIZE =		8,
	IRDMA_QUERY_FPM_BUF_SIZE =		176,
	IRDMA_COMMIT_FPM_BUF_SIZE =		176,
	IRDMA_GATHER_STATS_BUF_SIZE =		1024,
	IRDMA_MIN_IW_QP_ID =			0,
	IRDMA_MAX_IW_QP_ID =			262143,		/* 2^18 - 1 */
	IRDMA_MIN_CEQID =			0,
	IRDMA_MAX_CEQID =			1023,		/* 2^10 - 1 */
	IRDMA_CEQ_MAX_COUNT =			IRDMA_MAX_CEQID + 1,
	IRDMA_MIN_CQID =			0,
	IRDMA_MAX_CQID =			524287,		/* 2^19 - 1 */
	IRDMA_MIN_AEQ_ENTRIES =			1,
	IRDMA_MAX_AEQ_ENTRIES =			524287,
	IRDMA_MIN_CEQ_ENTRIES =			1,
	IRDMA_MAX_CEQ_ENTRIES =			262143,
	IRDMA_MIN_CQ_SIZE =			1,
	IRDMA_MAX_CQ_SIZE =			1048575,	/* 2^20 - 1 */
	IRDMA_DB_ID_ZERO =			0,
	IRDMA_MAX_WQ_FRAGMENT_COUNT =		13,	/* max SGEs per WQE */
	IRDMA_MAX_SGE_RD =			13,
	IRDMA_MAX_OUTBOUND_MSG_SIZE =		2147483647,	/* 2^31 - 1 bytes */
	IRDMA_MAX_INBOUND_MSG_SIZE =		2147483647,	/* 2^31 - 1 bytes */
	IRDMA_MAX_PUSH_PAGE_COUNT =		1024,
	IRDMA_MAX_PE_ENA_VF_COUNT =		32,
	IRDMA_MAX_VF_FPM_ID =			47,
	IRDMA_MAX_SQ_PAYLOAD_SIZE =		2145386496,
	IRDMA_MAX_INLINE_DATA_SIZE =		101,	/* bytes of inline payload per WQE */
	IRDMA_MAX_WQ_ENTRIES =			32768,
	IRDMA_Q2_BUF_SIZE =			256,
	IRDMA_QP_CTX_SIZE =			256,
	IRDMA_MAX_PDS =				262144,
};
90 
/*
 * Addressing mode for a memory region/window: offsets are relative to the
 * region base (zero-based) or are full virtual addresses.
 */
enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED   = 1,
};
95 
/*
 * Reason codes for flushed work requests.  NOTE(review): presumably mapped
 * to IB work-completion status by the poll path — the mapping is not in
 * this header; confirm in the CQ poll implementation.
 */
enum irdma_flush_opcode {
	FLUSH_INVALID = 0,
	FLUSH_GENERAL_ERR,
	FLUSH_PROT_ERR,
	FLUSH_REM_ACCESS_ERR,
	FLUSH_LOC_QP_OP_ERR,
	FLUSH_REM_OP_ERR,
	FLUSH_LOC_LEN_ERR,
	FLUSH_FATAL_ERR,
	FLUSH_RETRY_EXC_ERR,
	FLUSH_MW_BIND_ERR,
	FLUSH_REM_INV_REQ_ERR,
};
109 
/* Completion status reported in irdma_cq_poll_info.comp_status. */
enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};
137 
/*
 * CQ arm mode passed to irdma_uk_cq_request_notification(): notify on any
 * completion event, or only on solicited completions.
 */
enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT     = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};
142 
/* QP capability bit-flags, stored in irdma_qp_uk.qp_caps. */
enum irdma_qp_caps {
	IRDMA_WRITE_WITH_IMM = 1,	/* RDMA write with immediate supported */
	IRDMA_SEND_WITH_IMM  = 2,	/* send with immediate supported */
	IRDMA_ROCE	     = 4,	/* QP runs over RoCE rather than iWARP */
	IRDMA_PUSH_MODE      = 8,	/* WQE push (doorbell-less post) supported */
};
149 
/* Forward declarations; full definitions appear later in this header. */
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;

/*
 * Scatter/gather element.  tag_off is a VA or zero-based offset depending
 * on the region's irdma_addressing_type.
 */
struct irdma_sge {
	irdma_tagged_offset tag_off;	/* buffer address/offset */
	u32 len;			/* length in bytes */
	irdma_stag stag;		/* STag of the memory region */
};
160 
/* Circular queue bookkeeping; head/tail/size are entry indices/counts. */
struct irdma_ring {
	u32 head;
	u32 tail;
	u32 size;	/* total number of entries in the ring */
};
166 
/* 32-byte completion queue entry: 4 little-endian 64-bit words. */
struct irdma_cqe {
	__le64 buf[IRDMA_CQE_SIZE];
};

/* 64-byte extended completion queue entry: 8 little-endian 64-bit words. */
struct irdma_extended_cqe {
	__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};
174 
/*
 * Op info for a send work request with an SGE list.  qkey/dest_qp/ah_id
 * are remote-addressing fields — NOTE(review): presumably used only for
 * UD QPs; confirm against irdma_uk_send().
 */
struct irdma_post_send {
	irdma_sgl sg_list;	/* array of num_sges elements */
	u32 num_sges;
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};

/* Op info for a send carrying inline payload instead of an SGE list. */
struct irdma_post_inline_send {
	void *data;	/* inline payload, copied into the WQE */
	u32 len;	/* payload length; bounded by IRDMA_MAX_INLINE_DATA_SIZE */
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};
190 
/* Receive work request: user wr_id plus the scatter list to land data in. */
struct irdma_post_rq_info {
	u64 wr_id;		/* opaque id echoed back at completion */
	irdma_sgl sg_list;	/* array of num_sges elements */
	u32 num_sges;
};

/* Op info for an RDMA write: local source SGEs, remote destination. */
struct irdma_rdma_write {
	irdma_sgl lo_sg_list;	/* local source buffers */
	u32 num_lo_sges;
	struct irdma_sge rem_addr;	/* remote address + RKey */
};

/* Op info for an RDMA write with inline payload. */
struct irdma_inline_rdma_write {
	void *data;	/* inline payload, copied into the WQE */
	u32 len;	/* bounded by IRDMA_MAX_INLINE_DATA_SIZE */
	struct irdma_sge rem_addr;	/* remote address + RKey */
};

/* Op info for an RDMA read: remote source, local destination SGEs. */
struct irdma_rdma_read {
	irdma_sgl lo_sg_list;	/* local destination buffers */
	u32 num_lo_sges;
	struct irdma_sge rem_addr;	/* remote address + RKey */
};
214 
/* Op info for binding a memory window onto a parent memory region. */
struct irdma_bind_window {
	irdma_stag mr_stag;		/* STag of the parent MR */
	u64 bind_len;			/* window length in bytes */
	void *va;			/* window base virtual address */
	enum irdma_addressing_type addressing_type;	/* VA- or zero-based */
	bool ena_reads:1;		/* grant remote read through the window */
	bool ena_writes:1;		/* grant remote write through the window */
	irdma_stag mw_stag;		/* STag assigned to the window */
	bool mem_window_type_1:1;	/* true = type-1 MW, false = type-2 */
};

/* Op info for invalidating a local STag. */
struct irdma_inv_local_stag {
	irdma_stag target_stag;	/* STag to invalidate */
};
229 
/*
 * Descriptor for one send-queue work request.  op_type (one of the
 * IRDMA_OP_TYPE_* values) selects which member of the op union is valid.
 */
struct irdma_post_sq_info {
	u64 wr_id;		/* opaque id echoed back in the CQE */
	u8 op_type;		/* IRDMA_OP_TYPE_* opcode */
	u8 l4len;
	bool signaled:1;	/* request a completion for this WQE */
	bool read_fence:1;
	bool local_fence:1;
	bool inline_data:1;	/* payload is inline (op.inline_*) */
	bool imm_data_valid:1;	/* imm_data carries valid immediate data */
	bool push_wqe:1;	/* post via push page instead of doorbell */
	bool report_rtt:1;
	bool udp_hdr:1;
	bool defer_flag:1;
	u32 imm_data;		/* immediate data, if imm_data_valid */
	u32 stag_to_inv;	/* STag for *_INV opcodes */
	union {
		struct irdma_post_send send;
		struct irdma_rdma_write rdma_write;
		struct irdma_rdma_read rdma_read;
		struct irdma_bind_window bind_window;
		struct irdma_inv_local_stag inv_local_stag;
		struct irdma_inline_rdma_write inline_rdma_write;
		struct irdma_post_inline_send inline_send;
	} op;
};
255 
/* Result of polling one completion via irdma_uk_cq_poll_cmpl(). */
struct irdma_cq_poll_info {
	u64 wr_id;		/* wr_id from the completed work request */
	irdma_qp_handle qp_handle;
	u32 bytes_xfered;	/* bytes transferred by the operation */
	u32 tcp_seq_num_rtt;
	u32 qp_id;
	u32 ud_src_qpn;		/* source QPN for UD completions */
	u32 imm_data;		/* valid only when imm_valid is set */
	irdma_stag inv_stag; /* or L_R_Key */
	enum irdma_cmpl_status comp_status;
	u16 major_err;		/* e.g. IRDMA_FLUSH_MAJOR_ERR on flush */
	u16 minor_err;
	u16 ud_vlan;		/* valid only when ud_vlan_valid is set */
	u8 ud_smac[6];		/* source MAC; valid only when ud_smac_valid */
	u8 op_type;		/* IRDMA_OP_TYPE_* of the completed WR */
	bool stag_invalid_set:1; /* or L_R_Key set */
	bool push_dropped:1;
	bool error:1;		/* comp_status indicates an error */
	bool solicited_event:1;
	bool ipv4:1;
	bool ud_vlan_valid:1;
	bool ud_smac_valid:1;
	bool imm_valid:1;
};
280 
/*
 * Send-queue posting API.  Each function builds one WQE on @qp from @info;
 * when @post_sq is true the doorbell is rung immediately, otherwise the
 * caller batches and rings later via irdma_uk_qp_post_wr().
 */

/* Post an RDMA write with inline payload (info->op.inline_rdma_write). */
enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
						  struct irdma_post_sq_info *info,
						  bool post_sq);
/* Post a send with inline payload (info->op.inline_send). */
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
					    struct irdma_post_sq_info *info,
					    bool post_sq);
/* Post a memory-window bind (info->op.bind_window). */
enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
					struct irdma_post_sq_info *info,
					bool post_sq);
/* Post a NOP WQE, optionally signaled. */
enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id,
					 bool signaled, bool post_sq);
/* Post a receive WQE described by @info. */
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
					     struct irdma_post_rq_info *info);
/* Ring the doorbell for previously built (batched) WQEs. */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
/* Post an RDMA read; @inv_stag selects read-with-invalidate. */
enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
					  struct irdma_post_sq_info *info,
					  bool inv_stag, bool post_sq);
/* Post an RDMA write (info->op.rdma_write). */
enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
					   struct irdma_post_sq_info *info,
					   bool post_sq);
/* Post a send (info->op.send). */
enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
				     struct irdma_post_sq_info *info, bool post_sq);
/* Post a local STag invalidate (info->op.inv_local_stag). */
enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
						      struct irdma_post_sq_info *info,
						      bool post_sq);
306 
/*
 * WQE construction helpers; populated outside this header (NOTE(review):
 * presumably per hardware generation — confirm where wqe_ops is assigned).
 */
struct irdma_wqe_uk_ops {
	/* Copy @len bytes of inline data into a WQE with validity @polarity. */
	void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
	/* Number of WQE quanta needed to hold @data_size inline bytes. */
	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
	/* Encode one SGE into @wqe at @offset with validity bit @valid. */
	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
				u8 valid);
	/* Encode a memory-window bind descriptor into @wqe. */
	void (*iw_set_mw_bind_wqe)(__le64 *wqe,
				   struct irdma_bind_window *op_info);
};
315 
316 enum irdma_status_code irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
317 					     struct irdma_cq_poll_info *info);
318 void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
319 				      enum irdma_cmpl_notify cq_notify);
320 void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
321 void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
322 enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
323 					struct irdma_cq_uk_init_info *info);
324 enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
325 					struct irdma_qp_uk_init_info *info);
/*
 * Per-SQ-WQE tracking entry (irdma_qp_uk.sq_wrtrk_array): remembers the
 * caller's wr_id and the WQE footprint until the completion is polled.
 */
struct irdma_sq_uk_wr_trk_info {
	u64 wrid;	/* caller's work-request id */
	u32 wr_len;	/* total payload length in bytes */
	u16 quanta;	/* number of 32-byte quanta the WQE occupies */
	u8 reserved[2];	/* pad to 16 bytes */
};

/* One WQE quantum: IRDMA_WQE_SIZE (4) little-endian 64-bit words = 32 bytes. */
struct irdma_qp_quanta {
	__le64 elem[IRDMA_WQE_SIZE];
};
336 
/*
 * User-space (userkernel-shared) QP state.  Initialized by
 * irdma_uk_qp_init() from struct irdma_qp_uk_init_info.
 */
struct irdma_qp_uk {
	struct irdma_qp_quanta *sq_base;	/* SQ WQE memory */
	struct irdma_qp_quanta *rq_base;	/* RQ WQE memory */
	struct irdma_uk_attrs *uk_attrs;	/* device attributes (defined elsewhere) */
	u32 __iomem *wqe_alloc_db;		/* doorbell register */
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;	/* per-SQ-WQE tracking */
	u64 *rq_wrid_array;			/* wr_id per RQ slot */
	__le64 *shadow_area;
	__le32 *push_db;			/* push-mode doorbell page */
	__le64 *push_wqe;			/* push-mode WQE page */
	struct irdma_ring sq_ring;
	struct irdma_ring rq_ring;
	struct irdma_ring initial_ring;
	u32 qp_id;
	u32 qp_caps;				/* enum irdma_qp_caps bit-flags */
	u32 sq_size;				/* SQ depth in WQEs */
	u32 rq_size;				/* RQ depth in WQEs */
	u32 max_sq_frag_cnt;			/* max SGEs per SQ WQE */
	u32 max_rq_frag_cnt;			/* max SGEs per RQ WQE */
	u32 max_inline_data;			/* max inline payload bytes */
	struct irdma_wqe_uk_ops wqe_ops;	/* WQE build helpers */
	u16 conn_wqes;
	u8 qp_type;
	u8 swqe_polarity;			/* SQ WQE validity bit */
	u8 swqe_polarity_deferred;
	u8 rwqe_polarity;			/* RQ WQE validity bit */
	u8 rq_wqe_size;
	u8 rq_wqe_size_multiplier;
	bool deferred_flag:1;
	bool push_mode:1; /* whether the last post wqe was pushed */
	bool push_dropped:1;
	bool first_sq_wq:1;
	bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
	bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
	bool destroy_pending:1; /* Indicates the QP is being destroyed */
	void *back_qp;				/* back-pointer to the owning QP object */
	spinlock_t *lock;
	u8 dbg_rq_flushed;
	u8 sq_flush_seen;
	u8 rq_flush_seen;
};
378 
/* User-space CQ state; initialized by irdma_uk_cq_init(). */
struct irdma_cq_uk {
	struct irdma_cqe *cq_base;	/* CQE array */
	u32 __iomem *cqe_alloc_db;	/* CQ doorbell register */
	u32 __iomem *cq_ack_db;		/* CQ acknowledge register */
	__le64 *shadow_area;
	u32 cq_id;
	u32 cq_size;			/* depth in CQEs */
	struct irdma_ring cq_ring;
	u8 polarity;			/* current CQE validity bit */
	bool avoid_mem_cflct:1;		/* use 64-byte extended CQEs */
};
390 
/*
 * Parameters for irdma_uk_qp_init(); fields mirror the identically named
 * members of struct irdma_qp_uk they initialize.
 */
struct irdma_qp_uk_init_info {
	struct irdma_qp_quanta *sq;
	struct irdma_qp_quanta *rq;
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	__le64 *shadow_area;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	u32 qp_id;
	u32 qp_caps;		/* enum irdma_qp_caps bit-flags */
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	u8 first_sq_wq;
	u8 type;		/* QP type (becomes irdma_qp_uk.qp_type) */
	int abi_ver;		/* user/kernel ABI version */
	bool legacy_mode;
};
411 
/*
 * Parameters for irdma_uk_cq_init(); fields mirror the identically named
 * members of struct irdma_cq_uk they initialize.
 */
struct irdma_cq_uk_init_info {
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	u32 cq_size;
	u32 cq_id;
	bool avoid_mem_cflct;	/* use 64-byte extended CQEs */
};
421 
/*
 * Internal/shared helpers used by the posting and polling paths above.
 */

/* Reserve @quanta quanta on the SQ; returns the WQE and its index in @wqe_idx. */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info);
/* Reserve the next RQ WQE; returns it and its index in @wqe_idx. */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
/* Discard completions on @cq that belong to queue @q. */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
/* Post a NOP WQE (see also irdma_uk_post_nop). */
enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
				 bool signaled, bool post_sq);
/* Convert an SQ fragment (SGE) count to WQE quanta in @quanta. */
enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
/* Convert an RQ fragment (SGE) count to a WQE size in @wqe_size. */
enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
/* Compute the WQE size shift for @sge SGEs / @inline_data bytes in @shift. */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift);
/* Compute the actual SQ depth for @sq_size and @shift in @wqdepth. */
enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 sq_size, u8 shift, u32 *wqdepth);
/* Compute the actual RQ depth for @rq_size and @shift in @wqdepth. */
enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 rq_size, u8 shift, u32 *wqdepth);
/* Write @quanta quanta of @wqe at @wqe_idx through the push page. */
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq);
/* Clear stale WQE contents at @qp_wqe_idx before reuse. */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
440 #endif /* IRDMA_USER_H */