/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

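/* Compare two P_Keys per the IBA convention: the low 15 bits are the key
 * base value and the top bit is the membership bit (1 = full member,
 * 0 = limited member). Two P_Keys match when the base values are equal
 * and nonzero, and at least one side is a full member; two limited
 * members never match each other.
 */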
static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

/* Return >0 if psn_a > psn_b
 *	   0 if psn_a == psn_b
 *	  <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}
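
/* psn_compare() relies on PSNs being 24-bit serial numbers: the shift by
 * 8 discards the unused top byte and moves the 24-bit difference into the
 * sign bit of the s32, so the ordering works across wraparound. For
 * example, psn_compare(0x000000, 0xffffff) > 0, since PSN 0 is one step
 * ahead of 0xffffff modulo 2^24.
 */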

struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_entry	pelem;
};

struct rxe_pd {
	struct ib_pd            ibpd;
	struct rxe_pool_entry	pelem;
};

struct rxe_ah {
	struct ib_ah		ibah;
	struct rxe_pool_entry	pelem;
	struct rxe_pd		*pd;
	struct rxe_av		av;
};

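/* A completion entry is written in one of two layouts sharing the same
 * storage: ibwc for kernel consumers, uibwc for completion queues that
 * are mapped into user space (cq->is_user selects which one is filled).
 */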
struct rxe_cqe {
	union {
		struct ib_wc		ibwc;
		struct ib_uverbs_wc	uibwc;
	};
};

struct rxe_cq {
	struct ib_cq		ibcq;
	struct rxe_pool_entry	pelem;
	struct rxe_queue	*queue;
	spinlock_t		cq_lock;
	u8			notify;
	bool			is_dying;
	int			is_user;
	struct tasklet_struct	comp_task;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	int			max_wr;
	int			max_sge;
	int			max_inline;
	spinlock_t		sq_lock; /* guard queue */
	struct rxe_queue	*queue;
};

struct rxe_rq {
	int			max_wr;
	int			max_sge;
	spinlock_t		producer_lock; /* guard queue producer */
	spinlock_t		consumer_lock; /* guard queue consumer */
	struct rxe_queue	*queue;
};

struct rxe_srq {
	struct ib_srq		ibsrq;
	struct rxe_pool_entry	pelem;
	struct rxe_pd		*pd;
	struct rxe_rq		rq;
	u32			srq_num;

	int			limit;
	int			error;
};

enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};

struct rxe_req_info {
	enum rxe_qp_state	state;
	int			wqe_index;
	u32			psn;
	int			opcode;
	atomic_t		rd_atomic;
	int			wait_fence;
	int			need_rd_atomic;
	int			wait_psn;
	int			need_retry;
	int			noack_pkts;
	struct rxe_task		task;
};

struct rxe_comp_info {
	u32			psn;
	int			opcode;
	int			timeout;
	int			timeout_retry;
	int			started_retry;
	u32			retry_cnt;
	u32			rnr_retry;
	struct rxe_task		task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

struct resp_res {
	int			type;
	int			replay;
	u32			first_psn;
	u32			last_psn;
	u32			cur_psn;
	enum rdatm_res_state	state;

	union {
		struct {
			struct sk_buff	*skb;
		} atomic;
		struct {
			struct rxe_mr	*mr;
			u64		va_org;
			u32		rkey;
			u32		length;
			u64		va;
			u32		resid;
		} read;
	};
};

struct rxe_resp_info {
	enum rxe_qp_state	state;
	u32			msn;
	u32			psn;
	u32			ack_psn;
	int			opcode;
	int			drop_msg;
	int			goto_error;
	int			sent_psn_nak;
	enum ib_wc_status	status;
	u8			aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe	*wqe;

	/* RDMA read / atomic only */
	u64			va;
	u64			offset;
	struct rxe_mr		*mr;
	u32			resid;
	u32			rkey;
	u32			length;
	u64			atomic_orig;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe	wqe;
		struct ib_sge		sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
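	/* An illustrative sketch (not the exact responder code) of how a
	 * slot is claimed, assuming resources[] was sized from the QP's
	 * max_dest_rd_atomic attribute:
	 *
	 *	res = &qp->resp.resources[qp->resp.res_head];
	 *	if (++qp->resp.res_head == qp->attr.max_dest_rd_atomic)
	 *		qp->resp.res_head = 0;
	 */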
	struct resp_res		*resources;
	unsigned int		res_head;
	unsigned int		res_tail;
	struct resp_res		*res;
	struct rxe_task		task;
};

struct rxe_qp {
	struct ib_qp		ibqp;
	struct rxe_pool_entry	pelem;
	struct ib_qp_attr	attr;
	unsigned int		valid;
	unsigned int		mtu;
	bool			is_user;

	struct rxe_pd		*pd;
	struct rxe_srq		*srq;
	struct rxe_cq		*scq;
	struct rxe_cq		*rcq;

	enum ib_sig_type	sq_sig_type;

	struct rxe_sq		sq;
	struct rxe_rq		rq;

	struct socket		*sk;
	u32			dst_cookie;
	u16			src_port;

	struct rxe_av		pri_av;
	struct rxe_av		alt_av;

	/* list of mcast groups qp has joined (for cleanup) */
	struct list_head	grp_list;
	spinlock_t		grp_lock; /* guard grp_list */

	struct sk_buff_head	req_pkts;
	struct sk_buff_head	resp_pkts;
	struct sk_buff_head	send_pkts;

	struct rxe_req_info	req;
	struct rxe_comp_info	comp;
	struct rxe_resp_info	resp;

	atomic_t		ssn;
	atomic_t		skb_out;
	int			need_req_skb;

	/* Timer for retransmitting packets when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list rnr_nak_timer;

	spinlock_t		state_lock; /* guard requester and completer */

	struct execute_work	cleanup_work;
};

enum rxe_mr_state {
	RXE_MR_STATE_ZOMBIE,
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_type {
	RXE_MR_TYPE_NONE,
	RXE_MR_TYPE_DMA,
	RXE_MR_TYPE_MR,
};

enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};

#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))

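/* MR page state is kept as a two-level map: an MR holds an array of
 * pointers to rxe_map blocks, and each block is one page's worth of
 * rxe_phys_buf address/size entries, i.e. RXE_BUF_PER_MAP entries per
 * block.
 */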
struct rxe_phys_buf {
	u64      addr;
	u64      size;
};

struct rxe_map {
	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
};

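/* In rxe the upper 24 bits of an lkey/rkey are the pool index and the
 * low 8 bits are a key byte, so shifting right by 8 recovers the index;
 * an rkey names a memory window when that index falls within the MW
 * index range.
 */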
static inline int rkey_is_mw(u32 rkey)
{
	u32 index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}

struct rxe_mr {
	struct rxe_pool_entry	pelem;
	struct ib_mr		ibmr;

	struct ib_umem		*umem;

	u32			lkey;
	u32			rkey;
	enum rxe_mr_state	state;
	enum rxe_mr_type	type;
	u64			va;
	u64			iova;
	size_t			length;
	u32			offset;
	int			access;

	int			page_shift;
	int			page_mask;
	int			map_shift;
	int			map_mask;

	u32			num_buf;
	u32			nbuf;

	u32			max_buf;
	u32			num_map;

	atomic_t		num_mw;

	struct rxe_map		**map;
};

enum rxe_mw_state {
	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID	= RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw		ibmw;
	struct rxe_pool_entry	pelem;
	spinlock_t		lock;
	enum rxe_mw_state	state;
	struct rxe_qp		*qp; /* Type 2 only */
	struct rxe_mr		*mr;
	u32			rkey;
	int			access;
	u64			addr;
	u64			length;
};

struct rxe_mc_grp {
	struct rxe_pool_entry	pelem;
	spinlock_t		mcg_lock; /* guard group */
	struct rxe_dev		*rxe;
	struct list_head	qp_list;
	union ib_gid		mgid;
	int			num_qp;
	u32			qkey;
	u16			pkey;
};

struct rxe_mc_elem {
	struct rxe_pool_entry	pelem;
	struct list_head	qp_list;
	struct list_head	grp_list;
	struct rxe_qp		*qp;
	struct rxe_mc_grp	*grp;
};

struct rxe_port {
	struct ib_port_attr	attr;
	__be64			port_guid;
	__be64			subnet_prefix;
	spinlock_t		port_lock; /* guard port */
	unsigned int		mtu_cap;
	/* special QPs */
	u32			qp_smi_index;
	u32			qp_gsi_index;
};

struct rxe_dev {
	struct ib_device	ib_dev;
	struct ib_device_attr	attr;
	int			max_ucontext;
	int			max_inline_data;
	struct mutex	usdev_lock;

	struct net_device	*ndev;

	int			xmit_errors;

	struct rxe_pool		uc_pool;
	struct rxe_pool		pd_pool;
	struct rxe_pool		ah_pool;
	struct rxe_pool		srq_pool;
	struct rxe_pool		qp_pool;
	struct rxe_pool		cq_pool;
	struct rxe_pool		mr_pool;
	struct rxe_pool		mw_pool;
	struct rxe_pool		mc_grp_pool;
	struct rxe_pool		mc_elem_pool;

	spinlock_t		pending_lock; /* guard pending_mmaps */
	struct list_head	pending_mmaps;

	spinlock_t		mmap_offset_lock; /* guard mmap_offset */
	u64			mmap_offset;

	atomic64_t		stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port		port;
	struct crypto_shash	*tfm;
};

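/* Per-device counters are plain atomic64s indexed by enum rxe_counters,
 * so they can be bumped from any context without extra locking.
 */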
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

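/* Converters from the ib_* core objects to the rxe structs that embed
 * them. Each is a NULL-safe container_of(). An illustrative (hypothetical
 * signature) use in a verbs entry point:
 *
 *	static int rxe_post_send(struct ib_qp *ibqp, ...)
 *	{
 *		struct rxe_qp *qp = to_rqp(ibqp);
 *		...
 *	}
 */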
static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
	return to_rpd(mw->ibmw.pd);
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

#endif /* RXE_VERBS_H */