• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #ifndef __IWCH_PROVIDER_H__
33 #define __IWCH_PROVIDER_H__
34 
35 #include <linux/list.h>
36 #include <linux/spinlock.h>
37 #include <rdma/ib_verbs.h>
38 #include <asm/types.h>
39 #include "t3cdev.h"
40 #include "iwch.h"
41 #include "cxio_wr.h"
42 #include "cxio_hal.h"
43 
/*
 * Protection domain: wraps the core ib_pd and records the hardware
 * PD id plus a back-pointer to the owning device.
 */
struct iwch_pd {
	struct ib_pd ibpd;	/* core verbs PD; must stay first-class for container_of */
	u32 pdid;		/* hardware protection-domain id */
	struct iwch_dev *rhp;	/* owning iwch device */
};
49 
to_iwch_pd(struct ib_pd * ibpd)50 static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
51 {
52 	return container_of(ibpd, struct iwch_pd, ibpd);
53 }
54 
/*
 * Software image of a TPT (translation/protection table) entry:
 * everything needed to describe a memory region or window to the
 * T3 adapter (stag, permissions, PBL location, length, VA/FBO).
 */
struct tpt_attributes {
	u32 stag;		/* steering tag identifying this entry */
	u32 state:1;		/* presumably valid/invalid — see enum iwch_mmid_state */
	u32 type:2;
	u32 rsvd:1;
	enum tpt_mem_perm perms;	/* access permissions (TPT_* bits) */
	/* sic: "invaliate" spelling is a historic typo; the name is part of
	 * the driver-wide interface, so it is preserved here. */
	u32 remote_invaliate_disable:1;
	u32 zbva:1;		/* NOTE(review): presumably zero-based VA mode — confirm */
	u32 mw_bind_enable:1;
	u32 page_size:5;

	u32 pdid;		/* owning protection-domain id */
	u32 qpid;
	u32 pbl_addr;		/* adapter address of the page-buffer list */
	u32 len;
	u64 va_fbo;		/* virtual address or first-byte offset */
	u32 pbl_size;
};
73 
/*
 * Memory region: core ib_mr plus the driver-side TPT attributes and
 * the page array staged for fast-register work requests.
 */
struct iwch_mr {
	struct ib_mr ibmr;	/* core verbs MR; container_of anchor */
	struct ib_umem *umem;	/* pinned user memory (NULL for kernel MRs) */
	struct iwch_dev *rhp;	/* owning iwch device */
	u64 kva;		/* kernel virtual address, when kernel-mapped */
	struct tpt_attributes attr;	/* hardware TPT image */
	u64 *pages;		/* page DMA addresses for fast-register */
	u32 npages;		/* number of valid entries in pages[] */
};
83 
typedef struct iwch_mw iwch_mw_handle;	/* opaque memory-window handle alias */
85 
to_iwch_mr(struct ib_mr * ibmr)86 static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
87 {
88 	return container_of(ibmr, struct iwch_mr, ibmr);
89 }
90 
/*
 * Memory window: core ib_mw plus driver-side TPT attributes.
 */
struct iwch_mw {
	struct ib_mw ibmw;	/* core verbs MW; container_of anchor */
	struct iwch_dev *rhp;	/* owning iwch device */
	u64 kva;
	struct tpt_attributes attr;	/* hardware TPT image */
};
97 
to_iwch_mw(struct ib_mw * ibmw)98 static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
99 {
100 	return container_of(ibmw, struct iwch_mw, ibmw);
101 }
102 
/*
 * Completion queue: core ib_cq plus the low-level t3_cq, locking, and
 * a refcount/waitqueue pair used to hold destruction until all users
 * have dropped their references.
 */
struct iwch_cq {
	struct ib_cq ibcq;		/* core verbs CQ; container_of anchor */
	struct iwch_dev *rhp;		/* owning iwch device */
	struct t3_cq cq;		/* hardware CQ state */
	spinlock_t lock;		/* protects cq */
	spinlock_t comp_handler_lock;	/* serializes completion upcalls */
	atomic_t refcnt;		/* users; destroyer sleeps on wait below */
	wait_queue_head_t wait;
	u32 __user *user_rptr_addr;	/* userspace read-pointer mirror (user CQs) */
};
113 
to_iwch_cq(struct ib_cq * ibcq)114 static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
115 {
116 	return container_of(ibcq, struct iwch_cq, ibcq);
117 }
118 
/* Bit flags kept in iwch_qp.flags. */
enum IWCH_QP_FLAGS {
	QP_QUIESCED = 0x01	/* QP has been quiesced; tested by qp_quiesced() */
};
122 
/* Negotiated MPA (Marker PDU Aligned framing) parameters for a connection. */
struct iwch_mpa_attributes {
	u8 initiator;		/* nonzero if we initiated the connection */
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;	/* iWARP: enable inbound Read Resp. */
	u8 crc_enabled;
	u8 version;	/* MPA revision: 0 or 1 */
};
130 
/*
 * QP attribute bundle consumed by iwch_modify_qp(); which members are
 * honored on a given call is selected by the enum iwch_qp_attr_mask
 * bits passed alongside it.
 */
struct iwch_qp_attributes {
	u32 scq;		/* presumably send CQ id — confirm against qp setup */
	u32 rcq;		/* presumably recv CQ id — confirm against qp setup */
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;		/* current state (enum iwch_qp_state) */
	u8 enable_rdma_read;
	u8 enable_rdma_write;	/* enable inbound Read Resp. */
	u8 enable_bind;
	u8 enable_mmid0_fastreg;	/* Enable STAG0 + Fast-register */
	u32 max_ord;		/* NOTE(review): presumably outbound RDMA read depth */
	u32 max_ird;		/* NOTE(review): presumably inbound RDMA read depth */
	u32 pd;	/* IN */
	/*
	 * Next QP state. If it specifies the current state, only the
	 * QP attributes will be modified.
	 */
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct iwch_mpa_attributes mpa_attr;	/* IN-OUT */
	struct iwch_ep *llp_stream_handle;	/* connection endpoint */
	char *stream_msg_buf;	/* Last stream msg. before Idle -> RTS */
	u32 stream_msg_buf_len;	/* Only on Idle -> RTS */
};
160 
/*
 * Queue pair: core ib_qp plus the low-level t3_wq, the connection
 * endpoint, and a refcount/waitqueue pair that holds destruction
 * until all references are dropped.
 */
struct iwch_qp {
	struct ib_qp ibqp;		/* core verbs QP; container_of anchor */
	struct iwch_dev *rhp;		/* owning iwch device */
	struct iwch_ep *ep;		/* connection endpoint */
	struct iwch_qp_attributes attr;	/* current attributes */
	struct t3_wq wq;		/* hardware work queue state */
	spinlock_t lock;		/* protects wq */
	atomic_t refcnt;		/* users; destroyer sleeps on wait below */
	wait_queue_head_t wait;
	enum IWCH_QP_FLAGS flags;	/* e.g. QP_QUIESCED */
	struct timer_list timer;
};
173 
qp_quiesced(struct iwch_qp * qhp)174 static inline int qp_quiesced(struct iwch_qp *qhp)
175 {
176 	return qhp->flags & QP_QUIESCED;
177 }
178 
to_iwch_qp(struct ib_qp * ibqp)179 static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
180 {
181 	return container_of(ibqp, struct iwch_qp, ibqp);
182 }
183 
/* Take / drop a reference on a QP (pairs with iwch_qp.refcnt). */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
186 
/*
 * Per-process user context: core ib_ucontext plus the cxio context and
 * the list of pending mmap keys handed out to userspace.
 */
struct iwch_ucontext {
	struct ib_ucontext ibucontext;	/* core verbs context; container_of anchor */
	struct cxio_ucontext uctx;	/* low-level HAL context */
	u32 key;		/* next mmap key to hand out */
	spinlock_t mmap_lock;	/* protects mmaps list and key */
	struct list_head mmaps;	/* pending iwch_mm_entry records */
};
194 
to_iwch_ucontext(struct ib_ucontext * c)195 static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
196 {
197 	return container_of(c, struct iwch_ucontext, ibucontext);
198 }
199 
/*
 * One pending mmap: maps a (key, len) cookie handed to userspace onto
 * the address to back the eventual mmap() call.
 */
struct iwch_mm_entry {
	struct list_head entry;	/* linkage on iwch_ucontext.mmaps */
	u64 addr;		/* address to map */
	u32 key;		/* cookie matched by remove_mmap() */
	unsigned len;		/* expected mapping length */
};
206 
/*
 * Find the pending mmap entry matching (key, len), unlink it from the
 * context's list, and return it; NULL if no entry matches.  The caller
 * owns the returned entry.
 */
static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct iwch_mm_entry *mm, *tmp;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) {
		if (mm->key != key || mm->len != len)
			continue;
		list_del_init(&mm->entry);
		spin_unlock(&ucontext->mmap_lock);
		PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
		     key, (unsigned long long) mm->addr, mm->len);
		return mm;
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}
228 
insert_mmap(struct iwch_ucontext * ucontext,struct iwch_mm_entry * mm)229 static inline void insert_mmap(struct iwch_ucontext *ucontext,
230 			       struct iwch_mm_entry *mm)
231 {
232 	spin_lock(&ucontext->mmap_lock);
233 	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
234 	     mm->key, (unsigned long long) mm->addr, mm->len);
235 	list_add_tail(&mm->entry, &ucontext->mmaps);
236 	spin_unlock(&ucontext->mmap_lock);
237 }
238 
/*
 * Selects which iwch_qp_attributes members iwch_modify_qp() honors.
 * NOTE(review): IWCH_QP_ATTR_ENABLE_RDMA_BIND is not part of
 * VALID_MODIFY below — confirm whether that is intentional.
 */
enum iwch_qp_attr_mask {
	IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
	IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	IWCH_QP_ATTR_MAX_ORD = 1 << 11,
	IWCH_QP_ATTR_MAX_IRD = 1 << 12,
	IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
	IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	/* Union of the masks a caller may pass for a plain modify. */
	IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
				     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
				     IWCH_QP_ATTR_MAX_ORD |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
259 
/*
 * Modify the QP using the attrs members selected by mask; internal is
 * nonzero for driver-initiated (as opposed to verbs-initiated) changes.
 * Returns 0 on success or a negative errno.
 */
int iwch_modify_qp(struct iwch_dev *rhp,
				struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs,
				int internal);
265 
/* Driver QP states; ib_qp_state values map onto these via iwch_convert_state(). */
enum iwch_qp_state {
	IWCH_QP_STATE_IDLE,
	IWCH_QP_STATE_RTS,
	IWCH_QP_STATE_ERROR,
	IWCH_QP_STATE_TERMINATE,
	IWCH_QP_STATE_CLOSING,
	IWCH_QP_STATE_TOT	/* number of states; not itself a state */
};
274 
iwch_convert_state(enum ib_qp_state ib_state)275 static inline int iwch_convert_state(enum ib_qp_state ib_state)
276 {
277 	switch (ib_state) {
278 	case IB_QPS_RESET:
279 	case IB_QPS_INIT:
280 		return IWCH_QP_STATE_IDLE;
281 	case IB_QPS_RTS:
282 		return IWCH_QP_STATE_RTS;
283 	case IB_QPS_SQD:
284 		return IWCH_QP_STATE_CLOSING;
285 	case IB_QPS_SQE:
286 		return IWCH_QP_STATE_TERMINATE;
287 	case IB_QPS_ERR:
288 		return IWCH_QP_STATE_ERROR;
289 	default:
290 		return -1;
291 	}
292 }
293 
iwch_ib_to_tpt_access(int acc)294 static inline u32 iwch_ib_to_tpt_access(int acc)
295 {
296 	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
297 	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
298 	       (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
299 	       (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
300 	       TPT_LOCAL_READ;
301 }
302 
iwch_ib_to_tpt_bind_access(int acc)303 static inline u32 iwch_ib_to_tpt_bind_access(int acc)
304 {
305 	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
306 	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
307 }
308 
/* Validity of a memory id / STAG (see tpt_attributes.state). */
enum iwch_mmid_state {
	IWCH_STAG_STATE_VALID,
	IWCH_STAG_STATE_INVALID
};
313 
/* Flags controlling QP context queries. */
enum iwch_qp_query_flags {
	IWCH_QP_QUERY_CONTEXT_NONE = 0x0,	/* No ctx; Only attrs */
	IWCH_QP_QUERY_CONTEXT_GET = 0x1,	/* Get ctx + attrs */
	IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,	/* Not Supported */

	/*
	 * Quiesce QP context; Consumer
	 * will NOT replay outstanding WR
	 */
	IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
	IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
	/* NOTE(review): 0x32 breaks the single-bit pattern above
	 * (0x1/0x2/0x4/0x8); possibly 0x10 was intended — confirm
	 * against the firmware interface before changing. */
	IWCH_QP_QUERY_TEST_USERWRITE = 0x32	/* Test special */
};
327 
/* Number of RQEs currently posted on the QP's receive queue. */
u16 iwch_rqes_posted(struct iwch_qp *qhp);
/* Post send/receive work requests; on error *bad_wr points at the culprit. */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
/* Poll up to num_entries completions into wc[]; returns count polled. */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/* Post a TERMINATE message in response to rsp_msg. */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
/* Post a zero-byte read on the endpoint's QP. */
int iwch_post_zb_read(struct iwch_ep *ep);
/* Register / unregister the device with the RDMA core. */
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
void stop_read_rep_timer(struct iwch_qp *qhp);
/* Memory-registration helpers: TPT entry write and PBL management. */
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
		      struct iwch_mr *mhp, int shift);
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
void iwch_free_pbl(struct iwch_mr *mhp);
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
344 
/* Node description string reported to the RDMA core. */
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
346 
347 #endif
348