• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2005 Ammasso, Inc.  All rights reserved.
3  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  */
34 #include "c2.h"
35 #include "c2_wr.h"
36 #include "c2_vq.h"
37 #include <rdma/iw_cm.h>
38 
c2_llp_connect(struct iw_cm_id * cm_id,struct iw_cm_conn_param * iw_param)39 int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
40 {
41 	struct c2_dev *c2dev = to_c2dev(cm_id->device);
42 	struct ib_qp *ibqp;
43 	struct c2_qp *qp;
44 	struct c2wr_qp_connect_req *wr;	/* variable size needs a malloc. */
45 	struct c2_vq_req *vq_req;
46 	int err;
47 
48 	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
49 	if (!ibqp)
50 		return -EINVAL;
51 	qp = to_c2qp(ibqp);
52 
53 	/* Associate QP <--> CM_ID */
54 	cm_id->provider_data = qp;
55 	cm_id->add_ref(cm_id);
56 	qp->cm_id = cm_id;
57 
58 	/*
59 	 * only support the max private_data length
60 	 */
61 	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
62 		err = -EINVAL;
63 		goto bail0;
64 	}
65 	/*
66 	 * Set the rdma read limits
67 	 */
68 	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
69 	if (err)
70 		goto bail0;
71 
72 	/*
73 	 * Create and send a WR_QP_CONNECT...
74 	 */
75 	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
76 	if (!wr) {
77 		err = -ENOMEM;
78 		goto bail0;
79 	}
80 
81 	vq_req = vq_req_alloc(c2dev);
82 	if (!vq_req) {
83 		err = -ENOMEM;
84 		goto bail1;
85 	}
86 
87 	c2_wr_set_id(wr, CCWR_QP_CONNECT);
88 	wr->hdr.context = 0;
89 	wr->rnic_handle = c2dev->adapter_handle;
90 	wr->qp_handle = qp->adapter_handle;
91 
92 	wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
93 	wr->remote_port = cm_id->remote_addr.sin_port;
94 
95 	/*
96 	 * Move any private data from the callers's buf into
97 	 * the WR.
98 	 */
99 	if (iw_param->private_data) {
100 		wr->private_data_length =
101 			cpu_to_be32(iw_param->private_data_len);
102 		memcpy(&wr->private_data[0], iw_param->private_data,
103 		       iw_param->private_data_len);
104 	} else
105 		wr->private_data_length = 0;
106 
107 	/*
108 	 * Send WR to adapter.  NOTE: There is no synch reply from
109 	 * the adapter.
110 	 */
111 	err = vq_send_wr(c2dev, (union c2wr *) wr);
112 	vq_req_free(c2dev, vq_req);
113 
114  bail1:
115 	kfree(wr);
116  bail0:
117 	if (err) {
118 		/*
119 		 * If we fail, release reference on QP and
120 		 * disassociate QP from CM_ID
121 		 */
122 		cm_id->provider_data = NULL;
123 		qp->cm_id = NULL;
124 		cm_id->rem_ref(cm_id);
125 	}
126 	return err;
127 }
128 
c2_llp_service_create(struct iw_cm_id * cm_id,int backlog)129 int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
130 {
131 	struct c2_dev *c2dev;
132 	struct c2wr_ep_listen_create_req wr;
133 	struct c2wr_ep_listen_create_rep *reply;
134 	struct c2_vq_req *vq_req;
135 	int err;
136 
137 	c2dev = to_c2dev(cm_id->device);
138 	if (c2dev == NULL)
139 		return -EINVAL;
140 
141 	/*
142 	 * Allocate verbs request.
143 	 */
144 	vq_req = vq_req_alloc(c2dev);
145 	if (!vq_req)
146 		return -ENOMEM;
147 
148 	/*
149 	 * Build the WR
150 	 */
151 	c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
152 	wr.hdr.context = (u64) (unsigned long) vq_req;
153 	wr.rnic_handle = c2dev->adapter_handle;
154 	wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
155 	wr.local_port = cm_id->local_addr.sin_port;
156 	wr.backlog = cpu_to_be32(backlog);
157 	wr.user_context = (u64) (unsigned long) cm_id;
158 
159 	/*
160 	 * Reference the request struct.  Dereferenced in the int handler.
161 	 */
162 	vq_req_get(c2dev, vq_req);
163 
164 	/*
165 	 * Send WR to adapter
166 	 */
167 	err = vq_send_wr(c2dev, (union c2wr *) & wr);
168 	if (err) {
169 		vq_req_put(c2dev, vq_req);
170 		goto bail0;
171 	}
172 
173 	/*
174 	 * Wait for reply from adapter
175 	 */
176 	err = vq_wait_for_reply(c2dev, vq_req);
177 	if (err)
178 		goto bail0;
179 
180 	/*
181 	 * Process reply
182 	 */
183 	reply =
184 	    (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
185 	if (!reply) {
186 		err = -ENOMEM;
187 		goto bail1;
188 	}
189 
190 	if ((err = c2_errno(reply)) != 0)
191 		goto bail1;
192 
193 	/*
194 	 * Keep the adapter handle. Used in subsequent destroy
195 	 */
196 	cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
197 
198 	/*
199 	 * free vq stuff
200 	 */
201 	vq_repbuf_free(c2dev, reply);
202 	vq_req_free(c2dev, vq_req);
203 
204 	return 0;
205 
206  bail1:
207 	vq_repbuf_free(c2dev, reply);
208  bail0:
209 	vq_req_free(c2dev, vq_req);
210 	return err;
211 }
212 
213 
c2_llp_service_destroy(struct iw_cm_id * cm_id)214 int c2_llp_service_destroy(struct iw_cm_id *cm_id)
215 {
216 
217 	struct c2_dev *c2dev;
218 	struct c2wr_ep_listen_destroy_req wr;
219 	struct c2wr_ep_listen_destroy_rep *reply;
220 	struct c2_vq_req *vq_req;
221 	int err;
222 
223 	c2dev = to_c2dev(cm_id->device);
224 	if (c2dev == NULL)
225 		return -EINVAL;
226 
227 	/*
228 	 * Allocate verbs request.
229 	 */
230 	vq_req = vq_req_alloc(c2dev);
231 	if (!vq_req)
232 		return -ENOMEM;
233 
234 	/*
235 	 * Build the WR
236 	 */
237 	c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
238 	wr.hdr.context = (unsigned long) vq_req;
239 	wr.rnic_handle = c2dev->adapter_handle;
240 	wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
241 
242 	/*
243 	 * reference the request struct.  dereferenced in the int handler.
244 	 */
245 	vq_req_get(c2dev, vq_req);
246 
247 	/*
248 	 * Send WR to adapter
249 	 */
250 	err = vq_send_wr(c2dev, (union c2wr *) & wr);
251 	if (err) {
252 		vq_req_put(c2dev, vq_req);
253 		goto bail0;
254 	}
255 
256 	/*
257 	 * Wait for reply from adapter
258 	 */
259 	err = vq_wait_for_reply(c2dev, vq_req);
260 	if (err)
261 		goto bail0;
262 
263 	/*
264 	 * Process reply
265 	 */
266 	reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
267 	if (!reply) {
268 		err = -ENOMEM;
269 		goto bail0;
270 	}
271 	if ((err = c2_errno(reply)) != 0)
272 		goto bail1;
273 
274  bail1:
275 	vq_repbuf_free(c2dev, reply);
276  bail0:
277 	vq_req_free(c2dev, vq_req);
278 	return err;
279 }
280 
c2_llp_accept(struct iw_cm_id * cm_id,struct iw_cm_conn_param * iw_param)281 int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
282 {
283 	struct c2_dev *c2dev = to_c2dev(cm_id->device);
284 	struct c2_qp *qp;
285 	struct ib_qp *ibqp;
286 	struct c2wr_cr_accept_req *wr;	/* variable length WR */
287 	struct c2_vq_req *vq_req;
288 	struct c2wr_cr_accept_rep *reply;	/* VQ Reply msg ptr. */
289 	int err;
290 
291 	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
292 	if (!ibqp)
293 		return -EINVAL;
294 	qp = to_c2qp(ibqp);
295 
296 	/* Set the RDMA read limits */
297 	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
298 	if (err)
299 		goto bail0;
300 
301 	/* Allocate verbs request. */
302 	vq_req = vq_req_alloc(c2dev);
303 	if (!vq_req) {
304 		err = -ENOMEM;
305 		goto bail0;
306 	}
307 	vq_req->qp = qp;
308 	vq_req->cm_id = cm_id;
309 	vq_req->event = IW_CM_EVENT_ESTABLISHED;
310 
311 	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
312 	if (!wr) {
313 		err = -ENOMEM;
314 		goto bail1;
315 	}
316 
317 	/* Build the WR */
318 	c2_wr_set_id(wr, CCWR_CR_ACCEPT);
319 	wr->hdr.context = (unsigned long) vq_req;
320 	wr->rnic_handle = c2dev->adapter_handle;
321 	wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
322 	wr->qp_handle = qp->adapter_handle;
323 
324 	/* Replace the cr_handle with the QP after accept */
325 	cm_id->provider_data = qp;
326 	cm_id->add_ref(cm_id);
327 	qp->cm_id = cm_id;
328 
329 	cm_id->provider_data = qp;
330 
331 	/* Validate private_data length */
332 	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
333 		err = -EINVAL;
334 		goto bail1;
335 	}
336 
337 	if (iw_param->private_data) {
338 		wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
339 		memcpy(&wr->private_data[0],
340 		       iw_param->private_data, iw_param->private_data_len);
341 	} else
342 		wr->private_data_length = 0;
343 
344 	/* Reference the request struct.  Dereferenced in the int handler. */
345 	vq_req_get(c2dev, vq_req);
346 
347 	/* Send WR to adapter */
348 	err = vq_send_wr(c2dev, (union c2wr *) wr);
349 	if (err) {
350 		vq_req_put(c2dev, vq_req);
351 		goto bail1;
352 	}
353 
354 	/* Wait for reply from adapter */
355 	err = vq_wait_for_reply(c2dev, vq_req);
356 	if (err)
357 		goto bail1;
358 
359 	/* Check that reply is present */
360 	reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
361 	if (!reply) {
362 		err = -ENOMEM;
363 		goto bail1;
364 	}
365 
366 	err = c2_errno(reply);
367 	vq_repbuf_free(c2dev, reply);
368 
369 	if (!err)
370 		c2_set_qp_state(qp, C2_QP_STATE_RTS);
371  bail1:
372 	kfree(wr);
373 	vq_req_free(c2dev, vq_req);
374  bail0:
375 	if (err) {
376 		/*
377 		 * If we fail, release reference on QP and
378 		 * disassociate QP from CM_ID
379 		 */
380 		cm_id->provider_data = NULL;
381 		qp->cm_id = NULL;
382 		cm_id->rem_ref(cm_id);
383 	}
384 	return err;
385 }
386 
/*
 * Reject a pending connection request by posting a CCWR_CR_REJECT for
 * the endpoint handle saved in cm_id->provider_data.
 *
 * NOTE: @pdata/@pdata_len are accepted for interface compatibility but
 * are not forwarded to the adapter by this implementation.
 *
 * Returns 0 on success or a negative errno.
 */
int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct c2_dev *c2dev;
	struct c2wr_cr_reject_req wr;
	struct c2_vq_req *vq_req;
	struct c2wr_cr_reject_rep *reply;
	int err;

	c2dev = to_c2dev(cm_id->device);
	if (c2dev == NULL)
		return -EINVAL;	/* guard, consistent with service_create/destroy */

	/*
	 * Allocate verbs request.
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/*
	 * Build the WR
	 */
	c2_wr_set_id(&wr, CCWR_CR_REJECT);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;

	/*
	 * reference the request struct.  dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process reply
	 */
	reply = (struct c2wr_cr_reject_rep *) (unsigned long)
		vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}
	err = c2_errno(reply);
	/*
	 * free vq stuff
	 */
	vq_repbuf_free(c2dev, reply);

 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
452