/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/gfp.h>

#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"

/* Size of a CE message, rounded up to a 32-byte multiple */
#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))

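/*
 * Look up the CQ behind message queue index cqn and take a reference
 * on it.  Returns NULL if the CQ has already been removed from
 * qptr_array.  Callers drop the reference with c2_cq_put().
 */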
static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
	struct c2_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);
	cq = c2dev->qptr_array[cqn];
	if (!cq) {
		spin_unlock_irqrestore(&c2dev->lock, flags);
		return NULL;
	}
	atomic_inc(&cq->refcount);
	spin_unlock_irqrestore(&c2dev->lock, flags);
	return cq;
}

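/*
 * Drop a reference taken by c2_cq_get().  The final put wakes
 * c2_free_cq(), which waits for the refcount to reach zero before
 * tearing the CQ down.
 */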
static void c2_cq_put(struct c2_cq *cq)
{
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}

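/*
 * Deliver a completion event for the CQ backing MQ mq_index by
 * calling the consumer's comp_handler.  Events for a CQ that has
 * already been destroyed are logged and discarded.
 */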
void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
	struct c2_cq *cq;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq) {
		printk(KERN_WARNING "discarding events on destroyed CQN=%u\n",
		       mq_index);
		return;
	}

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	c2_cq_put(cq);
}

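/*
 * Purge a dying QP's completions from a CQ: walk the unconsumed CE
 * messages in the CQ's message queue and zero the qp_user_context of
 * every entry that points at qp, so c2_poll_one() will skip them.
 */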
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
	struct c2_cq *cq;
	struct c2_mq *q;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	q = &cq->mq;
	if (!c2_mq_empty(q)) {
		u16 priv = q->priv;
		struct c2wr_ce *msg;

		while (priv != be16_to_cpu(*q->shared)) {
			msg = (struct c2wr_ce *)
				(q->msg_pool.host + priv * q->msg_size);
			if (msg->qp_user_context == (u64) (unsigned long) qp)
				msg->qp_user_context = (u64) 0;
			priv = (priv + 1) % q->q_size;
		}
	}
	spin_unlock_irq(&cq->lock);
	c2_cq_put(cq);
}

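/*
 * Map an adapter CE status code onto the matching ib_wc_status value;
 * unknown codes fall through to IB_WC_GENERAL_ERR.
 */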
static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
{
	switch (status) {
	case C2_OK:
		return IB_WC_SUCCESS;
	case CCERR_FLUSHED:
		return IB_WC_WR_FLUSH_ERR;
	case CCERR_BASE_AND_BOUNDS_VIOLATION:
		return IB_WC_LOC_PROT_ERR;
	case CCERR_ACCESS_VIOLATION:
		return IB_WC_LOC_ACCESS_ERR;
	case CCERR_TOTAL_LENGTH_TOO_BIG:
		return IB_WC_LOC_LEN_ERR;
	case CCERR_INVALID_WINDOW:
		return IB_WC_MW_BIND_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

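/*
 * Fill *entry from the next CE message on the CQ's message queue,
 * skipping entries whose QP has already been freed (NULL
 * qp_user_context).  Returns 0 on success or -EAGAIN if the queue is
 * empty.  Must be called with cq->lock held.
 */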
static inline int c2_poll_one(struct c2_dev *c2dev,
			      struct c2_cq *cq, struct ib_wc *entry)
{
	struct c2wr_ce *ce;
	struct c2_qp *qp;
	int is_recv = 0;

	ce = c2_mq_consume(&cq->mq);
	if (!ce)
		return -EAGAIN;

	/*
	 * If the QP pulled from the CE is NULL, the QP has already
	 * been freed and we are unable to process the completion;
	 * discard it and try pulling the next message.
	 */
	while ((qp =
		(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
		c2_mq_free(&cq->mq);
		ce = c2_mq_consume(&cq->mq);
		if (!ce)
			return -EAGAIN;
	}

	entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
	entry->wr_id = ce->hdr.context;
	entry->qp = &qp->ibqp;
	entry->wc_flags = 0;
	entry->slid = 0;
	entry->sl = 0;
	entry->src_qp = 0;
	entry->dlid_path_bits = 0;
	entry->pkey_index = 0;

	switch (c2_wr_get_id(ce)) {
	case C2_WR_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case C2_WR_TYPE_RDMA_WRITE:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case C2_WR_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case C2_WR_TYPE_BIND_MW:
		entry->opcode = IB_WC_BIND_MW;
		break;
	case C2_WR_TYPE_RECV:
		entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
		entry->opcode = IB_WC_RECV;
		is_recv = 1;
		break;
	default:
		break;
	}

	/* consume the WQEs */
	if (is_recv)
		c2_mq_lconsume(&qp->rq_mq, 1);
	else
		c2_mq_lconsume(&qp->sq_mq,
			       be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);

	/* free the message */
	c2_mq_free(&cq->mq);

	return 0;
}

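/*
 * Poll up to num_entries completions into the entry array, stopping
 * early if the CQ runs dry.  Returns the number of entries filled.
 */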
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct c2_dev *c2dev = to_c2dev(ibcq->device);
	struct c2_cq *cq = to_c2cq(ibcq);
	unsigned long flags;
	int npolled, err;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = c2_poll_one(c2dev, cq, entry + npolled);
		if (err)
			break;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

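/*
 * Request a completion notification by writing the notification type
 * and an arm command into the adapter's shared MQ page.  If
 * IB_CQ_REPORT_MISSED_EVENTS is set, returns nonzero when completions
 * are already pending on the CQ.
 */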
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct c2_mq_shared __iomem *shared;
	struct c2_cq *cq;
	unsigned long flags;
	int ret = 0;

	cq = to_c2cq(ibcq);
	shared = cq->mq.peer;

	if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
	else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
	else
		return -EINVAL;

	writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);

	/*
	 * Now read back shared->armed to make the PCI
	 * write synchronous.  This is necessary for
	 * correct cq notification semantics.
	 */
	readb(&shared->armed);

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&cq->lock, flags);
		ret = !c2_mq_empty(&cq->mq);
		spin_unlock_irqrestore(&cq->lock, flags);
	}

	return ret;
}

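/* Free the DMA-coherent message pool backing a CQ's message queue. */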
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
			  mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}

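/*
 * Allocate a DMA-coherent pool of q_size messages of msg_size bytes
 * each and initialize the MQ over it as a host-target queue.  The MQ
 * index and peer pointer are unknown here; c2_init_cq() fills them in
 * from the adapter's reply.
 */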
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
			   size_t q_size, size_t msg_size)
{
	u8 *pool_start;

	if (q_size > SIZE_MAX / msg_size)
		return -EINVAL;

	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
					&mq->host_dma, GFP_KERNEL);
	if (!pool_start)
		return -ENOMEM;

	c2_mq_rep_init(mq,
		       0,		/* index (currently unknown) */
		       q_size,
		       msg_size,
		       pool_start,
		       NULL,		/* peer (currently unknown) */
		       C2_MQ_HOST_TARGET);

	dma_unmap_addr_set(mq, mapping, mq->host_dma);

	return 0;
}

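/*
 * Create a CQ: allocate the shared pointer and message pool, issue a
 * CCWR_CQ_CREATE work request to the adapter, map the adapter-side
 * shared page, and publish the CQ in qptr_array under the MQ index
 * the adapter assigned.  May sleep.
 */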
int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;
	struct c2wr_cq_create_rep *reply;
	unsigned long peer_pa;
	struct c2_vq_req *vq_req;
	int err;

	might_sleep();

	cq->ibcq.cqe = entries - 1;
	cq->is_kernel = !ctx;

	/* Allocate a shared pointer */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Allocate pages for the message pool */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) cq;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail2;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail2;

	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail2;
	}

	err = c2_errno(reply);
	if (err)
		goto bail3;

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail3;
	}

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	/*
	 * Use the MQ index allocated by the adapter to
	 * store the CQ in the qptr_array
	 */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail3:
	vq_repbuf_free(c2dev, reply);
bail2:
	vq_req_free(c2dev, vq_req);
bail1:
	c2_free_cq_buf(c2dev, &cq->mq);
bail0:
	c2_free_mqsp(cq->mq.shared);

	return err;
}

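/*
 * Destroy a CQ: unpublish it from qptr_array, wait for all references
 * to drain, issue a CCWR_CQ_DESTROY work request to the adapter, and
 * free the kernel message pool if this is a kernel CQ.  May sleep.
 */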
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
	int err;
	struct c2_vq_req *vq_req;
	struct c2wr_cq_destroy_req wr;
	struct c2wr_cq_destroy_rep *reply;

	might_sleep();

	/* Clear CQ from the qptr array */
	spin_lock_irq(&c2dev->lock);
	c2dev->qptr_array[cq->mq.index] = NULL;
	atomic_dec(&cq->refcount);
	spin_unlock_irq(&c2dev->lock);

	wait_event(cq->wait, !atomic_read(&cq->refcount));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		goto bail0;

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.cq_handle = cq->adapter_handle;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (reply)
		vq_repbuf_free(c2dev, reply);
bail1:
	vq_req_free(c2dev, vq_req);
bail0:
	if (cq->is_kernel)
		c2_free_cq_buf(c2dev, &cq->mq);
}