/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"

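/*
 * Each slot in a CQ message pool holds one completion event message,
 * rounded up to a 32-byte boundary.  For example, if sizeof(struct
 * c2wr_ce) were 44, C2_CQ_MSG_SIZE would evaluate to 64.
 */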
#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))

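/*
 * Look up a CQ by its MQ index in the device's qptr_array and take a
 * reference on it.  The reference pins the CQ across event delivery
 * and is dropped with c2_cq_put() below.
 */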
static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
	struct c2_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);
	cq = c2dev->qptr_array[cqn];
	if (!cq) {
		spin_unlock_irqrestore(&c2dev->lock, flags);
		return NULL;
	}
	atomic_inc(&cq->refcount);
	spin_unlock_irqrestore(&c2dev->lock, flags);
	return cq;
}

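/*
 * Drop a reference taken by c2_cq_get().  When the count hits zero,
 * wake anyone (c2_free_cq()) waiting for the CQ to become idle.
 */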
static void c2_cq_put(struct c2_cq *cq)
{
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}

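/*
 * Called when the adapter signals a completion event on the MQ at
 * mq_index: look up the owning CQ and invoke the consumer's
 * completion handler.  Events for a CQ that has already been torn
 * down are logged and discarded.
 */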
void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
	struct c2_cq *cq;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq) {
		printk(KERN_ERR "discarding events on destroyed CQN=%u\n",
		       mq_index);
		return;
	}

	(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
	c2_cq_put(cq);
}

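/*
 * Called while a QP is being destroyed: walk the unconsumed portion
 * of the CQ's message queue, from the host's private index up to the
 * index published in the shared variable, and zero qp_user_context in
 * any completion message that belongs to the dying QP.  c2_poll_one()
 * skips such messages.
 */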
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
	struct c2_cq *cq;
	struct c2_mq *q;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	q = &cq->mq;
	if (q && !c2_mq_empty(q)) {
		u16 priv = q->priv;
		struct c2wr_ce *msg;

		while (priv != be16_to_cpu(*q->shared)) {
			msg = (struct c2wr_ce *)
				(q->msg_pool.host + priv * q->msg_size);
			if (msg->qp_user_context == (u64) (unsigned long) qp)
				msg->qp_user_context = (u64) 0;
			priv = (priv + 1) % q->q_size;
		}
	}
	spin_unlock_irq(&cq->lock);
	c2_cq_put(cq);
}

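/*
 * Map an adapter completion status code onto the closest OpenIB
 * (ib_verbs) work-completion status.
 */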
static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
{
	switch (status) {
	case C2_OK:
		return IB_WC_SUCCESS;
	case CCERR_FLUSHED:
		return IB_WC_WR_FLUSH_ERR;
	case CCERR_BASE_AND_BOUNDS_VIOLATION:
		return IB_WC_LOC_PROT_ERR;
	case CCERR_ACCESS_VIOLATION:
		return IB_WC_LOC_ACCESS_ERR;
	case CCERR_TOTAL_LENGTH_TOO_BIG:
		return IB_WC_LOC_LEN_ERR;
	case CCERR_INVALID_WINDOW:
		return IB_WC_MW_BIND_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

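/*
 * Consume a single completion event from the CQ's message queue and
 * translate it into the caller's ib_wc.  Completions whose QP has
 * already been freed (qp_user_context zeroed by c2_cq_clean()) are
 * silently skipped.  Returns 0 on success, or -EAGAIN when the queue
 * is empty.  Called with cq->lock held.
 */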
static inline int c2_poll_one(struct c2_dev *c2dev,
			      struct c2_cq *cq, struct ib_wc *entry)
{
	struct c2wr_ce *ce;
	struct c2_qp *qp;
	int is_recv = 0;

	ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
	if (!ce)
		return -EAGAIN;

	/*
	 * If the QP returned is NULL then this QP has already
	 * been freed and we are unable to process the completion.
	 * Try pulling the next message.
	 */
	while ((qp =
		(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
		c2_mq_free(&cq->mq);
		ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
		if (!ce)
			return -EAGAIN;
	}

	entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
	entry->wr_id = ce->hdr.context;
	entry->qp = &qp->ibqp;
	entry->wc_flags = 0;
	entry->slid = 0;
	entry->sl = 0;
	entry->src_qp = 0;
	entry->dlid_path_bits = 0;
	entry->pkey_index = 0;

	switch (c2_wr_get_id(ce)) {
	case C2_WR_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case C2_WR_TYPE_RDMA_WRITE:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case C2_WR_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case C2_WR_TYPE_BIND_MW:
		entry->opcode = IB_WC_BIND_MW;
		break;
	case C2_WR_TYPE_RECV:
		entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
		entry->opcode = IB_WC_RECV;
		is_recv = 1;
		break;
	default:
		break;
	}

	/* Consume the WQEs */
	if (is_recv)
		c2_mq_lconsume(&qp->rq_mq, 1);
	else
		c2_mq_lconsume(&qp->sq_mq,
			       be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);

	/* Free the message */
	c2_mq_free(&cq->mq);

	return 0;
}

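/*
 * ib_poll_cq() entry point: drain up to num_entries completions into
 * the entry array under the CQ lock and return the number polled.
 */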
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct c2_dev *c2dev = to_c2dev(ibcq->device);
	struct c2_cq *cq = to_c2cq(ibcq);
	unsigned long flags;
	int npolled, err;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = c2_poll_one(c2dev, cq, entry + npolled);
		if (err)
			break;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

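/*
 * ib_req_notify_cq() entry point: tell the adapter which events
 * should raise the next CQ notification (any completion, or solicited
 * events only), then arm the CQ.  With IB_CQ_REPORT_MISSED_EVENTS,
 * returns > 0 if completions were already pending at arm time.
 */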
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct c2_mq_shared __iomem *shared;
	struct c2_cq *cq;
	unsigned long flags;
	int ret = 0;

	cq = to_c2cq(ibcq);
	shared = cq->mq.peer;

	if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
	else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
	else
		return -EINVAL;

	writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);

	/*
	 * Now read back shared->armed to make the PCI
	 * write synchronous.  This is necessary for
	 * correct CQ notification semantics.
	 */
	readb(&shared->armed);

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&cq->lock, flags);
		ret = !c2_mq_empty(&cq->mq);
		spin_unlock_irqrestore(&cq->lock, flags);
	}

	return ret;
}
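
/*
 * A consumer typically closes the race between polling and arming by
 * re-polling whenever the arm reports missed events.  A minimal
 * sketch, assuming a caller-declared wc[] array:
 *
 *	int n;
 *
 *	do {
 *		n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *		... process n entries of wc ...
 *	} while (n > 0 ||
 *		 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */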
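/*
 * The CQ message pool is a single DMA-coherent allocation of
 * q_size * msg_size bytes shared with the adapter; it is created by
 * c2_alloc_cq_buf() below and released here.
 */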
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
}

static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
			   int msg_size)
{
	u8 *pool_start;

	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
					&mq->host_dma, GFP_KERNEL);
	if (!pool_start)
		return -ENOMEM;

	c2_mq_rep_init(mq,
		       0,		/* index (currently unknown) */
		       q_size,
		       msg_size,
		       pool_start,
		       NULL,		/* peer (currently unknown) */
		       C2_MQ_HOST_TARGET);

	pci_unmap_addr_set(mq, mapping, mq->host_dma);

	return 0;
}

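/*
 * Create a CQ: allocate the host-side shared pointer and message
 * pool, then hand them to the adapter with a CCWR_CQ_CREATE verbs
 * request.  The adapter's reply supplies the CQ handle, the MQ index
 * used to file the CQ in qptr_array, and the offset of the
 * adapter-side shared state, which is mapped with ioremap_nocache().
 */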
int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;
	struct c2wr_cq_create_rep *reply;
	unsigned long peer_pa;
	struct c2_vq_req *vq_req;
	int err;

	might_sleep();

	cq->ibcq.cqe = entries - 1;
	cq->is_kernel = !ctx;

	/* Allocate a shared pointer */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Allocate pages for the message pool */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) (cq);

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail2;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail2;

	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail2;
	}

	if ((err = c2_errno(reply)) != 0)
		goto bail3;

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail3;
	}

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	/*
	 * Use the MQ index allocated by the adapter to
	 * store the CQ in the qptr_array
	 */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail3:
	vq_repbuf_free(c2dev, reply);
bail2:
	vq_req_free(c2dev, vq_req);
bail1:
	c2_free_cq_buf(c2dev, &cq->mq);
bail0:
	c2_free_mqsp(cq->mq.shared);

	return err;
}

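/*
 * Destroy a CQ: unhook it from qptr_array so no new events can find
 * it, drop the initial reference, and wait for in-flight event
 * handlers to release theirs before telling the adapter to destroy
 * the CQ with CCWR_CQ_DESTROY.
 */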
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
	int err;
	struct c2_vq_req *vq_req;
	struct c2wr_cq_destroy_req wr;
	struct c2wr_cq_destroy_rep *reply;

	might_sleep();

	/* Clear CQ from the qptr array */
	spin_lock_irq(&c2dev->lock);
	c2dev->qptr_array[cq->mq.index] = NULL;
	atomic_dec(&cq->refcount);
	spin_unlock_irq(&c2dev->lock);

	wait_event(cq->wait, !atomic_read(&cq->refcount));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		goto bail0;

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.cq_handle = cq->adapter_handle;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (reply)
		vq_repbuf_free(c2dev, reply);
bail1:
	vq_req_free(c2dev, vq_req);
bail0:
	if (cq->is_kernel)
		c2_free_cq_buf(c2dev, &cq->mq);
}