/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	struct ipath_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
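		/*
		 * The matching read barrier is presumed to live in the
		 * user-space consumer that mmap()s this queue: it reads
		 * wc->head first and needs a read barrier before reading
		 * the entry contents.
		 */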
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}
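
/*
 * Illustrative sketch (not driver code): the queue above is a ring of
 * ibcq.cqe + 1 slots indexed 0..ibcq.cqe.  With ibcq.cqe == 3 (four
 * slots), ipath_cq_enter() behaves as follows:
 *
 *	head == 1, tail == 0: entry stored in slot 1, head becomes 2
 *	head == 3, tail == 0: entry stored in slot 3, head wraps to 0
 *	head == 2, tail == 3: next == tail, so the ring is full and
 *	                      IB_EVENT_CQ_ERR is reported instead
 */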

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
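
/*
 * Example (a minimal sketch, not part of this driver): a kernel
 * consumer reaches this routine through the generic verbs entry
 * point, typically in a pattern like the following, where
 * handle_error() is a hypothetical helper:
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *	for (i = 0; i < n; i++)
 *		if (wc[i].status != IB_WC_SUCCESS)
 *			handle_error(&wc[i]);
 */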

static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}
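
/*
 * A sketch of the race the loop above guards against: if
 * ipath_cq_enter() runs on another CPU after comp_handler() has been
 * called but before cq->triggered is re-read, the counter no longer
 * matches the saved value, so the loop invokes the handler once more
 * rather than dropping the notification.
 */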

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the InfiniPath driver
 * @context: the user context to map the completion queue into, if any
 * @udata: user data used to return the mmap offset for user CQs
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() initializes cq->ibcq except for cq->ibcq.cqe,
	 * which we set here.  The reported number of entries must be at
	 * least the number requested, otherwise an error must be
	 * returned instead.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
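
/*
 * Example (a hypothetical sketch, not part of this driver): a user
 * library that created the CQ through uverbs takes the __u64 offset
 * returned in udata above and maps the queue with something like:
 *
 *	struct ipath_cq_wc *q;
 *
 *	q = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		 cmd_fd, offset);
 *
 * where size, cmd_fd and offset come from the create-CQ exchange; see
 * ipath_mmap() for how the offset is matched back to this allocation.
 */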

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

	tasklet_kill(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, ipath_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
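
/*
 * Example (a minimal sketch, not part of this driver): returning 1
 * when IB_CQ_REPORT_MISSED_EVENTS is set supports the standard
 * rearm-then-repoll pattern, where process() is a hypothetical
 * helper:
 *
 *	repoll:
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process(&wc);
 *		if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *			goto repoll;
 */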

/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new minimum number of entries for the completion queue
 * @udata: user data used to return the new mmap offset for user CQs
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *old_wc;
	struct ipath_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large numbers of
	 * entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
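	/*
	 * Worked example of the occupancy computation above: with
	 * ibcq.cqe == 4 (a ring of five slots), head == 1 and tail == 3
	 * give n = 4 + 1 + 1 - 3 = 3 queued entries (slots 3, 4 and 0),
	 * which must fit within the requested cqe entries.
	 */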
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;

		ipath_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}