/*
 * Copyright (c) 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>

#include "qib_verbs.h"
#include "qib.h"

/**
 * qib_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
{
	struct qib_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
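	/* The ring holds cqe + 1 slots; advancing head onto tail means full. */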
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
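	/*
	 * A user-mapped CQ stores entries in the ABI-stable ib_uverbs_wc
	 * layout, copied field by field; a kernel CQ can take the ib_wc
	 * as-is.
	 */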
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data =
			(__u32 __force)entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		struct kthread_worker *worker;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		smp_rmb();
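		/*
		 * The read barrier above pairs with the smp_wmb() in
		 * qib_cq_exit(): a worker pointer cleared during teardown
		 * must be observed before we try to queue work on it.
		 */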
		worker = cq->dd->worker;
		if (likely(worker)) {
			cq->notify = IB_CQ_NONE;
			cq->triggered++;
			queue_kthread_work(worker, &cq->comptask);
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);
}

/**
 * qib_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct qib_cq *cq = to_icq(ibcq);
	struct qib_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
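
/*
 * Usage sketch (not part of this driver): a kernel consumer normally
 * reaps completions through the generic verbs entry point, which lands
 * here for qib CQs.  The handle_wc() helper below is hypothetical.
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 *	for (i = 0; i < n; i++)
 *		handle_wc(&wc[i]);
 */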

static void send_complete(struct kthread_work *work)
{
	struct qib_cq *cq = container_of(work, struct qib_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_kthread_work()
	 * won't call us again until we return, so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * qib_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @attr: creation attributes
 * @context: unused by the QLogic_IB driver
 * @udata: user data for libibverbs.so
 *
 * Returns a pointer to the completion queue on success, or an
 * ERR_PTR-encoded errno on failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *qib_create_cq(struct ib_device *ibdev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_cq *cq;
	struct qib_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > ib_qib_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
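	/*
	 * entries + 1 slots: one slot is sacrificed so that head == tail
	 * unambiguously means the ring is empty.
	 */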
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = qib_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_qib_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries must be at least the number requested;
	 * otherwise an error is returned.
	 */
	cq->dd = dd_from_dev(dev);
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	init_kthread_work(&cq->comptask, send_complete);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}

/**
 * qib_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int qib_destroy_cq(struct ib_cq *ibcq)
{
	struct qib_ibdev *dev = to_idev(ibcq->device);
	struct qib_cq *cq = to_icq(ibcq);

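	/* Wait for any in-flight completion callback before tearing down. */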
	flush_kthread_work(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, qib_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * qib_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct qib_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

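	/*
	 * If the caller asked to be told about missed events and entries
	 * are already queued, return 1 so it knows to poll before sleeping.
	 */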
	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}

/**
 * qib_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new number of entries to support
 * @udata: user data for libibverbs.so
 *
 * Returns 0 for success.
 */
int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct qib_cq *cq = to_icq(ibcq);
	struct qib_cq_wc *old_wc;
	struct qib_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_qib_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large numbers
	 * of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
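	/* n is the current occupancy; the new ring must be able to hold it. */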
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
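	/* Copy the live entries to the head of the new ring. */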
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct qib_ibdev *dev = to_idev(ibcq->device);
		struct qib_mmap_info *ip = cq->ip;

		qib_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See qib_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}

int qib_cq_init(struct qib_devdata *dd)
{
	int ret = 0;
	int cpu;
	struct task_struct *task;

	if (dd->worker)
		return 0;
	dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
	if (!dd->worker)
		return -ENOMEM;
	init_kthread_worker(dd->worker);
	task = kthread_create_on_node(
		kthread_worker_fn,
		dd->worker,
		dd->assigned_node_id,
		"qib_cq%d", dd->unit);
	if (IS_ERR(task))
		goto task_fail;
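	/*
	 * Bind the worker to a CPU on the device's NUMA node so completion
	 * processing stays local to the hardware.
	 */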
	cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
	kthread_bind(task, cpu);
	wake_up_process(task);
out:
	return ret;
task_fail:
	ret = PTR_ERR(task);
	kfree(dd->worker);
	dd->worker = NULL;
	goto out;
}

void qib_cq_exit(struct qib_devdata *dd)
{
	struct kthread_worker *worker;

	worker = dd->worker;
	if (!worker)
		return;
	/* blocks future queuing from qib_cq_enter() */
	dd->worker = NULL;
	smp_wmb();
	flush_kthread_worker(worker);
	kthread_stop(worker->task);
	kfree(worker);
}