/*
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>

#include "verbs.h"
#include "hfi.h"

/**
 * hfi1_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
{
	struct hfi1_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
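	/*
	 * One slot is always left empty, so head == tail means the ring is
	 * empty and advancing head onto tail would mean it is full.
	 */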
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
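	/*
	 * User-mapped CQs (cq->ip != NULL) store entries in the ABI-stable
	 * struct ib_uverbs_wc layout, which carries qp_num instead of the
	 * kernel's struct ib_qp pointer; kernel CQs copy struct ib_wc as-is.
	 */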
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data =
			(__u32 __force)entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;

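	/*
	 * Wake the completion handler if the consumer armed the CQ for all
	 * completions, or armed it for solicited completions and this entry
	 * is solicited or completed in error.
	 */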
	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		struct kthread_worker *worker;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		smp_read_barrier_depends(); /* see hfi1_cq_exit */
		worker = cq->dd->worker;
		if (likely(worker)) {
			cq->notify = IB_CQ_NONE;
			cq->triggered++;
			queue_kthread_work(worker, &cq->comptask);
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);
}

/**
 * hfi1_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context. Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct hfi1_cq *cq = to_icq(ibcq);
	struct hfi1_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
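	/* Clamp the tail index to a sane value before using it. */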
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
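
/*
 * A typical consumer drains completions in a loop, e.g. (sketch only;
 * handle_wc() is illustrative and not part of this driver):
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */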

static void send_complete(struct kthread_work *work)
{
	struct hfi1_cq *cq = container_of(work, struct hfi1_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries. If a new completion entry
	 * is added while we are in this routine, queue_kthread_work()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * hfi1_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @attr: creation attributes
 * @context: unused by the driver
 * @udata: user data for libibverbs.so
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *hfi1_create_cq(
	struct ib_device *ibdev,
	const struct ib_cq_init_attr *attr,
	struct ib_ucontext *context,
	struct ib_udata *udata)
{
	struct hfi1_ibdev *dev = to_idev(ibdev);
	struct hfi1_cq *cq;
	struct hfi1_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;
	unsigned int entries = attr->cqe;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > hfi1_max_cqes)
		return ERR_PTR(-EINVAL);

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
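	/* The extra slot distinguishes a full ring from an empty one. */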
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See hfi1_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = hfi1_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == hfi1_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->dd = dd_from_dev(dev);
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	init_kthread_work(&cq->comptask, send_complete);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}

/**
 * hfi1_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int hfi1_destroy_cq(struct ib_cq *ibcq)
{
	struct hfi1_ibdev *dev = to_idev(ibcq->device);
	struct hfi1_cq *cq = to_icq(ibcq);

	flush_kthread_work(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, hfi1_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * hfi1_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context. Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int hfi1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct hfi1_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
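
/*
 * Consumers typically rearm and then repoll, using
 * IB_CQ_REPORT_MISSED_EVENTS to close the race between the final poll and
 * the notify request (sketch only; drain_cq() is illustrative):
 *
 *	do {
 *		drain_cq(cq);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */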

/**
 * hfi1_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new size of the completion queue
 * @udata: user data for libibverbs.so
 *
 * Returns 0 for success.
 */
int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct hfi1_cq *cq = to_icq(ibcq);
	struct hfi1_cq_wc *old_wc;
	struct hfi1_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > hfi1_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
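	/* Compute how many entries are currently queued in the old ring. */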
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
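	/* Copy the live entries to the start of the new ring. */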
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct hfi1_ibdev *dev = to_idev(ibcq->device);
		struct hfi1_mmap_info *ip = cq->ip;

		hfi1_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See hfi1_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}

int hfi1_cq_init(struct hfi1_devdata *dd)
{
	int ret = 0;
	int cpu;
	struct task_struct *task;

	if (dd->worker)
		return 0;
	dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
	if (!dd->worker)
		return -ENOMEM;
	init_kthread_worker(dd->worker);
	task = kthread_create_on_node(
		kthread_worker_fn,
		dd->worker,
		dd->assigned_node_id,
		"hfi1_cq%d", dd->unit);
	if (IS_ERR(task))
		goto task_fail;
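	/* Pin the worker to a CPU on the device's local NUMA node. */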
	cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
	kthread_bind(task, cpu);
	wake_up_process(task);
out:
	return ret;
task_fail:
	ret = PTR_ERR(task);
	kfree(dd->worker);
	dd->worker = NULL;
	goto out;
}

void hfi1_cq_exit(struct hfi1_devdata *dd)
{
	struct kthread_worker *worker;

	worker = dd->worker;
	if (!worker)
		return;
	/* blocks future queuing from send_complete() */
	dd->worker = NULL;
	smp_wmb(); /* See hfi1_cq_enter */
	flush_kthread_worker(worker);
	kthread_stop(worker->task);
	kfree(worker);
}