1 /*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/device.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/delay.h>
38 #include <linux/errno.h>
39 #include <linux/list.h>
40 #include <linux/sched/mm.h>
41 #include <linux/spinlock.h>
42 #include <linux/ethtool.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/inetdevice.h>
45 #include <linux/slab.h>
46
47 #include <asm/io.h>
48 #include <asm/irq.h>
49 #include <asm/byteorder.h>
50
51 #include <rdma/iw_cm.h>
52 #include <rdma/ib_verbs.h>
53 #include <rdma/ib_smi.h>
54 #include <rdma/ib_umem.h>
55 #include <rdma/ib_user_verbs.h>
56
57 #include "cxio_hal.h"
58 #include "iwch.h"
59 #include "iwch_provider.h"
60 #include "iwch_cm.h"
61 #include <rdma/cxgb3-abi.h>
62 #include "common.h"
63
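/*
 * Tear down a user context: free any mmap cookies still queued on the
 * context's mmaps list, release the HW ucontext resources, then free
 * the context itself.
 */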
64 static int iwch_dealloc_ucontext(struct ib_ucontext *context)
65 {
66 struct iwch_dev *rhp = to_iwch_dev(context->device);
67 struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
68 struct iwch_mm_entry *mm, *tmp;
69
70 pr_debug("%s context %p\n", __func__, context);
71 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
72 kfree(mm);
73 cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
74 kfree(ucontext);
75 return 0;
76 }
77
78 static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
79 struct ib_udata *udata)
80 {
81 struct iwch_ucontext *context;
82 struct iwch_dev *rhp = to_iwch_dev(ibdev);
83
84 pr_debug("%s ibdev %p\n", __func__, ibdev);
85 context = kzalloc(sizeof(*context), GFP_KERNEL);
86 if (!context)
87 return ERR_PTR(-ENOMEM);
88 cxio_init_ucontext(&rhp->rdev, &context->uctx);
89 INIT_LIST_HEAD(&context->mmaps);
90 spin_lock_init(&context->mmap_lock);
91 return &context->ibucontext;
92 }
93
94 static int iwch_destroy_cq(struct ib_cq *ib_cq)
95 {
96 struct iwch_cq *chp;
97
98 pr_debug("%s ib_cq %p\n", __func__, ib_cq);
99 chp = to_iwch_cq(ib_cq);
100
101 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
102 atomic_dec(&chp->refcnt);
103 wait_event(chp->wait, !atomic_read(&chp->refcnt));
104
105 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
106 kfree(chp);
107 return 0;
108 }
109
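/*
 * Create a CQ.  For user CQs the request carries the userspace rptr
 * address (except on T3A), and the response hands back an mmap key so
 * libcxgb3 can map the CQ memory.  T3A CQs get 16 extra entries to absorb
 * error CQEs; the size is then rounded up to a power of two.
 */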
110 static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
111 const struct ib_cq_init_attr *attr,
112 struct ib_ucontext *ib_context,
113 struct ib_udata *udata)
114 {
115 int entries = attr->cqe;
116 struct iwch_dev *rhp;
117 struct iwch_cq *chp;
118 struct iwch_create_cq_resp uresp;
119 struct iwch_create_cq_req ureq;
120 struct iwch_ucontext *ucontext = NULL;
121 static int warned;
122 size_t resplen;
123
124 pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
125 if (attr->flags)
126 return ERR_PTR(-EINVAL);
127
128 rhp = to_iwch_dev(ibdev);
129 chp = kzalloc(sizeof(*chp), GFP_KERNEL);
130 if (!chp)
131 return ERR_PTR(-ENOMEM);
132
133 if (ib_context) {
134 ucontext = to_iwch_ucontext(ib_context);
135 if (!t3a_device(rhp)) {
136 if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
137 kfree(chp);
138 return ERR_PTR(-EFAULT);
139 }
140 chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
141 }
142 }
143
144 if (t3a_device(rhp)) {
145
146 /*
147 * T3A: Add some fluff to handle extra CQEs inserted
148 * for various errors.
149 * Additional CQE possibilities:
150 * TERMINATE,
151 * incoming RDMA WRITE Failures
152 * incoming RDMA READ REQUEST FAILUREs
153 * NOTE: We cannot ensure the CQ won't overflow.
154 */
155 entries += 16;
156 }
157 entries = roundup_pow_of_two(entries);
158 chp->cq.size_log2 = ilog2(entries);
159
160 if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
161 kfree(chp);
162 return ERR_PTR(-ENOMEM);
163 }
164 chp->rhp = rhp;
165 chp->ibcq.cqe = 1 << chp->cq.size_log2;
166 spin_lock_init(&chp->lock);
167 spin_lock_init(&chp->comp_handler_lock);
168 atomic_set(&chp->refcnt, 1);
169 init_waitqueue_head(&chp->wait);
170 if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
171 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
172 kfree(chp);
173 return ERR_PTR(-ENOMEM);
174 }
175
176 if (ucontext) {
177 struct iwch_mm_entry *mm;
178
179 mm = kmalloc(sizeof *mm, GFP_KERNEL);
180 if (!mm) {
181 iwch_destroy_cq(&chp->ibcq);
182 return ERR_PTR(-ENOMEM);
183 }
184 uresp.cqid = chp->cq.cqid;
185 uresp.size_log2 = chp->cq.size_log2;
186 spin_lock(&ucontext->mmap_lock);
187 uresp.key = ucontext->key;
188 ucontext->key += PAGE_SIZE;
189 spin_unlock(&ucontext->mmap_lock);
190 mm->key = uresp.key;
191 mm->addr = virt_to_phys(chp->cq.queue);
192 if (udata->outlen < sizeof uresp) {
193 if (!warned++)
194 pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
195 mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
196 sizeof(struct t3_cqe));
197 resplen = sizeof(struct iwch_create_cq_resp_v0);
198 } else {
199 mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
200 sizeof(struct t3_cqe));
201 uresp.memsize = mm->len;
202 uresp.reserved = 0;
203 resplen = sizeof uresp;
204 }
205 if (ib_copy_to_udata(udata, &uresp, resplen)) {
206 kfree(mm);
207 iwch_destroy_cq(&chp->ibcq);
208 return ERR_PTR(-EFAULT);
209 }
210 insert_mmap(ucontext, mm);
211 }
212 pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
213 chp->cq.cqid, chp, (1 << chp->cq.size_log2),
214 (unsigned long long)chp->cq.dma_addr);
215 return &chp->ibcq;
216 }
217
218 static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
219 {
220 #ifdef notyet
221 struct iwch_cq *chp = to_iwch_cq(cq);
222 struct t3_cq oldcq, newcq;
223 int ret;
224
225 pr_debug("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
226
227 /* We don't downsize... */
228 if (cqe <= cq->cqe)
229 return 0;
230
231 /* create new t3_cq with new size */
232 cqe = roundup_pow_of_two(cqe+1);
233 newcq.size_log2 = ilog2(cqe);
234
235 /* Don't allow resize to less than the current wce count */
236 if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
237 return -ENOMEM;
238 }
239
240 /* Quiesce all QPs using this CQ */
241 ret = iwch_quiesce_qps(chp);
242 if (ret) {
243 return ret;
244 }
245
246 ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
247 if (ret) {
248 return ret;
249 }
250
251 /* copy CQEs */
252 memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
253 sizeof(struct t3_cqe));
254
255 /* old iwch_qp gets new t3_cq but keeps old cqid */
256 oldcq = chp->cq;
257 chp->cq = newcq;
258 chp->cq.cqid = oldcq.cqid;
259
260 /* resize new t3_cq to update the HW context */
261 ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
262 if (ret) {
263 chp->cq = oldcq;
264 return ret;
265 }
266 chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;
267
268 /* destroy old t3_cq */
269 oldcq.cqid = newcq.cqid;
270 ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
271 if (ret) {
272 pr_err("%s - cxio_destroy_cq failed %d\n", __func__, ret);
273 }
274
275 /* add user hooks here */
276
277 /* resume qps */
278 ret = iwch_resume_qps(chp);
279 return ret;
280 #else
281 return -ENOSYS;
282 #endif
283 }
284
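/*
 * Re-arm the CQ for notification.  Solicited-only vs. any-completion is
 * selected from the notify flags.  For user CQs the current rptr is read
 * from the user-mapped location before issuing the arm so the HW sees an
 * up-to-date consumer index.
 */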
285 static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
286 {
287 struct iwch_dev *rhp;
288 struct iwch_cq *chp;
289 enum t3_cq_opcode cq_op;
290 int err;
291 unsigned long flag;
292 u32 rptr;
293
294 chp = to_iwch_cq(ibcq);
295 rhp = chp->rhp;
296 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
297 cq_op = CQ_ARM_SE;
298 else
299 cq_op = CQ_ARM_AN;
300 if (chp->user_rptr_addr) {
301 if (get_user(rptr, chp->user_rptr_addr))
302 return -EFAULT;
303 spin_lock_irqsave(&chp->lock, flag);
304 chp->cq.rptr = rptr;
305 } else
306 spin_lock_irqsave(&chp->lock, flag);
307 pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
308 err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
309 spin_unlock_irqrestore(&chp->lock, flag);
310 if (err < 0)
311 pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
312 if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
313 err = 0;
314 return err;
315 }
316
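/*
 * mmap handler: vm_pgoff carries a key previously returned in a create_cq
 * or create_qp response.  Look up (and consume) the matching mmap entry;
 * addresses inside the user doorbell window are mapped uncached and
 * write-only, anything else is contiguous WQ/CQ memory.
 */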
317 static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
318 {
319 int len = vma->vm_end - vma->vm_start;
320 u32 key = vma->vm_pgoff << PAGE_SHIFT;
321 struct cxio_rdev *rdev_p;
322 int ret = 0;
323 struct iwch_mm_entry *mm;
324 struct iwch_ucontext *ucontext;
325 u64 addr;
326
327 pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
328 key, len);
329
330 if (vma->vm_start & (PAGE_SIZE-1)) {
331 return -EINVAL;
332 }
333
334 rdev_p = &(to_iwch_dev(context->device)->rdev);
335 ucontext = to_iwch_ucontext(context);
336
337 mm = remove_mmap(ucontext, key, len);
338 if (!mm)
339 return -EINVAL;
340 addr = mm->addr;
341 kfree(mm);
342
343 if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
344 (addr < (rdev_p->rnic_info.udbell_physbase +
345 rdev_p->rnic_info.udbell_len))) {
346
347 /*
348 * Map T3 DB register.
349 */
350 if (vma->vm_flags & VM_READ) {
351 return -EPERM;
352 }
353
354 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
355 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
356 vma->vm_flags &= ~VM_MAYREAD;
357 ret = io_remap_pfn_range(vma, vma->vm_start,
358 addr >> PAGE_SHIFT,
359 len, vma->vm_page_prot);
360 } else {
361
362 /*
363 * Map WQ or CQ contig dma memory...
364 */
365 ret = remap_pfn_range(vma, vma->vm_start,
366 addr >> PAGE_SHIFT,
367 len, vma->vm_page_prot);
368 }
369
370 return ret;
371 }
372
373 static int iwch_deallocate_pd(struct ib_pd *pd)
374 {
375 struct iwch_dev *rhp;
376 struct iwch_pd *php;
377
378 php = to_iwch_pd(pd);
379 rhp = php->rhp;
380 pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
381 cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
382 kfree(php);
383 return 0;
384 }
385
386 static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
387 struct ib_ucontext *context,
388 struct ib_udata *udata)
389 {
390 struct iwch_pd *php;
391 u32 pdid;
392 struct iwch_dev *rhp;
393
394 pr_debug("%s ibdev %p\n", __func__, ibdev);
395 rhp = (struct iwch_dev *) ibdev;
396 pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
397 if (!pdid)
398 return ERR_PTR(-EINVAL);
399 php = kzalloc(sizeof(*php), GFP_KERNEL);
400 if (!php) {
401 cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
402 return ERR_PTR(-ENOMEM);
403 }
404 php->pdid = pdid;
405 php->rhp = rhp;
406 if (context) {
407 struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
408
409 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
410 iwch_deallocate_pd(&php->ibpd);
411 return ERR_PTR(-EFAULT);
412 }
413 }
414 pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
415 return &php->ibpd;
416 }
417
418 static int iwch_dereg_mr(struct ib_mr *ib_mr)
419 {
420 struct iwch_dev *rhp;
421 struct iwch_mr *mhp;
422 u32 mmid;
423
424 pr_debug("%s ib_mr %p\n", __func__, ib_mr);
425
426 mhp = to_iwch_mr(ib_mr);
427 kfree(mhp->pages);
428 rhp = mhp->rhp;
429 mmid = mhp->attr.stag >> 8;
430 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
431 mhp->attr.pbl_addr);
432 iwch_free_pbl(mhp);
433 remove_handle(rhp, &rhp->mmidr, mmid);
434 if (mhp->kva)
435 kfree((void *) (unsigned long) mhp->kva);
436 if (mhp->umem)
437 ib_umem_release(mhp->umem);
438 pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
439 kfree(mhp);
440 return 0;
441 }
442
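/*
 * DMA MR covering all of memory.  T3 MR lengths are 32 bits, so this is
 * refused on platforms where phys_addr_t is wider than 32 bits; otherwise
 * the PBL is built from 64MB (1 << 26) pages spanning the 4GB range.
 */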
443 static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
444 {
445 const u64 total_size = 0xffffffff;
446 const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
447 struct iwch_pd *php = to_iwch_pd(pd);
448 struct iwch_dev *rhp = php->rhp;
449 struct iwch_mr *mhp;
450 __be64 *page_list;
451 int shift = 26, npages, ret, i;
452
453 pr_debug("%s ib_pd %p\n", __func__, pd);
454
455 /*
456 * T3 only supports 32 bits of size.
457 */
458 if (sizeof(phys_addr_t) > 4) {
459 pr_warn_once("Cannot support dma_mrs on this platform\n");
460 return ERR_PTR(-ENOTSUPP);
461 }
462
463 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
464 if (!mhp)
465 return ERR_PTR(-ENOMEM);
466
467 mhp->rhp = rhp;
468
469 npages = (total_size + (1ULL << shift) - 1) >> shift;
470 if (!npages) {
471 ret = -EINVAL;
472 goto err;
473 }
474
475 page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
476 if (!page_list) {
477 ret = -ENOMEM;
478 goto err;
479 }
480
481 for (i = 0; i < npages; i++)
482 page_list[i] = cpu_to_be64((u64)i << shift);
483
484 pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
485 __func__, mask, shift, total_size, npages);
486
487 ret = iwch_alloc_pbl(mhp, npages);
488 if (ret) {
489 kfree(page_list);
490 goto err_pbl;
491 }
492
493 ret = iwch_write_pbl(mhp, page_list, npages, 0);
494 kfree(page_list);
495 if (ret)
496 goto err_pbl;
497
498 mhp->attr.pdid = php->pdid;
499 mhp->attr.zbva = 0;
500
501 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
502 mhp->attr.va_fbo = 0;
503 mhp->attr.page_size = shift - 12;
504
505 mhp->attr.len = (u32) total_size;
506 mhp->attr.pbl_size = npages;
507 ret = iwch_register_mem(rhp, php, mhp, shift);
508 if (ret)
509 goto err_pbl;
510
511 return &mhp->ibmr;
512
513 err_pbl:
514 iwch_free_pbl(mhp);
515
516 err:
517 kfree(mhp);
518 return ERR_PTR(ret);
519 }
520
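/*
 * Register a user memory region: pin the buffer with ib_umem_get(), size
 * the PBL to the number of DMA mappings, then walk the SG list writing
 * big-endian DMA addresses into the PBL one page of entries at a time.
 * Non-T3A user processes also get the PBL address back in the response.
 */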
521 static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
522 u64 virt, int acc, struct ib_udata *udata)
523 {
524 __be64 *pages;
525 int shift, n, len;
526 int i, k, entry;
527 int err = 0;
528 struct iwch_dev *rhp;
529 struct iwch_pd *php;
530 struct iwch_mr *mhp;
531 struct iwch_reg_user_mr_resp uresp;
532 struct scatterlist *sg;
533 pr_debug("%s ib_pd %p\n", __func__, pd);
534
535 php = to_iwch_pd(pd);
536 rhp = php->rhp;
537 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
538 if (!mhp)
539 return ERR_PTR(-ENOMEM);
540
541 mhp->rhp = rhp;
542
543 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
544 if (IS_ERR(mhp->umem)) {
545 err = PTR_ERR(mhp->umem);
546 kfree(mhp);
547 return ERR_PTR(err);
548 }
549
550 shift = mhp->umem->page_shift;
551
552 n = mhp->umem->nmap;
553
554 err = iwch_alloc_pbl(mhp, n);
555 if (err)
556 goto err;
557
558 pages = (__be64 *) __get_free_page(GFP_KERNEL);
559 if (!pages) {
560 err = -ENOMEM;
561 goto err_pbl;
562 }
563
564 i = n = 0;
565
566 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
567 len = sg_dma_len(sg) >> shift;
568 for (k = 0; k < len; ++k) {
569 pages[i++] = cpu_to_be64(sg_dma_address(sg) +
570 (k << shift));
571 if (i == PAGE_SIZE / sizeof *pages) {
572 err = iwch_write_pbl(mhp, pages, i, n);
573 if (err)
574 goto pbl_done;
575 n += i;
576 i = 0;
577 }
578 }
579 }
580
581 if (i)
582 err = iwch_write_pbl(mhp, pages, i, n);
583
584 pbl_done:
585 free_page((unsigned long) pages);
586 if (err)
587 goto err_pbl;
588
589 mhp->attr.pdid = php->pdid;
590 mhp->attr.zbva = 0;
591 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
592 mhp->attr.va_fbo = virt;
593 mhp->attr.page_size = shift - 12;
594 mhp->attr.len = (u32) length;
595
596 err = iwch_register_mem(rhp, php, mhp, shift);
597 if (err)
598 goto err_pbl;
599
600 if (udata && !t3a_device(rhp)) {
601 uresp.pbl_addr = (mhp->attr.pbl_addr -
602 rhp->rdev.rnic_info.pbl_base) >> 3;
603 pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
604 uresp.pbl_addr);
605
606 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
607 iwch_dereg_mr(&mhp->ibmr);
608 err = -EFAULT;
609 goto err;
610 }
611 }
612
613 return &mhp->ibmr;
614
615 err_pbl:
616 iwch_free_pbl(mhp);
617
618 err:
619 ib_umem_release(mhp->umem);
620 kfree(mhp);
621 return ERR_PTR(err);
622 }
623
624 static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
625 struct ib_udata *udata)
626 {
627 struct iwch_dev *rhp;
628 struct iwch_pd *php;
629 struct iwch_mw *mhp;
630 u32 mmid;
631 u32 stag = 0;
632 int ret;
633
634 if (type != IB_MW_TYPE_1)
635 return ERR_PTR(-EINVAL);
636
637 php = to_iwch_pd(pd);
638 rhp = php->rhp;
639 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
640 if (!mhp)
641 return ERR_PTR(-ENOMEM);
642 ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
643 if (ret) {
644 kfree(mhp);
645 return ERR_PTR(ret);
646 }
647 mhp->rhp = rhp;
648 mhp->attr.pdid = php->pdid;
649 mhp->attr.type = TPT_MW;
650 mhp->attr.stag = stag;
651 mmid = (stag) >> 8;
652 mhp->ibmw.rkey = stag;
653 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
654 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
655 kfree(mhp);
656 return ERR_PTR(-ENOMEM);
657 }
658 pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
659 return &(mhp->ibmw);
660 }
661
662 static int iwch_dealloc_mw(struct ib_mw *mw)
663 {
664 struct iwch_dev *rhp;
665 struct iwch_mw *mhp;
666 u32 mmid;
667
668 mhp = to_iwch_mw(mw);
669 rhp = mhp->rhp;
670 mmid = (mw->rkey) >> 8;
671 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
672 remove_handle(rhp, &rhp->mmidr, mmid);
673 pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
674 kfree(mhp);
675 return 0;
676 }
677
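/*
 * Allocate a fastreg MR: a non-shared TPT entry plus a PBL with room for
 * max_num_sg entries.  The pages[] array is filled in later by
 * iwch_map_mr_sg()/iwch_set_page() when the MR is mapped.
 */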
678 static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
679 enum ib_mr_type mr_type,
680 u32 max_num_sg)
681 {
682 struct iwch_dev *rhp;
683 struct iwch_pd *php;
684 struct iwch_mr *mhp;
685 u32 mmid;
686 u32 stag = 0;
687 int ret = -ENOMEM;
688
689 if (mr_type != IB_MR_TYPE_MEM_REG ||
690 max_num_sg > T3_MAX_FASTREG_DEPTH)
691 return ERR_PTR(-EINVAL);
692
693 php = to_iwch_pd(pd);
694 rhp = php->rhp;
695 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
696 if (!mhp)
697 goto err;
698
699 mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
700 if (!mhp->pages)
701 goto pl_err;
702
703 mhp->rhp = rhp;
704 ret = iwch_alloc_pbl(mhp, max_num_sg);
705 if (ret)
706 goto err1;
707 mhp->attr.pbl_size = max_num_sg;
708 ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
709 mhp->attr.pbl_size, mhp->attr.pbl_addr);
710 if (ret)
711 goto err2;
712 mhp->attr.pdid = php->pdid;
713 mhp->attr.type = TPT_NON_SHARED_MR;
714 mhp->attr.stag = stag;
715 mhp->attr.state = 1;
716 mmid = (stag) >> 8;
717 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
718 ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
719 if (ret)
720 goto err3;
721
722 pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
723 return &(mhp->ibmr);
724 err3:
725 cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
726 mhp->attr.pbl_addr);
727 err2:
728 iwch_free_pbl(mhp);
729 err1:
730 kfree(mhp->pages);
731 pl_err:
732 kfree(mhp);
733 err:
734 return ERR_PTR(ret);
735 }
736
737 static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
738 {
739 struct iwch_mr *mhp = to_iwch_mr(ibmr);
740
741 if (unlikely(mhp->npages == mhp->attr.pbl_size))
742 return -ENOMEM;
743
744 mhp->pages[mhp->npages++] = addr;
745
746 return 0;
747 }
748
749 static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
750 int sg_nents, unsigned int *sg_offset)
751 {
752 struct iwch_mr *mhp = to_iwch_mr(ibmr);
753
754 mhp->npages = 0;
755
756 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
757 }
758
759 static int iwch_destroy_qp(struct ib_qp *ib_qp)
760 {
761 struct iwch_dev *rhp;
762 struct iwch_qp *qhp;
763 struct iwch_qp_attributes attrs;
764 struct iwch_ucontext *ucontext;
765
766 qhp = to_iwch_qp(ib_qp);
767 rhp = qhp->rhp;
768
769 attrs.next_state = IWCH_QP_STATE_ERROR;
770 iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
771 wait_event(qhp->wait, !qhp->ep);
772
773 remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
774
775 atomic_dec(&qhp->refcnt);
776 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
777
778 ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
779 : NULL;
780 cxio_destroy_qp(&rhp->rdev, &qhp->wq,
781 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
782
783 pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
784 ib_qp, qhp->wq.qpid, qhp);
785 kfree(qhp);
786 return 0;
787 }
788
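/*
 * Create an RC QP.  The RQ must hold max_recv_wr + 1 entries rounded up
 * to a power of two (minimum 16); the SQ and total WQ sizes are also
 * rounded to powers of two, with extra WQ space for kernel QPs because
 * fastreg WRs can take two WR fragments.  User QPs get two mmap cookies:
 * one for the WQ memory and one for the doorbell page.
 */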
789 static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
790 struct ib_qp_init_attr *attrs,
791 struct ib_udata *udata)
792 {
793 struct iwch_dev *rhp;
794 struct iwch_qp *qhp;
795 struct iwch_pd *php;
796 struct iwch_cq *schp;
797 struct iwch_cq *rchp;
798 struct iwch_create_qp_resp uresp;
799 int wqsize, sqsize, rqsize;
800 struct iwch_ucontext *ucontext;
801
802 pr_debug("%s ib_pd %p\n", __func__, pd);
803 if (attrs->qp_type != IB_QPT_RC)
804 return ERR_PTR(-EINVAL);
805 php = to_iwch_pd(pd);
806 rhp = php->rhp;
807 schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
808 rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
809 if (!schp || !rchp)
810 return ERR_PTR(-EINVAL);
811
812 /* The RQT size must be # of entries + 1 rounded up to a power of two */
813 rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
814 if (rqsize == attrs->cap.max_recv_wr)
815 rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
816
817 /* T3 doesn't support RQT depth < 16 */
818 if (rqsize < 16)
819 rqsize = 16;
820
821 if (rqsize > T3_MAX_RQ_SIZE)
822 return ERR_PTR(-EINVAL);
823
824 if (attrs->cap.max_inline_data > T3_MAX_INLINE)
825 return ERR_PTR(-EINVAL);
826
827 /*
828 * NOTE: The SQ and total WQ sizes don't need to be
829 * a power of two. However, all the code assumes
830 * they are. EG: Q_FREECNT() and friends.
831 */
832 sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
833 wqsize = roundup_pow_of_two(rqsize + sqsize);
834
835 /*
836 * Kernel users need more wq space for fastreg WRs which can take
837 * 2 WR fragments.
838 */
839 ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
840 if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
841 wqsize = roundup_pow_of_two(rqsize +
842 roundup_pow_of_two(attrs->cap.max_send_wr * 2));
843 pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
844 wqsize, sqsize, rqsize);
845 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
846 if (!qhp)
847 return ERR_PTR(-ENOMEM);
848 qhp->wq.size_log2 = ilog2(wqsize);
849 qhp->wq.rq_size_log2 = ilog2(rqsize);
850 qhp->wq.sq_size_log2 = ilog2(sqsize);
851 if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
852 ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
853 kfree(qhp);
854 return ERR_PTR(-ENOMEM);
855 }
856
857 attrs->cap.max_recv_wr = rqsize - 1;
858 attrs->cap.max_send_wr = sqsize;
859 attrs->cap.max_inline_data = T3_MAX_INLINE;
860
861 qhp->rhp = rhp;
862 qhp->attr.pd = php->pdid;
863 qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
864 qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
865 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
866 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
867 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
868 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
869 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
870 qhp->attr.state = IWCH_QP_STATE_IDLE;
871 qhp->attr.next_state = IWCH_QP_STATE_IDLE;
872
873 /*
874 * XXX - These don't get passed in from the openib user
875 * at create time. The CM sets them via a QP modify.
876 * Need to fix... I think the CM should
877 */
878 qhp->attr.enable_rdma_read = 1;
879 qhp->attr.enable_rdma_write = 1;
880 qhp->attr.enable_bind = 1;
881 qhp->attr.max_ord = 1;
882 qhp->attr.max_ird = 1;
883
884 spin_lock_init(&qhp->lock);
885 init_waitqueue_head(&qhp->wait);
886 atomic_set(&qhp->refcnt, 1);
887
888 if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
889 cxio_destroy_qp(&rhp->rdev, &qhp->wq,
890 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
891 kfree(qhp);
892 return ERR_PTR(-ENOMEM);
893 }
894
895 if (udata) {
896
897 struct iwch_mm_entry *mm1, *mm2;
898
899 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
900 if (!mm1) {
901 iwch_destroy_qp(&qhp->ibqp);
902 return ERR_PTR(-ENOMEM);
903 }
904
905 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
906 if (!mm2) {
907 kfree(mm1);
908 iwch_destroy_qp(&qhp->ibqp);
909 return ERR_PTR(-ENOMEM);
910 }
911
912 uresp.qpid = qhp->wq.qpid;
913 uresp.size_log2 = qhp->wq.size_log2;
914 uresp.sq_size_log2 = qhp->wq.sq_size_log2;
915 uresp.rq_size_log2 = qhp->wq.rq_size_log2;
916 spin_lock(&ucontext->mmap_lock);
917 uresp.key = ucontext->key;
918 ucontext->key += PAGE_SIZE;
919 uresp.db_key = ucontext->key;
920 ucontext->key += PAGE_SIZE;
921 spin_unlock(&ucontext->mmap_lock);
922 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
923 kfree(mm1);
924 kfree(mm2);
925 iwch_destroy_qp(&qhp->ibqp);
926 return ERR_PTR(-EFAULT);
927 }
928 mm1->key = uresp.key;
929 mm1->addr = virt_to_phys(qhp->wq.queue);
930 mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
931 insert_mmap(ucontext, mm1);
932 mm2->key = uresp.db_key;
933 mm2->addr = qhp->wq.udb & PAGE_MASK;
934 mm2->len = PAGE_SIZE;
935 insert_mmap(ucontext, mm2);
936 }
937 qhp->ibqp.qp_num = qhp->wq.qpid;
938 pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
939 __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
940 qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
941 1 << qhp->wq.size_log2, qhp->wq.rq_addr);
942 return &qhp->ibqp;
943 }
944
945 static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
946 int attr_mask, struct ib_udata *udata)
947 {
948 struct iwch_dev *rhp;
949 struct iwch_qp *qhp;
950 enum iwch_qp_attr_mask mask = 0;
951 struct iwch_qp_attributes attrs;
952
953 pr_debug("%s ib_qp %p\n", __func__, ibqp);
954
955 /* iwarp does not support the RTR state */
956 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
957 attr_mask &= ~IB_QP_STATE;
958
959 /* Make sure we still have something left to do */
960 if (!attr_mask)
961 return 0;
962
963 memset(&attrs, 0, sizeof attrs);
964 qhp = to_iwch_qp(ibqp);
965 rhp = qhp->rhp;
966
967 attrs.next_state = iwch_convert_state(attr->qp_state);
968 attrs.enable_rdma_read = (attr->qp_access_flags &
969 IB_ACCESS_REMOTE_READ) ? 1 : 0;
970 attrs.enable_rdma_write = (attr->qp_access_flags &
971 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
972 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
973
974
975 mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
976 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
977 (IWCH_QP_ATTR_ENABLE_RDMA_READ |
978 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
979 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
980
981 return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
982 }
983
984 void iwch_qp_add_ref(struct ib_qp *qp)
985 {
986 pr_debug("%s ib_qp %p\n", __func__, qp);
987 atomic_inc(&(to_iwch_qp(qp)->refcnt));
988 }
989
990 void iwch_qp_rem_ref(struct ib_qp *qp)
991 {
992 pr_debug("%s ib_qp %p\n", __func__, qp);
993 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
994 wake_up(&(to_iwch_qp(qp)->wait));
995 }
996
997 static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
998 {
999 pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1000 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
1001 }
1002
1003
1004 static int iwch_query_pkey(struct ib_device *ibdev,
1005 u8 port, u16 index, u16 * pkey)
1006 {
1007 pr_debug("%s ibdev %p\n", __func__, ibdev);
1008 *pkey = 0;
1009 return 0;
1010 }
1011
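/*
 * The GID is simply the MAC address of the port's lower netdev,
 * zero-padded to GID size.
 */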
1012 static int iwch_query_gid(struct ib_device *ibdev, u8 port,
1013 int index, union ib_gid *gid)
1014 {
1015 struct iwch_dev *dev;
1016
1017 pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
1018 __func__, ibdev, port, index, gid);
1019 dev = to_iwch_dev(ibdev);
1020 BUG_ON(port == 0 || port > 2);
1021 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
1022 memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
1023 return 0;
1024 }
1025
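/*
 * Skip the leading character of the ethtool fw_version string, parse the
 * dotted major.minor.micro triple, and pack it into a u64: major in bits
 * 47:32, minor in 31:16, micro in 15:0.
 */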
1026 static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
1027 {
1028 struct ethtool_drvinfo info;
1029 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1030 char *cp, *next;
1031 unsigned fw_maj, fw_min, fw_mic;
1032
1033 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1034
1035 next = info.fw_version + 1;
1036 cp = strsep(&next, ".");
1037 sscanf(cp, "%i", &fw_maj);
1038 cp = strsep(&next, ".");
1039 sscanf(cp, "%i", &fw_min);
1040 cp = strsep(&next, ".");
1041 sscanf(cp, "%i", &fw_mic);
1042
1043 return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
1044 (fw_mic & 0xffff);
1045 }
1046
1047 static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
1048 struct ib_udata *uhw)
1049 {
1050
1051 struct iwch_dev *dev;
1052
1053 pr_debug("%s ibdev %p\n", __func__, ibdev);
1054
1055 if (uhw->inlen || uhw->outlen)
1056 return -EINVAL;
1057
1058 dev = to_iwch_dev(ibdev);
1059 memset(props, 0, sizeof *props);
1060 memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
1061 props->hw_ver = dev->rdev.t3cdev_p->type;
1062 props->fw_ver = fw_vers_string_to_u64(dev);
1063 props->device_cap_flags = dev->device_cap_flags;
1064 props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
1065 props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
1066 props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
1067 props->max_mr_size = dev->attr.max_mr_size;
1068 props->max_qp = dev->attr.max_qps;
1069 props->max_qp_wr = dev->attr.max_wrs;
1070 props->max_send_sge = dev->attr.max_sge_per_wr;
1071 props->max_recv_sge = dev->attr.max_sge_per_wr;
1072 props->max_sge_rd = 1;
1073 props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
1074 props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
1075 props->max_cq = dev->attr.max_cqs;
1076 props->max_cqe = dev->attr.max_cqes_per_cq;
1077 props->max_mr = dev->attr.max_mem_regs;
1078 props->max_pd = dev->attr.max_pds;
1079 props->local_ca_ack_delay = 0;
1080 props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
1081
1082 return 0;
1083 }
1084
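/*
 * Port state is derived from the lower netdev: DOWN without link carrier,
 * ACTIVE when the link is up and an IPv4 address is configured, otherwise
 * INIT.
 */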
1085 static int iwch_query_port(struct ib_device *ibdev,
1086 u8 port, struct ib_port_attr *props)
1087 {
1088 struct iwch_dev *dev;
1089 struct net_device *netdev;
1090 struct in_device *inetdev;
1091
1092 pr_debug("%s ibdev %p\n", __func__, ibdev);
1093
1094 dev = to_iwch_dev(ibdev);
1095 netdev = dev->rdev.port_info.lldevs[port-1];
1096
1097 /* props being zeroed by the caller, avoid zeroing it here */
1098 props->max_mtu = IB_MTU_4096;
1099 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
1100
1101 if (!netif_carrier_ok(netdev))
1102 props->state = IB_PORT_DOWN;
1103 else {
1104 inetdev = in_dev_get(netdev);
1105 if (inetdev) {
1106 if (inetdev->ifa_list)
1107 props->state = IB_PORT_ACTIVE;
1108 else
1109 props->state = IB_PORT_INIT;
1110 in_dev_put(inetdev);
1111 } else
1112 props->state = IB_PORT_INIT;
1113 }
1114
1115 props->port_cap_flags =
1116 IB_PORT_CM_SUP |
1117 IB_PORT_SNMP_TUNNEL_SUP |
1118 IB_PORT_REINIT_SUP |
1119 IB_PORT_DEVICE_MGMT_SUP |
1120 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
1121 props->gid_tbl_len = 1;
1122 props->pkey_tbl_len = 1;
1123 props->active_width = 2;
1124 props->active_speed = IB_SPEED_DDR;
1125 props->max_msg_sz = -1;
1126
1127 return 0;
1128 }
1129
1130 static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
1131 char *buf)
1132 {
1133 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1134 ibdev.dev);
1135 pr_debug("%s dev 0x%p\n", __func__, dev);
1136 return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
1137 }
1138
1139 static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
1140 char *buf)
1141 {
1142 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1143 ibdev.dev);
1144 struct ethtool_drvinfo info;
1145 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1146
1147 pr_debug("%s dev 0x%p\n", __func__, dev);
1148 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1149 return sprintf(buf, "%s\n", info.driver);
1150 }
1151
1152 static ssize_t show_board(struct device *dev, struct device_attribute *attr,
1153 char *buf)
1154 {
1155 struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
1156 ibdev.dev);
1157 pr_debug("%s dev 0x%p\n", __func__, dev);
1158 return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
1159 iwch_dev->rdev.rnic_info.pdev->device);
1160 }
1161
1162 enum counters {
1163 IPINRECEIVES,
1164 IPINHDRERRORS,
1165 IPINADDRERRORS,
1166 IPINUNKNOWNPROTOS,
1167 IPINDISCARDS,
1168 IPINDELIVERS,
1169 IPOUTREQUESTS,
1170 IPOUTDISCARDS,
1171 IPOUTNOROUTES,
1172 IPREASMTIMEOUT,
1173 IPREASMREQDS,
1174 IPREASMOKS,
1175 IPREASMFAILS,
1176 TCPACTIVEOPENS,
1177 TCPPASSIVEOPENS,
1178 TCPATTEMPTFAILS,
1179 TCPESTABRESETS,
1180 TCPCURRESTAB,
1181 TCPINSEGS,
1182 TCPOUTSEGS,
1183 TCPRETRANSSEGS,
1184 TCPINERRS,
1185 TCPOUTRSTS,
1186 TCPRTOMIN,
1187 TCPRTOMAX,
1188 NR_COUNTERS
1189 };
1190
1191 static const char * const names[] = {
1192 [IPINRECEIVES] = "ipInReceives",
1193 [IPINHDRERRORS] = "ipInHdrErrors",
1194 [IPINADDRERRORS] = "ipInAddrErrors",
1195 [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
1196 [IPINDISCARDS] = "ipInDiscards",
1197 [IPINDELIVERS] = "ipInDelivers",
1198 [IPOUTREQUESTS] = "ipOutRequests",
1199 [IPOUTDISCARDS] = "ipOutDiscards",
1200 [IPOUTNOROUTES] = "ipOutNoRoutes",
1201 [IPREASMTIMEOUT] = "ipReasmTimeout",
1202 [IPREASMREQDS] = "ipReasmReqds",
1203 [IPREASMOKS] = "ipReasmOKs",
1204 [IPREASMFAILS] = "ipReasmFails",
1205 [TCPACTIVEOPENS] = "tcpActiveOpens",
1206 [TCPPASSIVEOPENS] = "tcpPassiveOpens",
1207 [TCPATTEMPTFAILS] = "tcpAttemptFails",
1208 [TCPESTABRESETS] = "tcpEstabResets",
1209 [TCPCURRESTAB] = "tcpCurrEstab",
1210 [TCPINSEGS] = "tcpInSegs",
1211 [TCPOUTSEGS] = "tcpOutSegs",
1212 [TCPRETRANSSEGS] = "tcpRetransSegs",
1213 [TCPINERRS] = "tcpInErrs",
1214 [TCPOUTRSTS] = "tcpOutRsts",
1215 [TCPRTOMIN] = "tcpRtoMin",
1216 [TCPRTOMAX] = "tcpRtoMax",
1217 };
1218
1219 static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
1220 u8 port_num)
1221 {
1222 BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
1223
1224 /* Our driver only supports device level stats */
1225 if (port_num != 0)
1226 return NULL;
1227
1228 return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
1229 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1230 }
1231
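/*
 * Device-level (port 0) HW stats: fetch the TP MIB block from the LLD via
 * the RDMA_GET_MIB control hook and fold each 32-bit hi/lo pair into a
 * 64-bit counter.
 */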
1232 static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1233 u8 port, int index)
1234 {
1235 struct iwch_dev *dev;
1236 struct tp_mib_stats m;
1237 int ret;
1238
1239 if (port != 0 || !stats)
1240 return -ENOSYS;
1241
1242 pr_debug("%s ibdev %p\n", __func__, ibdev);
1243 dev = to_iwch_dev(ibdev);
1244 ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
1245 if (ret)
1246 return -ENOSYS;
1247
1248 stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
1249 stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
1250 stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
1251 stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
1252 stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
1253 stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
1254 stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
1255 stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
1256 stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
1257 stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
1258 stats->value[IPREASMREQDS] = m.ipReasmReqds;
1259 stats->value[IPREASMOKS] = m.ipReasmOKs;
1260 stats->value[IPREASMFAILS] = m.ipReasmFails;
1261 stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
1262 stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
1263 stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
1264 stats->value[TCPESTABRESETS] = m.tcpEstabResets;
1265 stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
1266 stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
1267 stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
1268 stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
1269 stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
1270 stats->value[TCPOUTRSTS] = m.tcpOutRsts;
1271 stats->value[TCPRTOMIN] = m.tcpRtoMin;
1272 stats->value[TCPRTOMAX] = m.tcpRtoMax;
1273
1274 return stats->num_counters;
1275 }
1276
1277 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1278 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1279 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
1280
1281 static struct device_attribute *iwch_class_attributes[] = {
1282 &dev_attr_hw_rev,
1283 &dev_attr_hca_type,
1284 &dev_attr_board_id,
1285 };
1286
1287 static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
1288 struct ib_port_immutable *immutable)
1289 {
1290 struct ib_port_attr attr;
1291 int err;
1292
1293 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
1294
1295 err = ib_query_port(ibdev, port_num, &attr);
1296 if (err)
1297 return err;
1298
1299 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1300 immutable->gid_tbl_len = attr.gid_tbl_len;
1301
1302 return 0;
1303 }
1304
1305 static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
1306 {
1307 struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
1308 struct ethtool_drvinfo info;
1309 struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1310
1311 pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
1312 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1313 snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
1314 }
1315
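/*
 * Register the iWARP device with the IB core: fill in the verbs entry
 * points and iw_cm ops, register the ib_device, then create the sysfs
 * attribute files.
 */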
1316 int iwch_register_device(struct iwch_dev *dev)
1317 {
1318 int ret;
1319 int i;
1320
1321 pr_debug("%s iwch_dev %p\n", __func__, dev);
1322 strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
1323 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
1324 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
1325 dev->ibdev.owner = THIS_MODULE;
1326 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
1327 IB_DEVICE_MEM_WINDOW |
1328 IB_DEVICE_MEM_MGT_EXTENSIONS;
1329
1330 /* cxgb3 supports STag 0. */
1331 dev->ibdev.local_dma_lkey = 0;
1332
1333 dev->ibdev.uverbs_cmd_mask =
1334 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1335 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1336 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1337 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1338 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1339 (1ull << IB_USER_VERBS_CMD_REG_MR) |
1340 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
1341 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1342 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
1343 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
1344 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
1345 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
1346 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1347 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
1348 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1349 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
1350 (1ull << IB_USER_VERBS_CMD_POST_RECV);
1351 dev->ibdev.node_type = RDMA_NODE_RNIC;
1352 BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
1353 memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
1354 dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
1355 dev->ibdev.num_comp_vectors = 1;
1356 dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
1357 dev->ibdev.query_device = iwch_query_device;
1358 dev->ibdev.query_port = iwch_query_port;
1359 dev->ibdev.query_pkey = iwch_query_pkey;
1360 dev->ibdev.query_gid = iwch_query_gid;
1361 dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
1362 dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
1363 dev->ibdev.mmap = iwch_mmap;
1364 dev->ibdev.alloc_pd = iwch_allocate_pd;
1365 dev->ibdev.dealloc_pd = iwch_deallocate_pd;
1366 dev->ibdev.create_qp = iwch_create_qp;
1367 dev->ibdev.modify_qp = iwch_ib_modify_qp;
1368 dev->ibdev.destroy_qp = iwch_destroy_qp;
1369 dev->ibdev.create_cq = iwch_create_cq;
1370 dev->ibdev.destroy_cq = iwch_destroy_cq;
1371 dev->ibdev.resize_cq = iwch_resize_cq;
1372 dev->ibdev.poll_cq = iwch_poll_cq;
1373 dev->ibdev.get_dma_mr = iwch_get_dma_mr;
1374 dev->ibdev.reg_user_mr = iwch_reg_user_mr;
1375 dev->ibdev.dereg_mr = iwch_dereg_mr;
1376 dev->ibdev.alloc_mw = iwch_alloc_mw;
1377 dev->ibdev.dealloc_mw = iwch_dealloc_mw;
1378 dev->ibdev.alloc_mr = iwch_alloc_mr;
1379 dev->ibdev.map_mr_sg = iwch_map_mr_sg;
1380 dev->ibdev.req_notify_cq = iwch_arm_cq;
1381 dev->ibdev.post_send = iwch_post_send;
1382 dev->ibdev.post_recv = iwch_post_receive;
1383 dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
1384 dev->ibdev.get_hw_stats = iwch_get_mib;
1385 dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
1386 dev->ibdev.get_port_immutable = iwch_port_immutable;
1387 dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;
1388
1389 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
1390 if (!dev->ibdev.iwcm)
1391 return -ENOMEM;
1392
1393 dev->ibdev.iwcm->connect = iwch_connect;
1394 dev->ibdev.iwcm->accept = iwch_accept_cr;
1395 dev->ibdev.iwcm->reject = iwch_reject_cr;
1396 dev->ibdev.iwcm->create_listen = iwch_create_listen;
1397 dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
1398 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
1399 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
1400 dev->ibdev.iwcm->get_qp = iwch_get_qp;
1401 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
1402 sizeof(dev->ibdev.iwcm->ifname));
1403
1404 dev->ibdev.driver_id = RDMA_DRIVER_CXGB3;
1405 ret = ib_register_device(&dev->ibdev, NULL);
1406 if (ret)
1407 goto bail1;
1408
1409 for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
1410 ret = device_create_file(&dev->ibdev.dev,
1411 iwch_class_attributes[i]);
1412 if (ret) {
1413 goto bail2;
1414 }
1415 }
1416 return 0;
1417 bail2:
1418 ib_unregister_device(&dev->ibdev);
1419 bail1:
1420 kfree(dev->ibdev.iwcm);
1421 return ret;
1422 }
1423
1424 void iwch_unregister_device(struct iwch_dev *dev)
1425 {
1426 int i;
1427
1428 pr_debug("%s iwch_dev %p\n", __func__, dev);
1429 for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
1430 device_remove_file(&dev->ibdev.dev,
1431 iwch_class_attributes[i]);
1432 ib_unregister_device(&dev->ibdev);
1433 kfree(dev->ibdev.iwcm);
1434 return;
1435 }
1436