1 /*
2  * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/device.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/delay.h>
38 #include <linux/errno.h>
39 #include <linux/list.h>
40 #include <linux/sched/mm.h>
41 #include <linux/spinlock.h>
42 #include <linux/ethtool.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/inetdevice.h>
45 #include <linux/slab.h>
46 
47 #include <asm/io.h>
48 #include <asm/irq.h>
49 #include <asm/byteorder.h>
50 
51 #include <rdma/iw_cm.h>
52 #include <rdma/ib_verbs.h>
53 #include <rdma/ib_smi.h>
54 #include <rdma/ib_umem.h>
55 #include <rdma/ib_user_verbs.h>
56 #include <rdma/uverbs_ioctl.h>
57 
58 #include "cxio_hal.h"
59 #include "iwch.h"
60 #include "iwch_provider.h"
61 #include "iwch_cm.h"
62 #include <rdma/cxgb3-abi.h>
63 #include "common.h"
64 
65 static void iwch_dealloc_ucontext(struct ib_ucontext *context)
66 {
67 	struct iwch_dev *rhp = to_iwch_dev(context->device);
68 	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
69 	struct iwch_mm_entry *mm, *tmp;
70 
71 	pr_debug("%s context %p\n", __func__, context);
72 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
73 		kfree(mm);
74 	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
75 }
76 
77 static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
78 			       struct ib_udata *udata)
79 {
80 	struct ib_device *ibdev = ucontext->device;
81 	struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
82 	struct iwch_dev *rhp = to_iwch_dev(ibdev);
83 
84 	pr_debug("%s ibdev %p\n", __func__, ibdev);
85 	cxio_init_ucontext(&rhp->rdev, &context->uctx);
86 	INIT_LIST_HEAD(&context->mmaps);
87 	spin_lock_init(&context->mmap_lock);
88 	return 0;
89 }
90 
91 static void iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
92 {
93 	struct iwch_cq *chp;
94 
95 	pr_debug("%s ib_cq %p\n", __func__, ib_cq);
96 	chp = to_iwch_cq(ib_cq);
97 
98 	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
99 	atomic_dec(&chp->refcnt);
100 	wait_event(chp->wait, !atomic_read(&chp->refcnt));
101 
102 	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
103 }
104 
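/*
 * iwch_create_cq(): allocates the hardware CQ and, for user contexts,
 * hands back an mmap key in the response so libcxgb3 can map the CQ
 * memory via iwch_mmap().  A short udata->outlen indicates a downlevel
 * libcxgb3, which gets the smaller v0 response layout.
 */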
105 static int iwch_create_cq(struct ib_cq *ibcq,
106 			  const struct ib_cq_init_attr *attr,
107 			  struct ib_udata *udata)
108 {
109 	struct ib_device *ibdev = ibcq->device;
110 	int entries = attr->cqe;
111 	struct iwch_dev *rhp = to_iwch_dev(ibcq->device);
112 	struct iwch_cq *chp = to_iwch_cq(ibcq);
113 	struct iwch_create_cq_resp uresp;
114 	struct iwch_create_cq_req ureq;
115 	static int warned;
116 	size_t resplen;
117 
118 	pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
119 	if (attr->flags)
120 		return -EINVAL;
121 
122 	if (udata) {
123 		if (!t3a_device(rhp)) {
124 			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
125 				return  -EFAULT;
126 
127 			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
128 		}
129 	}
130 
131 	if (t3a_device(rhp)) {
132 
133 		/*
134 		 * T3A: Add some fluff to handle extra CQEs inserted
135 		 * for various errors.
136 		 * Additional CQE possibilities:
137 		 *      TERMINATE,
138 		 *      incoming RDMA WRITE Failures
139 		 *      incoming RDMA READ REQUEST FAILUREs
140 		 * NOTE: We cannot ensure the CQ won't overflow.
141 		 */
142 		entries += 16;
143 	}
144 	entries = roundup_pow_of_two(entries);
145 	chp->cq.size_log2 = ilog2(entries);
146 
147 	if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata))
148 		return -ENOMEM;
149 
150 	chp->rhp = rhp;
151 	chp->ibcq.cqe = 1 << chp->cq.size_log2;
152 	spin_lock_init(&chp->lock);
153 	spin_lock_init(&chp->comp_handler_lock);
154 	atomic_set(&chp->refcnt, 1);
155 	init_waitqueue_head(&chp->wait);
156 	if (xa_store_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL)) {
157 		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
158 		return -ENOMEM;
159 	}
160 
161 	if (udata) {
162 		struct iwch_mm_entry *mm;
163 		struct iwch_ucontext *ucontext = rdma_udata_to_drv_context(
164 			udata, struct iwch_ucontext, ibucontext);
165 
166 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
167 		if (!mm) {
168 			iwch_destroy_cq(&chp->ibcq, udata);
169 			return -ENOMEM;
170 		}
171 		uresp.cqid = chp->cq.cqid;
172 		uresp.size_log2 = chp->cq.size_log2;
173 		spin_lock(&ucontext->mmap_lock);
174 		uresp.key = ucontext->key;
175 		ucontext->key += PAGE_SIZE;
176 		spin_unlock(&ucontext->mmap_lock);
177 		mm->key = uresp.key;
178 		mm->addr = virt_to_phys(chp->cq.queue);
179 		if (udata->outlen < sizeof(uresp)) {
180 			if (!warned++)
181 				pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
182 			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
183 					     sizeof(struct t3_cqe));
184 			resplen = sizeof(struct iwch_create_cq_resp_v0);
185 		} else {
186 			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
187 					     sizeof(struct t3_cqe));
188 			uresp.memsize = mm->len;
189 			uresp.reserved = 0;
190 			resplen = sizeof(uresp);
191 		}
192 		if (ib_copy_to_udata(udata, &uresp, resplen)) {
193 			kfree(mm);
194 			iwch_destroy_cq(&chp->ibcq, udata);
195 			return -EFAULT;
196 		}
197 		insert_mmap(ucontext, mm);
198 	}
199 	pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr %pad\n",
200 		 chp->cq.cqid, chp, (1 << chp->cq.size_log2),
201 		 &chp->cq.dma_addr);
202 	return 0;
203 }
204 
205 static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
206 {
207 	struct iwch_dev *rhp;
208 	struct iwch_cq *chp;
209 	enum t3_cq_opcode cq_op;
210 	int err;
211 	unsigned long flag;
212 	u32 rptr;
213 
214 	chp = to_iwch_cq(ibcq);
215 	rhp = chp->rhp;
216 	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
217 		cq_op = CQ_ARM_SE;
218 	else
219 		cq_op = CQ_ARM_AN;
220 	if (chp->user_rptr_addr) {
221 		if (get_user(rptr, chp->user_rptr_addr))
222 			return -EFAULT;
223 		spin_lock_irqsave(&chp->lock, flag);
224 		chp->cq.rptr = rptr;
225 	} else
226 		spin_lock_irqsave(&chp->lock, flag);
227 	pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
228 	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
229 	spin_unlock_irqrestore(&chp->lock, flag);
230 	if (err < 0)
231 		pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
232 	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
233 		err = 0;
234 	return err;
235 }
236 
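/*
 * iwch_mmap(): looks up the offset key recorded by iwch_create_cq()/
 * iwch_create_qp() in the ucontext mmap list, then maps either the T3
 * user doorbell register (non-cached, write-only) or the contiguous
 * WQ/CQ queue memory, depending on which range the address falls in.
 */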
237 static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
238 {
239 	int len = vma->vm_end - vma->vm_start;
240 	u32 key = vma->vm_pgoff << PAGE_SHIFT;
241 	struct cxio_rdev *rdev_p;
242 	int ret = 0;
243 	struct iwch_mm_entry *mm;
244 	struct iwch_ucontext *ucontext;
245 	u64 addr;
246 
247 	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
248 		 key, len);
249 
250 	if (vma->vm_start & (PAGE_SIZE-1)) {
251 	        return -EINVAL;
252 	}
253 
254 	rdev_p = &(to_iwch_dev(context->device)->rdev);
255 	ucontext = to_iwch_ucontext(context);
256 
257 	mm = remove_mmap(ucontext, key, len);
258 	if (!mm)
259 		return -EINVAL;
260 	addr = mm->addr;
261 	kfree(mm);
262 
263 	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
264 	    (addr < (rdev_p->rnic_info.udbell_physbase +
265 		       rdev_p->rnic_info.udbell_len))) {
266 
267 		/*
268 		 * Map T3 DB register.
269 		 */
270 		if (vma->vm_flags & VM_READ) {
271 			return -EPERM;
272 		}
273 
274 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
275 		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
276 		vma->vm_flags &= ~VM_MAYREAD;
277 		ret = io_remap_pfn_range(vma, vma->vm_start,
278 					 addr >> PAGE_SHIFT,
279 				         len, vma->vm_page_prot);
280 	} else {
281 
282 		/*
283 		 * Map WQ or CQ contig dma memory...
284 		 */
285 		ret = remap_pfn_range(vma, vma->vm_start,
286 				      addr >> PAGE_SHIFT,
287 				      len, vma->vm_page_prot);
288 	}
289 
290 	return ret;
291 }
292 
293 static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
294 {
295 	struct iwch_dev *rhp;
296 	struct iwch_pd *php;
297 
298 	php = to_iwch_pd(pd);
299 	rhp = php->rhp;
300 	pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
301 	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
302 }
303 
304 static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
305 {
306 	struct iwch_pd *php = to_iwch_pd(pd);
307 	struct ib_device *ibdev = pd->device;
308 	u32 pdid;
309 	struct iwch_dev *rhp;
310 
311 	pr_debug("%s ibdev %p\n", __func__, ibdev);
312 	rhp = (struct iwch_dev *) ibdev;
313 	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
314 	if (!pdid)
315 		return -EINVAL;
316 
317 	php->pdid = pdid;
318 	php->rhp = rhp;
319 	if (udata) {
320 		struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
321 
322 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
323 			iwch_deallocate_pd(&php->ibpd, udata);
324 			return -EFAULT;
325 		}
326 	}
327 	pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
328 	return 0;
329 }
330 
331 static int iwch_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
332 {
333 	struct iwch_dev *rhp;
334 	struct iwch_mr *mhp;
335 	u32 mmid;
336 
337 	pr_debug("%s ib_mr %p\n", __func__, ib_mr);
338 
339 	mhp = to_iwch_mr(ib_mr);
340 	kfree(mhp->pages);
341 	rhp = mhp->rhp;
342 	mmid = mhp->attr.stag >> 8;
343 	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
344 		       mhp->attr.pbl_addr);
345 	iwch_free_pbl(mhp);
346 	xa_erase_irq(&rhp->mrs, mmid);
347 	if (mhp->kva)
348 		kfree((void *) (unsigned long) mhp->kva);
349 	ib_umem_release(mhp->umem);
350 	pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
351 	kfree(mhp);
352 	return 0;
353 }
354 
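/*
 * iwch_get_dma_mr(): T3 only supports a 32-bit MR length, so the DMA MR
 * is built as a PBL of 1 << 26 byte (64MB) pages covering the low 4GB,
 * and is refused outright on platforms whose physical addresses are
 * wider than 32 bits.
 */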
355 static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
356 {
357 	const u64 total_size = 0xffffffff;
358 	const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
359 	struct iwch_pd *php = to_iwch_pd(pd);
360 	struct iwch_dev *rhp = php->rhp;
361 	struct iwch_mr *mhp;
362 	__be64 *page_list;
363 	int shift = 26, npages, ret, i;
364 
365 	pr_debug("%s ib_pd %p\n", __func__, pd);
366 
367 	/*
368 	 * T3 only supports 32 bits of size.
369 	 */
370 	if (sizeof(phys_addr_t) > 4) {
371 		pr_warn_once("Cannot support dma_mrs on this platform\n");
372 		return ERR_PTR(-ENOTSUPP);
373 	}
374 
375 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
376 	if (!mhp)
377 		return ERR_PTR(-ENOMEM);
378 
379 	mhp->rhp = rhp;
380 
381 	npages = (total_size + (1ULL << shift) - 1) >> shift;
382 	if (!npages) {
383 		ret = -EINVAL;
384 		goto err;
385 	}
386 
387 	page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
388 	if (!page_list) {
389 		ret = -ENOMEM;
390 		goto err;
391 	}
392 
393 	for (i = 0; i < npages; i++)
394 		page_list[i] = cpu_to_be64((u64)i << shift);
395 
396 	pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
397 		 __func__, mask, shift, total_size, npages);
398 
399 	ret = iwch_alloc_pbl(mhp, npages);
400 	if (ret) {
401 		kfree(page_list);
402 		goto err_pbl;
403 	}
404 
405 	ret = iwch_write_pbl(mhp, page_list, npages, 0);
406 	kfree(page_list);
407 	if (ret)
408 		goto err_pbl;
409 
410 	mhp->attr.pdid = php->pdid;
411 	mhp->attr.zbva = 0;
412 
413 	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
414 	mhp->attr.va_fbo = 0;
415 	mhp->attr.page_size = shift - 12;
416 
417 	mhp->attr.len = (u32) total_size;
418 	mhp->attr.pbl_size = npages;
419 	ret = iwch_register_mem(rhp, php, mhp, shift);
420 	if (ret)
421 		goto err_pbl;
422 
423 	return &mhp->ibmr;
424 
425 err_pbl:
426 	iwch_free_pbl(mhp);
427 
428 err:
429 	kfree(mhp);
430 	return ERR_PTR(ret);
431 }
432 
433 static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
434 				      u64 virt, int acc, struct ib_udata *udata)
435 {
436 	__be64 *pages;
437 	int shift, n, i;
438 	int err = 0;
439 	struct iwch_dev *rhp;
440 	struct iwch_pd *php;
441 	struct iwch_mr *mhp;
442 	struct iwch_reg_user_mr_resp uresp;
443 	struct sg_dma_page_iter sg_iter;
444 	pr_debug("%s ib_pd %p\n", __func__, pd);
445 
446 	php = to_iwch_pd(pd);
447 	rhp = php->rhp;
448 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
449 	if (!mhp)
450 		return ERR_PTR(-ENOMEM);
451 
452 	mhp->rhp = rhp;
453 
454 	mhp->umem = ib_umem_get(udata, start, length, acc, 0);
455 	if (IS_ERR(mhp->umem)) {
456 		err = PTR_ERR(mhp->umem);
457 		kfree(mhp);
458 		return ERR_PTR(err);
459 	}
460 
461 	shift = PAGE_SHIFT;
462 
463 	n = ib_umem_num_pages(mhp->umem);
464 
465 	err = iwch_alloc_pbl(mhp, n);
466 	if (err)
467 		goto err;
468 
469 	pages = (__be64 *) __get_free_page(GFP_KERNEL);
470 	if (!pages) {
471 		err = -ENOMEM;
472 		goto err_pbl;
473 	}
474 
475 	i = n = 0;
476 
477 	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
478 		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
479 		if (i == PAGE_SIZE / sizeof(*pages)) {
480 			err = iwch_write_pbl(mhp, pages, i, n);
481 			if (err)
482 				goto pbl_done;
483 			n += i;
484 			i = 0;
485 		}
486 	}
487 
488 	if (i)
489 		err = iwch_write_pbl(mhp, pages, i, n);
490 
491 pbl_done:
492 	free_page((unsigned long) pages);
493 	if (err)
494 		goto err_pbl;
495 
496 	mhp->attr.pdid = php->pdid;
497 	mhp->attr.zbva = 0;
498 	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
499 	mhp->attr.va_fbo = virt;
500 	mhp->attr.page_size = shift - 12;
501 	mhp->attr.len = (u32) length;
502 
503 	err = iwch_register_mem(rhp, php, mhp, shift);
504 	if (err)
505 		goto err_pbl;
506 
507 	if (udata && !t3a_device(rhp)) {
508 		uresp.pbl_addr = (mhp->attr.pbl_addr -
509 				 rhp->rdev.rnic_info.pbl_base) >> 3;
510 		pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
511 			 uresp.pbl_addr);
512 
513 		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
514 			iwch_dereg_mr(&mhp->ibmr, udata);
515 			err = -EFAULT;
516 			goto err;
517 		}
518 	}
519 
520 	return &mhp->ibmr;
521 
522 err_pbl:
523 	iwch_free_pbl(mhp);
524 
525 err:
526 	ib_umem_release(mhp->umem);
527 	kfree(mhp);
528 	return ERR_PTR(err);
529 }
530 
531 static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
532 				   struct ib_udata *udata)
533 {
534 	struct iwch_dev *rhp;
535 	struct iwch_pd *php;
536 	struct iwch_mw *mhp;
537 	u32 mmid;
538 	u32 stag = 0;
539 	int ret;
540 
541 	if (type != IB_MW_TYPE_1)
542 		return ERR_PTR(-EINVAL);
543 
544 	php = to_iwch_pd(pd);
545 	rhp = php->rhp;
546 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
547 	if (!mhp)
548 		return ERR_PTR(-ENOMEM);
549 	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
550 	if (ret) {
551 		kfree(mhp);
552 		return ERR_PTR(ret);
553 	}
554 	mhp->rhp = rhp;
555 	mhp->attr.pdid = php->pdid;
556 	mhp->attr.type = TPT_MW;
557 	mhp->attr.stag = stag;
558 	mmid = (stag) >> 8;
559 	mhp->ibmw.rkey = stag;
560 	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
561 		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
562 		kfree(mhp);
563 		return ERR_PTR(-ENOMEM);
564 	}
565 	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
566 	return &(mhp->ibmw);
567 }
568 
569 static int iwch_dealloc_mw(struct ib_mw *mw)
570 {
571 	struct iwch_dev *rhp;
572 	struct iwch_mw *mhp;
573 	u32 mmid;
574 
575 	mhp = to_iwch_mw(mw);
576 	rhp = mhp->rhp;
577 	mmid = (mw->rkey) >> 8;
578 	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
579 	xa_erase_irq(&rhp->mrs, mmid);
580 	pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
581 	kfree(mhp);
582 	return 0;
583 }
584 
585 static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
586 				   u32 max_num_sg, struct ib_udata *udata)
587 {
588 	struct iwch_dev *rhp;
589 	struct iwch_pd *php;
590 	struct iwch_mr *mhp;
591 	u32 mmid;
592 	u32 stag = 0;
593 	int ret = -ENOMEM;
594 
595 	if (mr_type != IB_MR_TYPE_MEM_REG ||
596 	    max_num_sg > T3_MAX_FASTREG_DEPTH)
597 		return ERR_PTR(-EINVAL);
598 
599 	php = to_iwch_pd(pd);
600 	rhp = php->rhp;
601 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
602 	if (!mhp)
603 		goto err;
604 
605 	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
606 	if (!mhp->pages)
607 		goto pl_err;
608 
609 	mhp->rhp = rhp;
610 	ret = iwch_alloc_pbl(mhp, max_num_sg);
611 	if (ret)
612 		goto err1;
613 	mhp->attr.pbl_size = max_num_sg;
614 	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
615 				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
616 	if (ret)
617 		goto err2;
618 	mhp->attr.pdid = php->pdid;
619 	mhp->attr.type = TPT_NON_SHARED_MR;
620 	mhp->attr.stag = stag;
621 	mhp->attr.state = 1;
622 	mmid = (stag) >> 8;
623 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
624 	ret = xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL);
625 	if (ret)
626 		goto err3;
627 
628 	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
629 	return &(mhp->ibmr);
630 err3:
631 	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
632 		       mhp->attr.pbl_addr);
633 err2:
634 	iwch_free_pbl(mhp);
635 err1:
636 	kfree(mhp->pages);
637 pl_err:
638 	kfree(mhp);
639 err:
640 	return ERR_PTR(ret);
641 }
642 
643 static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
644 {
645 	struct iwch_mr *mhp = to_iwch_mr(ibmr);
646 
647 	if (unlikely(mhp->npages == mhp->attr.pbl_size))
648 		return -ENOMEM;
649 
650 	mhp->pages[mhp->npages++] = addr;
651 
652 	return 0;
653 }
654 
655 static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
656 			  int sg_nents, unsigned int *sg_offset)
657 {
658 	struct iwch_mr *mhp = to_iwch_mr(ibmr);
659 
660 	mhp->npages = 0;
661 
662 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
663 }
664 
665 static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
666 {
667 	struct iwch_dev *rhp;
668 	struct iwch_qp *qhp;
669 	struct iwch_qp_attributes attrs;
670 	struct iwch_ucontext *ucontext;
671 
672 	qhp = to_iwch_qp(ib_qp);
673 	rhp = qhp->rhp;
674 
675 	attrs.next_state = IWCH_QP_STATE_ERROR;
676 	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
677 	wait_event(qhp->wait, !qhp->ep);
678 
679 	xa_erase_irq(&rhp->qps, qhp->wq.qpid);
680 
681 	atomic_dec(&qhp->refcnt);
682 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
683 
684 	ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
685 					     ibucontext);
686 	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
687 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
688 
689 	pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
690 		 ib_qp, qhp->wq.qpid, qhp);
691 	kfree(qhp);
692 	return 0;
693 }
694 
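/*
 * iwch_create_qp(): the RQ depth is rounded up to a power of two with
 * one extra slot (minimum 16), and kernel QPs get additional SQ space
 * because fastreg WRs can take two WR slots.  For user QPs, two mmap
 * entries are created: one for the WQ memory and one for the doorbell
 * page.
 */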
695 static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
696 			     struct ib_qp_init_attr *attrs,
697 			     struct ib_udata *udata)
698 {
699 	struct iwch_dev *rhp;
700 	struct iwch_qp *qhp;
701 	struct iwch_pd *php;
702 	struct iwch_cq *schp;
703 	struct iwch_cq *rchp;
704 	struct iwch_create_qp_resp uresp;
705 	int wqsize, sqsize, rqsize;
706 	struct iwch_ucontext *ucontext;
707 
708 	pr_debug("%s ib_pd %p\n", __func__, pd);
709 	if (attrs->qp_type != IB_QPT_RC)
710 		return ERR_PTR(-EINVAL);
711 	php = to_iwch_pd(pd);
712 	rhp = php->rhp;
713 	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
714 	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
715 	if (!schp || !rchp)
716 		return ERR_PTR(-EINVAL);
717 
718 	/* The RQT size must be # of entries + 1 rounded up to a power of two */
719 	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
720 	if (rqsize == attrs->cap.max_recv_wr)
721 		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
722 
723 	/* T3 doesn't support RQT depth < 16 */
724 	if (rqsize < 16)
725 		rqsize = 16;
726 
727 	if (rqsize > T3_MAX_RQ_SIZE)
728 		return ERR_PTR(-EINVAL);
729 
730 	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
731 		return ERR_PTR(-EINVAL);
732 
733 	/*
734 	 * NOTE: The SQ and total WQ sizes don't need to be
735 	 * a power of two.  However, all the code assumes
736 	 * they are. EG: Q_FREECNT() and friends.
737 	 */
738 	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
739 	wqsize = roundup_pow_of_two(rqsize + sqsize);
740 
741 	/*
742 	 * Kernel users need more wq space for fastreg WRs which can take
743 	 * 2 WR fragments.
744 	 */
745 	ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
746 					     ibucontext);
747 	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
748 		wqsize = roundup_pow_of_two(rqsize +
749 				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
750 	pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
751 		 wqsize, sqsize, rqsize);
752 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
753 	if (!qhp)
754 		return ERR_PTR(-ENOMEM);
755 	qhp->wq.size_log2 = ilog2(wqsize);
756 	qhp->wq.rq_size_log2 = ilog2(rqsize);
757 	qhp->wq.sq_size_log2 = ilog2(sqsize);
758 	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
759 			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
760 		kfree(qhp);
761 		return ERR_PTR(-ENOMEM);
762 	}
763 
764 	attrs->cap.max_recv_wr = rqsize - 1;
765 	attrs->cap.max_send_wr = sqsize;
766 	attrs->cap.max_inline_data = T3_MAX_INLINE;
767 
768 	qhp->rhp = rhp;
769 	qhp->attr.pd = php->pdid;
770 	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
771 	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
772 	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
773 	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
774 	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
775 	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
776 	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
777 	qhp->attr.state = IWCH_QP_STATE_IDLE;
778 	qhp->attr.next_state = IWCH_QP_STATE_IDLE;
779 
780 	/*
781 	 * XXX - These don't get passed in from the openib user
782 	 * at create time.  The CM sets them via a QP modify.
783 	 * Need to fix...  I think the CM should
784 	 */
785 	qhp->attr.enable_rdma_read = 1;
786 	qhp->attr.enable_rdma_write = 1;
787 	qhp->attr.enable_bind = 1;
788 	qhp->attr.max_ord = 1;
789 	qhp->attr.max_ird = 1;
790 
791 	spin_lock_init(&qhp->lock);
792 	init_waitqueue_head(&qhp->wait);
793 	atomic_set(&qhp->refcnt, 1);
794 
795 	if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
796 		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
797 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
798 		kfree(qhp);
799 		return ERR_PTR(-ENOMEM);
800 	}
801 
802 	if (udata) {
803 
804 		struct iwch_mm_entry *mm1, *mm2;
805 
806 		mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
807 		if (!mm1) {
808 			iwch_destroy_qp(&qhp->ibqp, udata);
809 			return ERR_PTR(-ENOMEM);
810 		}
811 
812 		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
813 		if (!mm2) {
814 			kfree(mm1);
815 			iwch_destroy_qp(&qhp->ibqp, udata);
816 			return ERR_PTR(-ENOMEM);
817 		}
818 
819 		uresp.qpid = qhp->wq.qpid;
820 		uresp.size_log2 = qhp->wq.size_log2;
821 		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
822 		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
823 		spin_lock(&ucontext->mmap_lock);
824 		uresp.key = ucontext->key;
825 		ucontext->key += PAGE_SIZE;
826 		uresp.db_key = ucontext->key;
827 		ucontext->key += PAGE_SIZE;
828 		spin_unlock(&ucontext->mmap_lock);
829 		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
830 			kfree(mm1);
831 			kfree(mm2);
832 			iwch_destroy_qp(&qhp->ibqp, udata);
833 			return ERR_PTR(-EFAULT);
834 		}
835 		mm1->key = uresp.key;
836 		mm1->addr = virt_to_phys(qhp->wq.queue);
837 		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
838 		insert_mmap(ucontext, mm1);
839 		mm2->key = uresp.db_key;
840 		mm2->addr = qhp->wq.udb & PAGE_MASK;
841 		mm2->len = PAGE_SIZE;
842 		insert_mmap(ucontext, mm2);
843 	}
844 	qhp->ibqp.qp_num = qhp->wq.qpid;
845 	pr_debug(
846 		"%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr %pad size %d rq_addr 0x%x\n",
847 		__func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
848 		qhp->wq.qpid, qhp, &qhp->wq.dma_addr, 1 << qhp->wq.size_log2,
849 		qhp->wq.rq_addr);
850 	return &qhp->ibqp;
851 }
852 
853 static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
854 		      int attr_mask, struct ib_udata *udata)
855 {
856 	struct iwch_dev *rhp;
857 	struct iwch_qp *qhp;
858 	enum iwch_qp_attr_mask mask = 0;
859 	struct iwch_qp_attributes attrs = {};
860 
861 	pr_debug("%s ib_qp %p\n", __func__, ibqp);
862 
863 	/* iwarp does not support the RTR state */
864 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
865 		attr_mask &= ~IB_QP_STATE;
866 
867 	/* Make sure we still have something left to do */
868 	if (!attr_mask)
869 		return 0;
870 
871 	qhp = to_iwch_qp(ibqp);
872 	rhp = qhp->rhp;
873 
874 	attrs.next_state = iwch_convert_state(attr->qp_state);
875 	attrs.enable_rdma_read = (attr->qp_access_flags &
876 			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
877 	attrs.enable_rdma_write = (attr->qp_access_flags &
878 				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
879 	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
880 
881 
882 	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
883 	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
884 			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
885 			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
886 			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
887 
888 	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
889 }
890 
891 void iwch_qp_add_ref(struct ib_qp *qp)
892 {
893 	pr_debug("%s ib_qp %p\n", __func__, qp);
894 	atomic_inc(&(to_iwch_qp(qp)->refcnt));
895 }
896 
897 void iwch_qp_rem_ref(struct ib_qp *qp)
898 {
899 	pr_debug("%s ib_qp %p\n", __func__, qp);
900 	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
901 	        wake_up(&(to_iwch_qp(qp)->wait));
902 }
903 
904 static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
905 {
906 	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
907 	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
908 }
909 
910 
911 static int iwch_query_pkey(struct ib_device *ibdev,
912 			   u8 port, u16 index, u16 * pkey)
913 {
914 	pr_debug("%s ibdev %p\n", __func__, ibdev);
915 	*pkey = 0;
916 	return 0;
917 }
918 
919 static int iwch_query_gid(struct ib_device *ibdev, u8 port,
920 			  int index, union ib_gid *gid)
921 {
922 	struct iwch_dev *dev;
923 
924 	pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
925 		 __func__, ibdev, port, index, gid);
926 	dev = to_iwch_dev(ibdev);
927 	BUG_ON(port == 0 || port > 2);
928 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
929 	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
930 	return 0;
931 }
932 
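/*
 * fw_vers_string_to_u64(): parses the "major.minor.micro" triple from
 * the ethtool fw_version string (skipping its first character) and
 * packs it as major << 32 | minor << 16 | micro.
 */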
933 static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
934 {
935 	struct ethtool_drvinfo info;
936 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
937 	char *cp, *next;
938 	unsigned fw_maj, fw_min, fw_mic;
939 
940 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
941 
942 	next = info.fw_version + 1;
943 	cp = strsep(&next, ".");
944 	sscanf(cp, "%i", &fw_maj);
945 	cp = strsep(&next, ".");
946 	sscanf(cp, "%i", &fw_min);
947 	cp = strsep(&next, ".");
948 	sscanf(cp, "%i", &fw_mic);
949 
950 	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
951 	       (fw_mic & 0xffff);
952 }
953 
954 static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
955 			     struct ib_udata *uhw)
956 {
957 
958 	struct iwch_dev *dev;
959 
960 	pr_debug("%s ibdev %p\n", __func__, ibdev);
961 
962 	if (uhw->inlen || uhw->outlen)
963 		return -EINVAL;
964 
965 	dev = to_iwch_dev(ibdev);
966 	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
967 	props->hw_ver = dev->rdev.t3cdev_p->type;
968 	props->fw_ver = fw_vers_string_to_u64(dev);
969 	props->device_cap_flags = dev->device_cap_flags;
970 	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
971 	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
972 	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
973 	props->max_mr_size = dev->attr.max_mr_size;
974 	props->max_qp = dev->attr.max_qps;
975 	props->max_qp_wr = dev->attr.max_wrs;
976 	props->max_send_sge = dev->attr.max_sge_per_wr;
977 	props->max_recv_sge = dev->attr.max_sge_per_wr;
978 	props->max_sge_rd = 1;
979 	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
980 	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
981 	props->max_cq = dev->attr.max_cqs;
982 	props->max_cqe = dev->attr.max_cqes_per_cq;
983 	props->max_mr = dev->attr.max_mem_regs;
984 	props->max_pd = dev->attr.max_pds;
985 	props->local_ca_ack_delay = 0;
986 	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
987 
988 	return 0;
989 }
990 
991 static int iwch_query_port(struct ib_device *ibdev,
992 			   u8 port, struct ib_port_attr *props)
993 {
994 	pr_debug("%s ibdev %p\n", __func__, ibdev);
995 
996 	props->port_cap_flags =
997 	    IB_PORT_CM_SUP |
998 	    IB_PORT_SNMP_TUNNEL_SUP |
999 	    IB_PORT_REINIT_SUP |
1000 	    IB_PORT_DEVICE_MGMT_SUP |
1001 	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
1002 	props->gid_tbl_len = 1;
1003 	props->pkey_tbl_len = 1;
1004 	props->active_width = 2;
1005 	props->active_speed = IB_SPEED_DDR;
1006 	props->max_msg_sz = -1;
1007 
1008 	return 0;
1009 }
1010 
1011 static ssize_t hw_rev_show(struct device *dev,
1012 			   struct device_attribute *attr, char *buf)
1013 {
1014 	struct iwch_dev *iwch_dev =
1015 			rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
1016 
1017 	pr_debug("%s dev 0x%p\n", __func__, dev);
1018 	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
1019 }
1020 static DEVICE_ATTR_RO(hw_rev);
1021 
1022 static ssize_t hca_type_show(struct device *dev,
1023 			     struct device_attribute *attr, char *buf)
1024 {
1025 	struct iwch_dev *iwch_dev =
1026 			rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
1027 	struct ethtool_drvinfo info;
1028 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1029 
1030 	pr_debug("%s dev 0x%p\n", __func__, dev);
1031 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
1032 	return sprintf(buf, "%s\n", info.driver);
1033 }
1034 static DEVICE_ATTR_RO(hca_type);
1035 
1036 static ssize_t board_id_show(struct device *dev,
1037 			     struct device_attribute *attr, char *buf)
1038 {
1039 	struct iwch_dev *iwch_dev =
1040 			rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
1041 
1042 	pr_debug("%s dev 0x%p\n", __func__, dev);
1043 	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
1044 		       iwch_dev->rdev.rnic_info.pdev->device);
1045 }
1046 static DEVICE_ATTR_RO(board_id);
1047 
1048 enum counters {
1049 	IPINRECEIVES,
1050 	IPINHDRERRORS,
1051 	IPINADDRERRORS,
1052 	IPINUNKNOWNPROTOS,
1053 	IPINDISCARDS,
1054 	IPINDELIVERS,
1055 	IPOUTREQUESTS,
1056 	IPOUTDISCARDS,
1057 	IPOUTNOROUTES,
1058 	IPREASMTIMEOUT,
1059 	IPREASMREQDS,
1060 	IPREASMOKS,
1061 	IPREASMFAILS,
1062 	TCPACTIVEOPENS,
1063 	TCPPASSIVEOPENS,
1064 	TCPATTEMPTFAILS,
1065 	TCPESTABRESETS,
1066 	TCPCURRESTAB,
1067 	TCPINSEGS,
1068 	TCPOUTSEGS,
1069 	TCPRETRANSSEGS,
1070 	TCPINERRS,
1071 	TCPOUTRSTS,
1072 	TCPRTOMIN,
1073 	TCPRTOMAX,
1074 	NR_COUNTERS
1075 };
1076 
1077 static const char * const names[] = {
1078 	[IPINRECEIVES] = "ipInReceives",
1079 	[IPINHDRERRORS] = "ipInHdrErrors",
1080 	[IPINADDRERRORS] = "ipInAddrErrors",
1081 	[IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
1082 	[IPINDISCARDS] = "ipInDiscards",
1083 	[IPINDELIVERS] = "ipInDelivers",
1084 	[IPOUTREQUESTS] = "ipOutRequests",
1085 	[IPOUTDISCARDS] = "ipOutDiscards",
1086 	[IPOUTNOROUTES] = "ipOutNoRoutes",
1087 	[IPREASMTIMEOUT] = "ipReasmTimeout",
1088 	[IPREASMREQDS] = "ipReasmReqds",
1089 	[IPREASMOKS] = "ipReasmOKs",
1090 	[IPREASMFAILS] = "ipReasmFails",
1091 	[TCPACTIVEOPENS] = "tcpActiveOpens",
1092 	[TCPPASSIVEOPENS] = "tcpPassiveOpens",
1093 	[TCPATTEMPTFAILS] = "tcpAttemptFails",
1094 	[TCPESTABRESETS] = "tcpEstabResets",
1095 	[TCPCURRESTAB] = "tcpCurrEstab",
1096 	[TCPINSEGS] = "tcpInSegs",
1097 	[TCPOUTSEGS] = "tcpOutSegs",
1098 	[TCPRETRANSSEGS] = "tcpRetransSegs",
1099 	[TCPINERRS] = "tcpInErrs",
1100 	[TCPOUTRSTS] = "tcpOutRsts",
1101 	[TCPRTOMIN] = "tcpRtoMin",
1102 	[TCPRTOMAX] = "tcpRtoMax",
1103 };
1104 
1105 static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
1106 					      u8 port_num)
1107 {
1108 	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
1109 
1110 	/* Our driver only supports device level stats */
1111 	if (port_num != 0)
1112 		return NULL;
1113 
1114 	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
1115 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
1116 }
1117 
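/*
 * iwch_get_mib(): device-level hardware stats only.  The MIB snapshot
 * is fetched from the LLD with the RDMA_GET_MIB control call and the
 * 32-bit hi/lo halves are combined into 64-bit rdma_hw_stats values.
 */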
1118 static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1119 			u8 port, int index)
1120 {
1121 	struct iwch_dev *dev;
1122 	struct tp_mib_stats m;
1123 	int ret;
1124 
1125 	if (port != 0 || !stats)
1126 		return -ENOSYS;
1127 
1128 	pr_debug("%s ibdev %p\n", __func__, ibdev);
1129 	dev = to_iwch_dev(ibdev);
1130 	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
1131 	if (ret)
1132 		return -ENOSYS;
1133 
1134 	stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) +	m.ipInReceive_lo;
1135 	stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
1136 	stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
1137 	stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
1138 	stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
1139 	stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
1140 	stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
1141 	stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
1142 	stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
1143 	stats->value[IPREASMTIMEOUT] = 	m.ipReasmTimeout;
1144 	stats->value[IPREASMREQDS] = m.ipReasmReqds;
1145 	stats->value[IPREASMOKS] = m.ipReasmOKs;
1146 	stats->value[IPREASMFAILS] = m.ipReasmFails;
1147 	stats->value[TCPACTIVEOPENS] =	m.tcpActiveOpens;
1148 	stats->value[TCPPASSIVEOPENS] =	m.tcpPassiveOpens;
1149 	stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
1150 	stats->value[TCPESTABRESETS] = m.tcpEstabResets;
1151 	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
1152 	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
1153 	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
1154 	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
1155 	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
1156 	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
1157 	stats->value[TCPRTOMIN] = m.tcpRtoMin;
1158 	stats->value[TCPRTOMAX] = m.tcpRtoMax;
1159 
1160 	return stats->num_counters;
1161 }
1162 
1163 static struct attribute *iwch_class_attributes[] = {
1164 	&dev_attr_hw_rev.attr,
1165 	&dev_attr_hca_type.attr,
1166 	&dev_attr_board_id.attr,
1167 	NULL
1168 };
1169 
1170 static const struct attribute_group iwch_attr_group = {
1171 	.attrs = iwch_class_attributes,
1172 };
1173 
1174 static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
1175 			       struct ib_port_immutable *immutable)
1176 {
1177 	struct ib_port_attr attr;
1178 	int err;
1179 
1180 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
1181 
1182 	err = ib_query_port(ibdev, port_num, &attr);
1183 	if (err)
1184 		return err;
1185 
1186 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
1187 	immutable->gid_tbl_len = attr.gid_tbl_len;
1188 
1189 	return 0;
1190 }
1191 
1192 static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
1193 {
1194 	struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
1195 	struct ethtool_drvinfo info;
1196 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
1197 
1198 	pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
1199 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
1200 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
1201 }
1202 
1203 static const struct ib_device_ops iwch_dev_ops = {
1204 	.owner = THIS_MODULE,
1205 	.driver_id = RDMA_DRIVER_CXGB3,
1206 	.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION,
1207 	.uverbs_no_driver_id_binding = 1,
1208 
1209 	.alloc_hw_stats	= iwch_alloc_stats,
1210 	.alloc_mr = iwch_alloc_mr,
1211 	.alloc_mw = iwch_alloc_mw,
1212 	.alloc_pd = iwch_allocate_pd,
1213 	.alloc_ucontext = iwch_alloc_ucontext,
1214 	.create_cq = iwch_create_cq,
1215 	.create_qp = iwch_create_qp,
1216 	.dealloc_mw = iwch_dealloc_mw,
1217 	.dealloc_pd = iwch_deallocate_pd,
1218 	.dealloc_ucontext = iwch_dealloc_ucontext,
1219 	.dereg_mr = iwch_dereg_mr,
1220 	.destroy_cq = iwch_destroy_cq,
1221 	.destroy_qp = iwch_destroy_qp,
1222 	.get_dev_fw_str = get_dev_fw_ver_str,
1223 	.get_dma_mr = iwch_get_dma_mr,
1224 	.get_hw_stats = iwch_get_mib,
1225 	.get_port_immutable = iwch_port_immutable,
1226 	.iw_accept = iwch_accept_cr,
1227 	.iw_add_ref = iwch_qp_add_ref,
1228 	.iw_connect = iwch_connect,
1229 	.iw_create_listen = iwch_create_listen,
1230 	.iw_destroy_listen = iwch_destroy_listen,
1231 	.iw_get_qp = iwch_get_qp,
1232 	.iw_reject = iwch_reject_cr,
1233 	.iw_rem_ref = iwch_qp_rem_ref,
1234 	.map_mr_sg = iwch_map_mr_sg,
1235 	.mmap = iwch_mmap,
1236 	.modify_qp = iwch_ib_modify_qp,
1237 	.poll_cq = iwch_poll_cq,
1238 	.post_recv = iwch_post_receive,
1239 	.post_send = iwch_post_send,
1240 	.query_device = iwch_query_device,
1241 	.query_gid = iwch_query_gid,
1242 	.query_pkey = iwch_query_pkey,
1243 	.query_port = iwch_query_port,
1244 	.reg_user_mr = iwch_reg_user_mr,
1245 	.req_notify_cq = iwch_arm_cq,
1246 	INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
1247 	INIT_RDMA_OBJ_SIZE(ib_cq, iwch_cq, ibcq),
1248 	INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
1249 };
1250 
1251 static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
1252 {
1253 	int ret;
1254 	int i;
1255 
1256 	for (i = 0; i < rdev->port_info.nports; i++) {
1257 		ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
1258 					   i + 1);
1259 		if (ret)
1260 			return ret;
1261 	}
1262 	return 0;
1263 }
1264 
1265 int iwch_register_device(struct iwch_dev *dev)
1266 {
1267 	int err;
1268 
1269 	pr_debug("%s iwch_dev %p\n", __func__, dev);
1270 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
1271 	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
1272 	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
1273 				IB_DEVICE_MEM_WINDOW |
1274 				IB_DEVICE_MEM_MGT_EXTENSIONS;
1275 
1276 	/* cxgb3 supports STag 0. */
1277 	dev->ibdev.local_dma_lkey = 0;
1278 
1279 	dev->ibdev.uverbs_cmd_mask =
1280 	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1281 	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1282 	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1283 	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1284 	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1285 	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
1286 	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
1287 	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1288 	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
1289 	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
1290 	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
1291 	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
1292 	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1293 	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
1294 	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1295 	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
1296 	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
1297 	dev->ibdev.node_type = RDMA_NODE_RNIC;
1298 	BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
1299 	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
1300 	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
1301 	dev->ibdev.num_comp_vectors = 1;
1302 	dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
1303 
1304 	memcpy(dev->ibdev.iw_ifname, dev->rdev.t3cdev_p->lldev->name,
1305 	       sizeof(dev->ibdev.iw_ifname));
1306 
1307 	rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
1308 	ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
1309 	err = set_netdevs(&dev->ibdev, &dev->rdev);
1310 	if (err)
1311 		return err;
1312 
1313 	return ib_register_device(&dev->ibdev, "cxgb3_%d");
1314 }
1315 
1316 void iwch_unregister_device(struct iwch_dev *dev)
1317 {
1318 	pr_debug("%s iwch_dev %p\n", __func__, dev);
1319 	ib_unregister_device(&dev->ibdev);
1320 	return;
1321 }
1322