/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller, so avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
			struct rxe_av *av)
{
	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr);
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	rxe_init_av(rxe, attr, &ah->av);
	return &ah->ibah;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(rxe, attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

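/* Copy one receive work request into the next free slot of the receive
 * queue. Callers hold the queue's producer lock.
 */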
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

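/* Create a shared receive queue. When called on behalf of a user process,
 * the response describing the new queue is returned through udata->outbuf.
 */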
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

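/* Sanity check a send work request against the limits of the send queue
 * before it is copied into a WQE: number of SGEs, atomic length and
 * alignment, and inline data size.
 */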
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

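/* Translate the opcode-specific fields of an ib_send_wr into the driver's
 * internal rxe_send_wr representation.
 */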
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

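/* Build a send WQE from a work request: fill in the driver WR, copy the AV
 * for datagram QPs, and either copy inline data or the scatter/gather list.
 */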
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

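/* Validate one send work request and copy it into the next free slot of the
 * send queue under the send queue lock.
 */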
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	struct ib_send_wr *next;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		next = wr->next;

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

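/* Post a list of send work requests. For user QPs the WQEs are written into
 * the shared send queue by the userspace provider, so posting only kicks the
 * requester task; kernel QPs copy the WQEs into the send queue here.
 */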
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

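/* Copy up to num_entries completed CQEs from the completion queue into the
 * caller's work completion array, under the CQ lock.
 */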
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

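/* Callback used by ib_sg_to_pages() to record one page address in the
 * memory region's page map.
 */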
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

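/* Fill in the ib_device fields and verbs callbacks, allocate the CRC32
 * transform used for ICRC calculation, register the device with the RDMA
 * core, and create the sysfs attributes.
 */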
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->get_netdev = rxe_get_netdev;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}