1 /*
2 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include <linux/dma-mapping.h>
35 #include <net/addrconf.h>
36 #include "rxe.h"
37 #include "rxe_loc.h"
38 #include "rxe_queue.h"
39 #include "rxe_hw_counters.h"
40
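/* Report the device attributes cached at init time; no vendor-specific
 * udata is supported, so any request or response payload is rejected.
 */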
41 static int rxe_query_device(struct ib_device *dev,
42 struct ib_device_attr *attr,
43 struct ib_udata *uhw)
44 {
45 struct rxe_dev *rxe = to_rdev(dev);
46
47 if (uhw->inlen || uhw->outlen)
48 return -EINVAL;
49
50 *attr = rxe->attr;
51 return 0;
52 }
53
54 static int rxe_query_port(struct ib_device *dev,
55 u8 port_num, struct ib_port_attr *attr)
56 {
57 struct rxe_dev *rxe = to_rdev(dev);
58 struct rxe_port *port;
59 int rc = -EINVAL;
60
61 if (unlikely(port_num != 1)) {
62 pr_warn("invalid port_number %d\n", port_num);
63 goto out;
64 }
65
66 port = &rxe->port;
67
68 /* *attr is zeroed by the caller; avoid zeroing it again here */
69 *attr = port->attr;
70
71 mutex_lock(&rxe->usdev_lock);
72 rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
73 &attr->active_width);
74 mutex_unlock(&rxe->usdev_lock);
75
76 out:
77 return rc;
78 }
79
80 static int rxe_query_gid(struct ib_device *device,
81 u8 port_num, int index, union ib_gid *gid)
82 {
83 int ret;
84
85 if (index > RXE_PORT_GID_TBL_LEN)
86 return -EINVAL;
87
88 ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
89 if (ret == -EAGAIN) {
90 memcpy(gid, &zgid, sizeof(*gid));
91 return 0;
92 }
93
94 return ret;
95 }
96
97 static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
98 index, const union ib_gid *gid,
99 const struct ib_gid_attr *attr, void **context)
100 {
101 if (index >= RXE_PORT_GID_TBL_LEN)
102 return -EINVAL;
103 return 0;
104 }
105
106 static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
107 index, void **context)
108 {
109 if (index >= RXE_PORT_GID_TBL_LEN)
110 return -EINVAL;
111 return 0;
112 }
113
114 static struct net_device *rxe_get_netdev(struct ib_device *device,
115 u8 port_num)
116 {
117 struct rxe_dev *rxe = to_rdev(device);
118
119 if (rxe->ndev) {
120 dev_hold(rxe->ndev);
121 return rxe->ndev;
122 }
123
124 return NULL;
125 }
126
127 static int rxe_query_pkey(struct ib_device *device,
128 u8 port_num, u16 index, u16 *pkey)
129 {
130 struct rxe_dev *rxe = to_rdev(device);
131 struct rxe_port *port;
132
133 if (unlikely(port_num != 1)) {
134 dev_warn(device->dev.parent, "invalid port_num = %d\n",
135 port_num);
136 goto err1;
137 }
138
139 port = &rxe->port;
140
141 if (unlikely(index >= port->attr.pkey_tbl_len)) {
142 dev_warn(device->dev.parent, "invalid index = %d\n",
143 index);
144 goto err1;
145 }
146
147 *pkey = port->pkey_tbl[index];
148 return 0;
149
150 err1:
151 return -EINVAL;
152 }
153
154 static int rxe_modify_device(struct ib_device *dev,
155 int mask, struct ib_device_modify *attr)
156 {
157 struct rxe_dev *rxe = to_rdev(dev);
158
159 if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
160 rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
161
162 if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
163 memcpy(rxe->ib_dev.node_desc,
164 attr->node_desc, sizeof(rxe->ib_dev.node_desc));
165 }
166
167 return 0;
168 }
169
170 static int rxe_modify_port(struct ib_device *dev,
171 u8 port_num, int mask, struct ib_port_modify *attr)
172 {
173 struct rxe_dev *rxe = to_rdev(dev);
174 struct rxe_port *port;
175
176 if (unlikely(port_num != 1)) {
177 pr_warn("invalid port_num = %d\n", port_num);
178 goto err1;
179 }
180
181 port = &rxe->port;
182
183 port->attr.port_cap_flags |= attr->set_port_cap_mask;
184 port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;
185
186 if (mask & IB_PORT_RESET_QKEY_CNTR)
187 port->attr.qkey_viol_cntr = 0;
188
189 return 0;
190
191 err1:
192 return -EINVAL;
193 }
194
195 static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
196 u8 port_num)
197 {
198 struct rxe_dev *rxe = to_rdev(dev);
199
200 return rxe_link_layer(rxe, port_num);
201 }
202
203 static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
204 struct ib_udata *udata)
205 {
206 struct rxe_dev *rxe = to_rdev(dev);
207 struct rxe_ucontext *uc;
208
209 uc = rxe_alloc(&rxe->uc_pool);
210 return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
211 }
212
213 static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
214 {
215 struct rxe_ucontext *uc = to_ruc(ibuc);
216
217 rxe_drop_ref(uc);
218 return 0;
219 }
220
221 static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
222 struct ib_port_immutable *immutable)
223 {
224 int err;
225 struct ib_port_attr attr;
226
227 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
228
229 err = ib_query_port(dev, port_num, &attr);
230 if (err)
231 return err;
232
233 immutable->pkey_tbl_len = attr.pkey_tbl_len;
234 immutable->gid_tbl_len = attr.gid_tbl_len;
235 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
236
237 return 0;
238 }
239
240 static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
241 struct ib_ucontext *context,
242 struct ib_udata *udata)
243 {
244 struct rxe_dev *rxe = to_rdev(dev);
245 struct rxe_pd *pd;
246
247 pd = rxe_alloc(&rxe->pd_pool);
248 return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
249 }
250
251 static int rxe_dealloc_pd(struct ib_pd *ibpd)
252 {
253 struct rxe_pd *pd = to_rpd(ibpd);
254
255 rxe_drop_ref(pd);
256 return 0;
257 }
258
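/* Resolve the source GID named by the AH attribute through the GID cache
 * and use it to fill in the IP addressing info of the rxe address vector.
 */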
259 static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
260 struct rxe_av *av)
261 {
262 int err;
263 union ib_gid sgid;
264 struct ib_gid_attr sgid_attr;
265
266 err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
267 rdma_ah_read_grh(attr)->sgid_index, &sgid,
268 &sgid_attr);
269 if (err) {
270 pr_err("Failed to query sgid. err = %d\n", err);
271 return err;
272 }
273
274 err = rxe_av_from_attr(rxe, rdma_ah_get_port_num(attr), av, attr);
275 if (!err)
276 err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);
277
278 if (sgid_attr.ndev)
279 dev_put(sgid_attr.ndev);
280 return err;
281 }
282
283 static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
284 struct rdma_ah_attr *attr,
285 struct ib_udata *udata)
286
287 {
288 int err;
289 struct rxe_dev *rxe = to_rdev(ibpd->device);
290 struct rxe_pd *pd = to_rpd(ibpd);
291 struct rxe_ah *ah;
292
293 err = rxe_av_chk_attr(rxe, attr);
294 if (err)
295 goto err1;
296
297 ah = rxe_alloc(&rxe->ah_pool);
298 if (!ah) {
299 err = -ENOMEM;
300 goto err1;
301 }
302
303 rxe_add_ref(pd);
304 ah->pd = pd;
305
306 err = rxe_init_av(rxe, attr, &ah->av);
307 if (err)
308 goto err2;
309
310 return &ah->ibah;
311
312 err2:
313 rxe_drop_ref(pd);
314 rxe_drop_ref(ah);
315 err1:
316 return ERR_PTR(err);
317 }
318
319 static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
320 {
321 int err;
322 struct rxe_dev *rxe = to_rdev(ibah->device);
323 struct rxe_ah *ah = to_rah(ibah);
324
325 err = rxe_av_chk_attr(rxe, attr);
326 if (err)
327 return err;
328
329 err = rxe_init_av(rxe, attr, &ah->av);
330 if (err)
331 return err;
332
333 return 0;
334 }
335
336 static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
337 {
338 struct rxe_dev *rxe = to_rdev(ibah->device);
339 struct rxe_ah *ah = to_rah(ibah);
340
341 memset(attr, 0, sizeof(*attr));
342 attr->type = ibah->type;
343 rxe_av_to_attr(rxe, &ah->av, attr);
344 return 0;
345 }
346
347 static int rxe_destroy_ah(struct ib_ah *ibah)
348 {
349 struct rxe_ah *ah = to_rah(ibah);
350
351 rxe_drop_ref(ah->pd);
352 rxe_drop_ref(ah);
353 return 0;
354 }
355
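/* Copy a single ib_recv_wr into the next free slot of a receive queue.
 * Callers serialize on the queue's producer_lock; the smp_wmb() orders the
 * WQE writes ahead of the producer index update.
 */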
356 static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
357 {
358 int err;
359 int i;
360 u32 length;
361 struct rxe_recv_wqe *recv_wqe;
362 int num_sge = ibwr->num_sge;
363
364 if (unlikely(queue_full(rq->queue))) {
365 err = -ENOMEM;
366 goto err1;
367 }
368
369 if (unlikely(num_sge > rq->max_sge)) {
370 err = -EINVAL;
371 goto err1;
372 }
373
374 length = 0;
375 for (i = 0; i < num_sge; i++)
376 length += ibwr->sg_list[i].length;
377
378 recv_wqe = producer_addr(rq->queue);
379 recv_wqe->wr_id = ibwr->wr_id;
380 recv_wqe->num_sge = num_sge;
381
382 memcpy(recv_wqe->dma.sge, ibwr->sg_list,
383 num_sge * sizeof(struct ib_sge));
384
385 recv_wqe->dma.length = length;
386 recv_wqe->dma.resid = length;
387 recv_wqe->dma.num_sge = num_sge;
388 recv_wqe->dma.cur_sge = 0;
389 recv_wqe->dma.sge_offset = 0;
390
391 /* make sure all changes to the work queue are written before we
392 * update the producer pointer
393 */
394 smp_wmb();
395
396 advance_producer(rq->queue);
397 return 0;
398
399 err1:
400 return err;
401 }
402
403 static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
404 struct ib_srq_init_attr *init,
405 struct ib_udata *udata)
406 {
407 int err;
408 struct rxe_dev *rxe = to_rdev(ibpd->device);
409 struct rxe_pd *pd = to_rpd(ibpd);
410 struct rxe_srq *srq;
411 struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
412
413 err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
414 if (err)
415 goto err1;
416
417 srq = rxe_alloc(&rxe->srq_pool);
418 if (!srq) {
419 err = -ENOMEM;
420 goto err1;
421 }
422
423 rxe_add_index(srq);
424 rxe_add_ref(pd);
425 srq->pd = pd;
426
427 err = rxe_srq_from_init(rxe, srq, init, context, udata);
428 if (err)
429 goto err2;
430
431 return &srq->ibsrq;
432
433 err2:
434 rxe_drop_ref(pd);
435 rxe_drop_index(srq);
436 rxe_drop_ref(srq);
437 err1:
438 return ERR_PTR(err);
439 }
440
441 static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
442 enum ib_srq_attr_mask mask,
443 struct ib_udata *udata)
444 {
445 int err;
446 struct rxe_srq *srq = to_rsrq(ibsrq);
447 struct rxe_dev *rxe = to_rdev(ibsrq->device);
448
449 err = rxe_srq_chk_attr(rxe, srq, attr, mask);
450 if (err)
451 goto err1;
452
453 err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
454 if (err)
455 goto err1;
456
457 return 0;
458
459 err1:
460 return err;
461 }
462
463 static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
464 {
465 struct rxe_srq *srq = to_rsrq(ibsrq);
466
467 if (srq->error)
468 return -EINVAL;
469
470 attr->max_wr = srq->rq.queue->buf->index_mask;
471 attr->max_sge = srq->rq.max_sge;
472 attr->srq_limit = srq->limit;
473 return 0;
474 }
475
476 static int rxe_destroy_srq(struct ib_srq *ibsrq)
477 {
478 struct rxe_srq *srq = to_rsrq(ibsrq);
479
480 if (srq->rq.queue)
481 rxe_queue_cleanup(srq->rq.queue);
482
483 rxe_drop_ref(srq->pd);
484 rxe_drop_index(srq);
485 rxe_drop_ref(srq);
486
487 return 0;
488 }
489
490 static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
491 struct ib_recv_wr **bad_wr)
492 {
493 int err = 0;
494 unsigned long flags;
495 struct rxe_srq *srq = to_rsrq(ibsrq);
496
497 spin_lock_irqsave(&srq->rq.producer_lock, flags);
498
499 while (wr) {
500 err = post_one_recv(&srq->rq, wr);
501 if (unlikely(err))
502 break;
503 wr = wr->next;
504 }
505
506 spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
507
508 if (err)
509 *bad_wr = wr;
510
511 return err;
512 }
513
514 static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
515 struct ib_qp_init_attr *init,
516 struct ib_udata *udata)
517 {
518 int err;
519 struct rxe_dev *rxe = to_rdev(ibpd->device);
520 struct rxe_pd *pd = to_rpd(ibpd);
521 struct rxe_qp *qp;
522
523 err = rxe_qp_chk_init(rxe, init);
524 if (err)
525 goto err1;
526
527 qp = rxe_alloc(&rxe->qp_pool);
528 if (!qp) {
529 err = -ENOMEM;
530 goto err1;
531 }
532
533 if (udata) {
534 if (udata->inlen) {
535 err = -EINVAL;
536 goto err2;
537 }
538 qp->is_user = 1;
539 }
540
541 rxe_add_index(qp);
542
543 err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
544 if (err)
545 goto err3;
546
547 return &qp->ibqp;
548
549 err3:
550 rxe_drop_index(qp);
551 err2:
552 rxe_drop_ref(qp);
553 err1:
554 return ERR_PTR(err);
555 }
556
557 static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
558 int mask, struct ib_udata *udata)
559 {
560 int err;
561 struct rxe_dev *rxe = to_rdev(ibqp->device);
562 struct rxe_qp *qp = to_rqp(ibqp);
563
564 err = rxe_qp_chk_attr(rxe, qp, attr, mask);
565 if (err)
566 goto err1;
567
568 err = rxe_qp_from_attr(qp, attr, mask, udata);
569 if (err)
570 goto err1;
571
572 return 0;
573
574 err1:
575 return err;
576 }
577
578 static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
579 int mask, struct ib_qp_init_attr *init)
580 {
581 struct rxe_qp *qp = to_rqp(ibqp);
582
583 rxe_qp_to_init(qp, init);
584 rxe_qp_to_attr(qp, attr, mask);
585
586 return 0;
587 }
588
589 static int rxe_destroy_qp(struct ib_qp *ibqp)
590 {
591 struct rxe_qp *qp = to_rqp(ibqp);
592
593 rxe_qp_destroy(qp);
594 rxe_drop_index(qp);
595 rxe_drop_ref(qp);
596 return 0;
597 }
598
599 static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
600 unsigned int mask, unsigned int length)
601 {
602 int num_sge = ibwr->num_sge;
603 struct rxe_sq *sq = &qp->sq;
604
605 if (unlikely(num_sge > sq->max_sge))
606 goto err1;
607
608 if (unlikely(mask & WR_ATOMIC_MASK)) {
609 if (length < 8)
610 goto err1;
611
612 if (atomic_wr(ibwr)->remote_addr & 0x7)
613 goto err1;
614 }
615
616 if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
617 (length > sq->max_inline)))
618 goto err1;
619
620 return 0;
621
622 err1:
623 return -EINVAL;
624 }
625
626 static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
627 struct ib_send_wr *ibwr)
628 {
629 wr->wr_id = ibwr->wr_id;
630 wr->num_sge = ibwr->num_sge;
631 wr->opcode = ibwr->opcode;
632 wr->send_flags = ibwr->send_flags;
633
634 if (qp_type(qp) == IB_QPT_UD ||
635 qp_type(qp) == IB_QPT_SMI ||
636 qp_type(qp) == IB_QPT_GSI) {
637 wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
638 wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
639 if (qp_type(qp) == IB_QPT_GSI)
640 wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
641 if (wr->opcode == IB_WR_SEND_WITH_IMM)
642 wr->ex.imm_data = ibwr->ex.imm_data;
643 } else {
644 switch (wr->opcode) {
645 case IB_WR_RDMA_WRITE_WITH_IMM:
646 wr->ex.imm_data = ibwr->ex.imm_data;
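/* fall through - a WRITE_WITH_IMM also carries remote_addr and rkey */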
647 case IB_WR_RDMA_READ:
648 case IB_WR_RDMA_WRITE:
649 wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
650 wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
651 break;
652 case IB_WR_SEND_WITH_IMM:
653 wr->ex.imm_data = ibwr->ex.imm_data;
654 break;
655 case IB_WR_SEND_WITH_INV:
656 wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
657 break;
658 case IB_WR_ATOMIC_CMP_AND_SWP:
659 case IB_WR_ATOMIC_FETCH_AND_ADD:
660 wr->wr.atomic.remote_addr =
661 atomic_wr(ibwr)->remote_addr;
662 wr->wr.atomic.compare_add =
663 atomic_wr(ibwr)->compare_add;
664 wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
665 wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
666 break;
667 case IB_WR_LOCAL_INV:
668 wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
669 break;
670 case IB_WR_REG_MR:
671 wr->wr.reg.mr = reg_wr(ibwr)->mr;
672 wr->wr.reg.key = reg_wr(ibwr)->key;
673 wr->wr.reg.access = reg_wr(ibwr)->access;
674 break;
675 default:
676 break;
677 }
678 }
679 }
680
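/* Build a rxe_send_wqe from an ib_send_wr: copy the address vector for
 * UD/SMI/GSI QPs, copy SGE payloads into the WQE when IB_SEND_INLINE is
 * set, otherwise copy the SGE list, then record length, mask and state.
 */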
681 static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
682 unsigned int mask, unsigned int length,
683 struct rxe_send_wqe *wqe)
684 {
685 int num_sge = ibwr->num_sge;
686 struct ib_sge *sge;
687 int i;
688 u8 *p;
689
690 init_send_wr(qp, &wqe->wr, ibwr);
691
692 if (qp_type(qp) == IB_QPT_UD ||
693 qp_type(qp) == IB_QPT_SMI ||
694 qp_type(qp) == IB_QPT_GSI)
695 memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
696
697 if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
698 p = wqe->dma.inline_data;
699
700 sge = ibwr->sg_list;
701 for (i = 0; i < num_sge; i++, sge++) {
702 memcpy(p, (void *)(uintptr_t)sge->addr,
703 sge->length);
704
705 p += sge->length;
706 }
707 } else if (mask & WR_REG_MASK) {
708 wqe->mask = mask;
709 wqe->state = wqe_state_posted;
710 return 0;
711 } else
712 memcpy(wqe->dma.sge, ibwr->sg_list,
713 num_sge * sizeof(struct ib_sge));
714
715 wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
716 mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
717 wqe->mask = mask;
718 wqe->dma.length = length;
719 wqe->dma.resid = length;
720 wqe->dma.num_sge = num_sge;
721 wqe->dma.cur_sge = 0;
722 wqe->dma.sge_offset = 0;
723 wqe->state = wqe_state_posted;
724 wqe->ssn = atomic_add_return(1, &qp->ssn);
725
726 return 0;
727 }
728
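/* Validate a single send WR and copy it into the send queue under the SQ
 * lock, publishing it by advancing the producer index after a write barrier.
 */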
729 static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
730 unsigned int mask, u32 length)
731 {
732 int err;
733 struct rxe_sq *sq = &qp->sq;
734 struct rxe_send_wqe *send_wqe;
735 unsigned long flags;
736
737 err = validate_send_wr(qp, ibwr, mask, length);
738 if (err)
739 return err;
740
741 spin_lock_irqsave(&qp->sq.sq_lock, flags);
742
743 if (unlikely(queue_full(sq->queue))) {
744 err = -ENOMEM;
745 goto err1;
746 }
747
748 send_wqe = producer_addr(sq->queue);
749
750 err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
751 if (unlikely(err))
752 goto err1;
753
754 /*
755 * make sure all changes to the work queue are
756 * written before we update the producer pointer
757 */
758 smp_wmb();
759
760 advance_producer(sq->queue);
761 spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
762
763 return 0;
764
765 err1:
766 spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
767 return err;
768 }
769
770 static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
771 struct ib_send_wr **bad_wr)
772 {
773 int err = 0;
774 unsigned int mask;
775 unsigned int length = 0;
776 int i;
777 int must_sched;
778
779 while (wr) {
780 mask = wr_opcode_mask(wr->opcode, qp);
781 if (unlikely(!mask)) {
782 err = -EINVAL;
783 *bad_wr = wr;
784 break;
785 }
786
787 if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
788 !(mask & WR_INLINE_MASK))) {
789 err = -EINVAL;
790 *bad_wr = wr;
791 break;
792 }
793
794 length = 0;
795 for (i = 0; i < wr->num_sge; i++)
796 length += wr->sg_list[i].length;
797
798 err = post_one_send(qp, wr, mask, length);
799
800 if (err) {
801 *bad_wr = wr;
802 break;
803 }
804 wr = wr->next;
805 }
806
807 /*
808 * Must schedule in the case of a GSI QP because ib_send_mad() holds an irq
809 * lock, and the requester calls ip_local_out_sk(), which takes spin_lock_bh.
810 */
811 must_sched = (qp_type(qp) == IB_QPT_GSI) ||
812 (queue_count(qp->sq.queue) > 1);
813
814 rxe_run_task(&qp->req.task, must_sched);
815 if (unlikely(qp->req.state == QP_STATE_ERROR))
816 rxe_run_task(&qp->comp.task, 1);
817
818 return err;
819 }
820
821 static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
822 struct ib_send_wr **bad_wr)
823 {
824 struct rxe_qp *qp = to_rqp(ibqp);
825
826 if (unlikely(!qp->valid)) {
827 *bad_wr = wr;
828 return -EINVAL;
829 }
830
831 if (unlikely(qp->req.state < QP_STATE_READY)) {
832 *bad_wr = wr;
833 return -EINVAL;
834 }
835
836 if (qp->is_user) {
837 /* Utilize process context to do protocol processing */
838 rxe_run_task(&qp->req.task, 0);
839 return 0;
840 } else
841 return rxe_post_send_kernel(qp, wr, bad_wr);
842 }
843
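/* Post receive WRs directly to the QP's receive queue. QPs attached to an
 * SRQ must post receives through the SRQ, so that case is rejected here.
 */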
844 static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
845 struct ib_recv_wr **bad_wr)
846 {
847 int err = 0;
848 struct rxe_qp *qp = to_rqp(ibqp);
849 struct rxe_rq *rq = &qp->rq;
850 unsigned long flags;
851
852 if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
853 *bad_wr = wr;
854 err = -EINVAL;
855 goto err1;
856 }
857
858 if (unlikely(qp->srq)) {
859 *bad_wr = wr;
860 err = -EINVAL;
861 goto err1;
862 }
863
864 spin_lock_irqsave(&rq->producer_lock, flags);
865
866 while (wr) {
867 err = post_one_recv(rq, wr);
868 if (unlikely(err)) {
869 *bad_wr = wr;
870 break;
871 }
872 wr = wr->next;
873 }
874
875 spin_unlock_irqrestore(&rq->producer_lock, flags);
876
877 if (qp->resp.state == QP_STATE_ERROR)
878 rxe_run_task(&qp->resp.task, 1);
879
880 err1:
881 return err;
882 }
883
884 static struct ib_cq *rxe_create_cq(struct ib_device *dev,
885 const struct ib_cq_init_attr *attr,
886 struct ib_ucontext *context,
887 struct ib_udata *udata)
888 {
889 int err;
890 struct rxe_dev *rxe = to_rdev(dev);
891 struct rxe_cq *cq;
892
893 if (attr->flags)
894 return ERR_PTR(-EINVAL);
895
896 err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
897 if (err)
898 goto err1;
899
900 cq = rxe_alloc(&rxe->cq_pool);
901 if (!cq) {
902 err = -ENOMEM;
903 goto err1;
904 }
905
906 err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
907 context, udata);
908 if (err)
909 goto err2;
910
911 return &cq->ibcq;
912
913 err2:
914 rxe_drop_ref(cq);
915 err1:
916 return ERR_PTR(err);
917 }
918
919 static int rxe_destroy_cq(struct ib_cq *ibcq)
920 {
921 struct rxe_cq *cq = to_rcq(ibcq);
922
923 rxe_cq_disable(cq);
924
925 rxe_drop_ref(cq);
926 return 0;
927 }
928
929 static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
930 {
931 int err;
932 struct rxe_cq *cq = to_rcq(ibcq);
933 struct rxe_dev *rxe = to_rdev(ibcq->device);
934
935 err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
936 if (err)
937 goto err1;
938
939 err = rxe_cq_resize_queue(cq, cqe, udata);
940 if (err)
941 goto err1;
942
943 return 0;
944
945 err1:
946 return err;
947 }
948
949 static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
950 {
951 int i;
952 struct rxe_cq *cq = to_rcq(ibcq);
953 struct rxe_cqe *cqe;
954 unsigned long flags;
955
956 spin_lock_irqsave(&cq->cq_lock, flags);
957 for (i = 0; i < num_entries; i++) {
958 cqe = queue_head(cq->queue);
959 if (!cqe)
960 break;
961
962 memcpy(wc++, &cqe->ibwc, sizeof(*wc));
963 advance_consumer(cq->queue);
964 }
965 spin_unlock_irqrestore(&cq->cq_lock, flags);
966
967 return i;
968 }
969
970 static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
971 {
972 struct rxe_cq *cq = to_rcq(ibcq);
973 int count = queue_count(cq->queue);
974
975 return (count > wc_cnt) ? wc_cnt : count;
976 }
977
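/* Arm the CQ for the requested notification type. With
 * IB_CQ_REPORT_MISSED_EVENTS, return 1 when completions are already queued
 * so the caller knows to poll before waiting.
 */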
978 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
979 {
980 struct rxe_cq *cq = to_rcq(ibcq);
981 unsigned long irq_flags;
982 int ret = 0;
983
984 spin_lock_irqsave(&cq->cq_lock, irq_flags);
985 if (cq->notify != IB_CQ_NEXT_COMP)
986 cq->notify = flags & IB_CQ_SOLICITED_MASK;
987
988 if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
989 ret = 1;
990
991 spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
992
993 return ret;
994 }
995
996 static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
997 {
998 struct rxe_dev *rxe = to_rdev(ibpd->device);
999 struct rxe_pd *pd = to_rpd(ibpd);
1000 struct rxe_mem *mr;
1001 int err;
1002
1003 mr = rxe_alloc(&rxe->mr_pool);
1004 if (!mr) {
1005 err = -ENOMEM;
1006 goto err1;
1007 }
1008
1009 rxe_add_index(mr);
1010
1011 rxe_add_ref(pd);
1012
1013 err = rxe_mem_init_dma(rxe, pd, access, mr);
1014 if (err)
1015 goto err2;
1016
1017 return &mr->ibmr;
1018
1019 err2:
1020 rxe_drop_ref(pd);
1021 rxe_drop_index(mr);
1022 rxe_drop_ref(mr);
1023 err1:
1024 return ERR_PTR(err);
1025 }
1026
1027 static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
1028 u64 start,
1029 u64 length,
1030 u64 iova,
1031 int access, struct ib_udata *udata)
1032 {
1033 int err;
1034 struct rxe_dev *rxe = to_rdev(ibpd->device);
1035 struct rxe_pd *pd = to_rpd(ibpd);
1036 struct rxe_mem *mr;
1037
1038 mr = rxe_alloc(&rxe->mr_pool);
1039 if (!mr) {
1040 err = -ENOMEM;
1041 goto err2;
1042 }
1043
1044 rxe_add_index(mr);
1045
1046 rxe_add_ref(pd);
1047
1048 err = rxe_mem_init_user(rxe, pd, start, length, iova,
1049 access, udata, mr);
1050 if (err)
1051 goto err3;
1052
1053 return &mr->ibmr;
1054
1055 err3:
1056 rxe_drop_ref(pd);
1057 rxe_drop_index(mr);
1058 rxe_drop_ref(mr);
1059 err2:
1060 return ERR_PTR(err);
1061 }
1062
1063 static int rxe_dereg_mr(struct ib_mr *ibmr)
1064 {
1065 struct rxe_mem *mr = to_rmr(ibmr);
1066
1067 mr->state = RXE_MEM_STATE_ZOMBIE;
1068 rxe_drop_ref(mr->pd);
1069 rxe_drop_index(mr);
1070 rxe_drop_ref(mr);
1071 return 0;
1072 }
1073
1074 static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
1075 enum ib_mr_type mr_type,
1076 u32 max_num_sg)
1077 {
1078 struct rxe_dev *rxe = to_rdev(ibpd->device);
1079 struct rxe_pd *pd = to_rpd(ibpd);
1080 struct rxe_mem *mr;
1081 int err;
1082
1083 if (mr_type != IB_MR_TYPE_MEM_REG)
1084 return ERR_PTR(-EINVAL);
1085
1086 mr = rxe_alloc(&rxe->mr_pool);
1087 if (!mr) {
1088 err = -ENOMEM;
1089 goto err1;
1090 }
1091
1092 rxe_add_index(mr);
1093
1094 rxe_add_ref(pd);
1095
1096 err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
1097 if (err)
1098 goto err2;
1099
1100 return &mr->ibmr;
1101
1102 err2:
1103 rxe_drop_ref(pd);
1104 rxe_drop_index(mr);
1105 rxe_drop_ref(mr);
1106 err1:
1107 return ERR_PTR(err);
1108 }
1109
1110 static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
1111 {
1112 struct rxe_mem *mr = to_rmr(ibmr);
1113 struct rxe_map *map;
1114 struct rxe_phys_buf *buf;
1115
1116 if (unlikely(mr->nbuf == mr->num_buf))
1117 return -ENOMEM;
1118
1119 map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
1120 buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
1121
1122 buf->addr = addr;
1123 buf->size = ibmr->page_size;
1124 mr->nbuf++;
1125
1126 return 0;
1127 }
1128
1129 static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
1130 int sg_nents, unsigned int *sg_offset)
1131 {
1132 struct rxe_mem *mr = to_rmr(ibmr);
1133 int n;
1134
1135 mr->nbuf = 0;
1136
1137 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
1138
1139 mr->va = ibmr->iova;
1140 mr->iova = ibmr->iova;
1141 mr->length = ibmr->length;
1142 mr->page_shift = ilog2(ibmr->page_size);
1143 mr->page_mask = ibmr->page_size - 1;
1144 mr->offset = mr->iova & mr->page_mask;
1145
1146 return n;
1147 }
1148
1149 static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
1150 {
1151 int err;
1152 struct rxe_dev *rxe = to_rdev(ibqp->device);
1153 struct rxe_qp *qp = to_rqp(ibqp);
1154 struct rxe_mc_grp *grp;
1155
1156 /* takes a ref on grp if successful */
1157 err = rxe_mcast_get_grp(rxe, mgid, &grp);
1158 if (err)
1159 return err;
1160
1161 err = rxe_mcast_add_grp_elem(rxe, qp, grp);
1162
1163 rxe_drop_ref(grp);
1164 return err;
1165 }
1166
1167 static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
1168 {
1169 struct rxe_dev *rxe = to_rdev(ibqp->device);
1170 struct rxe_qp *qp = to_rqp(ibqp);
1171
1172 return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
1173 }
1174
1175 static ssize_t parent_show(struct device *device,
1176 struct device_attribute *attr, char *buf)
1177 {
1178 struct rxe_dev *rxe = container_of(device, struct rxe_dev,
1179 ib_dev.dev);
1180
1181 return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
1182 }
1183
1184 static DEVICE_ATTR_RO(parent);
1185
1186 static struct device_attribute *rxe_dev_attributes[] = {
1187 &dev_attr_parent,
1188 };
1189
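/* Fill in the ib_device identity fields and verbs callbacks, allocate the
 * crc32 shash used for ICRC calculation, register with the IB core, and
 * create the sysfs "parent" attribute file.
 */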
1190 int rxe_register_device(struct rxe_dev *rxe)
1191 {
1192 int err;
1193 int i;
1194 struct ib_device *dev = &rxe->ib_dev;
1195
1196 strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
1197 strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
1198
1199 dev->owner = THIS_MODULE;
1200 dev->node_type = RDMA_NODE_IB_CA;
1201 dev->phys_port_cnt = 1;
1202 dev->num_comp_vectors = num_possible_cpus();
1203 dev->dev.parent = rxe_dma_device(rxe);
1204 dev->local_dma_lkey = 0;
1205 addrconf_addr_eui48((unsigned char *)&dev->node_guid,
1206 rxe->ndev->dev_addr);
1207 dev->dev.dma_ops = &dma_virt_ops;
1208 dma_coerce_mask_and_coherent(&dev->dev,
1209 dma_get_required_mask(&dev->dev));
1210
1211 dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
1212 dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
1213 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
1214 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
1215 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
1216 | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
1217 | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
1218 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
1219 | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
1220 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
1221 | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
1222 | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
1223 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
1224 | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
1225 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
1226 | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
1227 | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
1228 | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
1229 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
1230 | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
1231 | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
1232 | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
1233 | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
1234 | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
1235 | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
1236 | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
1237 | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
1238 | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
1239 | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
1240 | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
1241 | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
1242 | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
1243 ;
1244
1245 dev->query_device = rxe_query_device;
1246 dev->modify_device = rxe_modify_device;
1247 dev->query_port = rxe_query_port;
1248 dev->modify_port = rxe_modify_port;
1249 dev->get_link_layer = rxe_get_link_layer;
1250 dev->query_gid = rxe_query_gid;
1251 dev->get_netdev = rxe_get_netdev;
1252 dev->add_gid = rxe_add_gid;
1253 dev->del_gid = rxe_del_gid;
1254 dev->query_pkey = rxe_query_pkey;
1255 dev->alloc_ucontext = rxe_alloc_ucontext;
1256 dev->dealloc_ucontext = rxe_dealloc_ucontext;
1257 dev->mmap = rxe_mmap;
1258 dev->get_port_immutable = rxe_port_immutable;
1259 dev->alloc_pd = rxe_alloc_pd;
1260 dev->dealloc_pd = rxe_dealloc_pd;
1261 dev->create_ah = rxe_create_ah;
1262 dev->modify_ah = rxe_modify_ah;
1263 dev->query_ah = rxe_query_ah;
1264 dev->destroy_ah = rxe_destroy_ah;
1265 dev->create_srq = rxe_create_srq;
1266 dev->modify_srq = rxe_modify_srq;
1267 dev->query_srq = rxe_query_srq;
1268 dev->destroy_srq = rxe_destroy_srq;
1269 dev->post_srq_recv = rxe_post_srq_recv;
1270 dev->create_qp = rxe_create_qp;
1271 dev->modify_qp = rxe_modify_qp;
1272 dev->query_qp = rxe_query_qp;
1273 dev->destroy_qp = rxe_destroy_qp;
1274 dev->post_send = rxe_post_send;
1275 dev->post_recv = rxe_post_recv;
1276 dev->create_cq = rxe_create_cq;
1277 dev->destroy_cq = rxe_destroy_cq;
1278 dev->resize_cq = rxe_resize_cq;
1279 dev->poll_cq = rxe_poll_cq;
1280 dev->peek_cq = rxe_peek_cq;
1281 dev->req_notify_cq = rxe_req_notify_cq;
1282 dev->get_dma_mr = rxe_get_dma_mr;
1283 dev->reg_user_mr = rxe_reg_user_mr;
1284 dev->dereg_mr = rxe_dereg_mr;
1285 dev->alloc_mr = rxe_alloc_mr;
1286 dev->map_mr_sg = rxe_map_mr_sg;
1287 dev->attach_mcast = rxe_attach_mcast;
1288 dev->detach_mcast = rxe_detach_mcast;
1289 dev->get_hw_stats = rxe_ib_get_hw_stats;
1290 dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;
1291
1292 rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
1293 if (IS_ERR(rxe->tfm)) {
1294 pr_err("failed to allocate crc algorithm err:%ld\n",
1295 PTR_ERR(rxe->tfm));
1296 return PTR_ERR(rxe->tfm);
1297 }
1298
1299 err = ib_register_device(dev, NULL);
1300 if (err) {
1301 pr_warn("%s failed with error %d\n", __func__, err);
1302 goto err1;
1303 }
1304
1305 for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
1306 err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
1307 if (err) {
1308 pr_warn("%s failed with error %d for attr number %d\n",
1309 __func__, err, i);
1310 goto err2;
1311 }
1312 }
1313
1314 return 0;
1315
1316 err2:
1317 ib_unregister_device(dev);
1318 err1:
1319 crypto_free_shash(rxe->tfm);
1320
1321 return err;
1322 }
1323
1324 int rxe_unregister_device(struct rxe_dev *rxe)
1325 {
1326 int i;
1327 struct ib_device *dev = &rxe->ib_dev;
1328
1329 for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
1330 device_remove_file(&dev->dev, rxe_dev_attributes[i]);
1331
1332 ib_unregister_device(dev);
1333
1334 return 0;
1335 }
1336