// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);

/* dev */
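/*
 * Device attributes are fixed at driver load time and cached in
 * rxe->attr, so a query is just a copy. No driver-specific udata is
 * defined for this verb, so any input or output udata is rejected.
 */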
static int rxe_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *attr,
			    struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibdev);
	int err;

	if (udata->inlen || udata->outlen) {
		rxe_dbg_dev(rxe, "malformed udata\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(attr, &rxe->attr, sizeof(*attr));

	return 0;

err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

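/*
 * rxe exposes exactly one port. Most attributes are served from the
 * cached rxe->port.attr; active speed/width come from the underlying
 * Ethernet device, and phys_state is derived from the cached port
 * state and the netdev's IFF_UP flag.
 */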
static int rxe_query_port(struct ib_device *ibdev,
			  u32 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibdev);
	struct net_device *ndev;
	int err, ret;

	if (port_num != 1) {
		err = -EINVAL;
		rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
		goto err_out;
	}

	ndev = rxe_ib_device_get_netdev(ibdev);
	if (!ndev) {
		err = -ENODEV;
		goto err_out;
	}

	memcpy(attr, &rxe->port.attr, sizeof(*attr));

	mutex_lock(&rxe->usdev_lock);
	ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
			       &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	dev_put(ndev);
	return ret;

err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

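/* The pkey table has a single entry: the default full-member pkey. */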
static int rxe_query_pkey(struct ib_device *ibdev,
			  u32 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(ibdev);
	int err;

	if (index != 0) {
		err = -EINVAL;
		rxe_dbg_dev(rxe, "bad pkey index = %d\n", index);
		goto err_out;
	}

	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;

err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

static int rxe_modify_device(struct ib_device *ibdev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(ibdev);
	int err;

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC)) {
		err = -EOPNOTSUPP;
		rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
		goto err_out;
	}

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;

err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
			   int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(ibdev);
	struct rxe_port *port;
	int err;

	if (port_num != 1) {
		err = -EINVAL;
		rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
		goto err_out;
	}

	//TODO is shutdown useful
	if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
		err = -EOPNOTSUPP;
		rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
		goto err_out;
	}

	port = &rxe->port;
	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
					       u32 port_num)
{
	struct rxe_dev *rxe = to_rdev(ibdev);
	int err;

	if (port_num != 1) {
		err = -EINVAL;
		rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
		goto err_out;
	}

	return IB_LINK_LAYER_ETHERNET;

err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
			      struct ib_port_immutable *immutable)
{
	struct rxe_dev *rxe = to_rdev(ibdev);
	struct ib_port_attr attr = {};
	int err;

	if (port_num != 1) {
		err = -EINVAL;
		rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
		goto err_out;
	}

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		goto err_out;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;

err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

/* uc */
static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibuc->device);
	struct rxe_ucontext *uc = to_ruc(ibuc);
	int err;

	err = rxe_add_to_pool(&rxe->uc_pool, uc);
	if (err)
		rxe_err_dev(rxe, "unable to create uc\n");

	return err;
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);
	int err;

	err = rxe_cleanup(uc);
	if (err)
		rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
}

/* pd */
static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	int err;

	err = rxe_add_to_pool(&rxe->pd_pool, pd);
	if (err) {
		rxe_dbg_dev(rxe, "unable to alloc pd\n");
		goto err_out;
	}

	return 0;

err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);
	int err;

	err = rxe_cleanup(pd);
	if (err)
		rxe_err_pd(pd, "cleanup failed, err = %d\n", err);

	return 0;
}

/* ah */
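/*
 * Newer user providers pass a response buffer and receive the AH
 * index (ah_num) so the kernel can look the AV up by index; older
 * providers get ah_num = 0 and supply the AV with each WQE instead.
 */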
static int rxe_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);
	struct rxe_create_ah_resp __user *uresp = NULL;
	int err, cleanup_err;

	if (udata) {
		/* test if new user provider */
		if (udata->outlen >= sizeof(*uresp))
			uresp = udata->outbuf;
		ah->is_user = true;
	} else {
		ah->is_user = false;
	}

	err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
				 init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
	if (err) {
		rxe_dbg_dev(rxe, "unable to create ah\n");
		goto err_out;
	}

	/* create index > 0 */
	ah->ah_num = ah->elem.index;

	err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
	if (err) {
		rxe_dbg_ah(ah, "bad attr\n");
		goto err_cleanup;
	}

	if (uresp) {
		/* only if new user provider */
		err = copy_to_user(&uresp->ah_num, &ah->ah_num,
				   sizeof(uresp->ah_num));
		if (err) {
			err = -EFAULT;
			rxe_dbg_ah(ah, "unable to copy to user\n");
			goto err_cleanup;
		}
	} else if (ah->is_user) {
		/* only if old user provider */
		ah->ah_num = 0;
	}

	rxe_init_av(init_attr->ah_attr, &ah->av);
	rxe_finalize(ah);

	return 0;

err_cleanup:
	cleanup_err = rxe_cleanup(ah);
	if (cleanup_err)
		rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err);
err_out:
	rxe_err_ah(ah, "returned err = %d\n", err);
	return err;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);
	int err;

	err = rxe_ah_chk_attr(ah, attr);
	if (err) {
		rxe_dbg_ah(ah, "bad attr\n");
		goto err_out;
	}

	rxe_init_av(attr, &ah->av);

	return 0;

err_out:
	rxe_err_ah(ah, "returned err = %d\n", err);
	return err;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);

	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);
	int err;

	err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
	if (err)
		rxe_err_ah(ah, "cleanup failed, err = %d\n", err);

	return 0;
}

/* srq */
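/*
 * Only IB_SRQT_BASIC is supported (no XRC or tag matching). For a
 * user SRQ the response buffer must be large enough for struct
 * rxe_create_srq_resp.
 */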
static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;
	int err, cleanup_err;

	if (udata) {
		if (udata->outlen < sizeof(*uresp)) {
			err = -EINVAL;
			rxe_err_dev(rxe, "malformed udata\n");
			goto err_out;
		}
		uresp = udata->outbuf;
	}

	if (init->srq_type != IB_SRQT_BASIC) {
		err = -EOPNOTSUPP;
		rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
			    init->srq_type);
		goto err_out;
	}

	err = rxe_srq_chk_init(rxe, init);
	if (err) {
		rxe_dbg_dev(rxe, "invalid init attributes\n");
		goto err_out;
	}

	err = rxe_add_to_pool(&rxe->srq_pool, srq);
	if (err) {
		rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
		goto err_out;
	}

	rxe_get(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err) {
		rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	cleanup_err = rxe_cleanup(srq);
	if (cleanup_err)
		rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd cmd = {};
	int err;

	if (udata) {
		if (udata->inlen < sizeof(cmd)) {
			err = -EINVAL;
			rxe_dbg_srq(srq, "malformed udata\n");
			goto err_out;
		}

		err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
		if (err) {
			err = -EFAULT;
			rxe_dbg_srq(srq, "unable to read udata\n");
			goto err_out;
		}
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err) {
		rxe_dbg_srq(srq, "bad init attributes\n");
		goto err_out;
	}

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
	if (err) {
		rxe_dbg_srq(srq, "bad attr\n");
		goto err_out;
	}

	return 0;

err_out:
	rxe_err_srq(srq, "returned err = %d\n", err);
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);
	int err;

	if (srq->error) {
		err = -EINVAL;
		rxe_dbg_srq(srq, "srq in error state\n");
		goto err_out;
	}

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;

err_out:
	rxe_err_srq(srq, "returned err = %d\n", err);
	return err;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	unsigned long flags;

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err) {
		*bad_wr = wr;
		rxe_err_srq(srq, "returned err = %d\n", err);
	}

	return err;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);
	int err;

	err = rxe_cleanup(srq);
	if (err)
		rxe_err_srq(srq, "cleanup failed, err = %d\n", err);

	return 0;
}

/* qp */
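/*
 * create_qp takes no driver-specific input (udata->inlen must be
 * zero); the response tells userspace where to mmap the work queues
 * so it can post WQEs directly.
 */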
static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
			 struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_pd *pd = to_rpd(ibqp->pd);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_create_qp_resp __user *uresp = NULL;
	int err, cleanup_err;

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
			goto err_out;
		}

		if (udata->outlen < sizeof(*uresp)) {
			err = -EINVAL;
			rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
			goto err_out;
		}

		qp->is_user = true;
		uresp = udata->outbuf;
	} else {
		qp->is_user = false;
	}

	if (init->create_flags) {
		err = -EOPNOTSUPP;
		rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err);
		goto err_out;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err) {
		rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err);
		goto err_out;
	}

	err = rxe_add_to_pool(&rxe->qp_pool, qp);
	if (err) {
		rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err);
		goto err_out;
	}

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
	if (err) {
		rxe_dbg_qp(qp, "create qp failed, err = %d\n", err);
		goto err_cleanup;
	}

	rxe_finalize(qp);
	return 0;

err_cleanup:
	cleanup_err = rxe_cleanup(qp);
	if (cleanup_err)
		rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err);
err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

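/*
 * Note: on an AV change the UDP source port is recomputed with
 * rdma_get_udp_sport() from the flow label (or from the QP numbers
 * when no flow label is set) so a connection hashes consistently
 * across ECMP paths.
 */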
static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	int err;

	if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
		err = -EOPNOTSUPP;
		rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n",
			   mask, err);
		goto err_out;
	}

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err) {
		rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err);
		goto err_out;
	}

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err) {
		rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err);
		goto err_out;
	}

	if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
		qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
						  qp->ibqp.qp_num,
						  qp->attr.dest_qp_num);

	return 0;

err_out:
	rxe_err_qp(qp, "returned err = %d\n", err);
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);
	int err;

	err = rxe_qp_chk_destroy(qp);
	if (err) {
		rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
		goto err_out;
	}

	err = rxe_cleanup(qp);
	if (err)
		rxe_err_qp(qp, "cleanup failed, err = %d\n", err);

	return 0;

err_out:
	rxe_err_qp(qp, "returned err = %d\n", err);
	return err;
}

/* send wr */

/* sanity check incoming send work request */
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int *maskp, unsigned int *lengthp)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;
	unsigned int mask = 0;
	unsigned long length = 0;
	int err = -EINVAL;
	int i;

	do {
		mask = wr_opcode_mask(ibwr->opcode, qp);
		if (!mask) {
			rxe_err_qp(qp, "bad wr opcode for qp type\n");
			break;
		}

		if (num_sge > sq->max_sge) {
			rxe_err_qp(qp, "num_sge > max_sge\n");
			break;
		}

		length = 0;
		for (i = 0; i < ibwr->num_sge; i++)
			length += ibwr->sg_list[i].length;

		if (length > RXE_PORT_MAX_MSG_SZ) {
			rxe_err_qp(qp, "message length too long\n");
			break;
		}

		if (mask & WR_ATOMIC_MASK) {
			if (length != 8) {
				rxe_err_qp(qp, "atomic length != 8\n");
				break;
			}
			if (atomic_wr(ibwr)->remote_addr & 0x7) {
				rxe_err_qp(qp, "misaligned atomic address\n");
				break;
			}
		}
		if (ibwr->send_flags & IB_SEND_INLINE) {
			if (!(mask & WR_INLINE_MASK)) {
				rxe_err_qp(qp, "opcode doesn't support inline data\n");
				break;
			}
			if (length > sq->max_inline) {
				rxe_err_qp(qp, "inline length too big\n");
				break;
			}
		}

		err = 0;
	} while (0);

	*maskp = mask;
	*lengthp = (int)length;

	return err;
}

static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_GSI) {
		struct ib_ah *ibah = ud_wr(ibwr)->ah;

		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		wr->wr.ud.ah_num = to_rah(ibah)->ah_num;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;

		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND:
			break;
		default:
			rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n",
				   wr->opcode);
			return -EINVAL;
		}
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			fallthrough;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		case IB_WR_SEND:
		case IB_WR_BIND_MW:
		case IB_WR_FLUSH:
		case IB_WR_ATOMIC_WRITE:
			break;
		default:
			rxe_err_qp(qp, "unsupported wr opcode %d\n",
				   wr->opcode);
			return -EINVAL;
		}
	}

	return 0;
}

static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
				    const struct ib_send_wr *ibwr)
{
	struct ib_sge *sge = ibwr->sg_list;
	u8 *p = wqe->dma.inline_data;
	int i;

	for (i = 0; i < ibwr->num_sge; i++, sge++) {
		memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
		p += sge->length;
	}
}

static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	int err;

	err = init_send_wr(qp, &wqe->wr, ibwr);
	if (err)
		return err;

	/* local operation */
	if (unlikely(mask & WR_LOCAL_OP_MASK)) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	}

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
		copy_inline_data_to_wqe(wqe, ibwr);
	else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

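/*
 * A WQE only becomes visible to the requester once the producer index
 * is advanced, so a failure in init_send_wqe() leaves the send queue
 * unchanged.
 */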
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned int mask;
	unsigned int length;
	int full;

	err = validate_send_wr(qp, ibwr, &mask, &length);
	if (err)
		return err;

	full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
	if (unlikely(full)) {
		rxe_err_qp(qp, "send queue full\n");
		return -ENOMEM;
	}

	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (!err)
		queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);

	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp,
				const struct ib_send_wr *ibwr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	int good = 0;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);
	while (ibwr) {
		err = post_one_send(qp, ibwr);
		if (err) {
			*bad_wr = ibwr;
			break;
		} else {
			good++;
		}
		ibwr = ibwr->next;
	}
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	/* kickoff processing of any posted wqes */
	if (good)
		rxe_sched_task(&qp->send_task);

	return err;
}

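/*
 * For user QPs the provider library has already written the WQEs into
 * the shared queue, so the verb only schedules the send task; kernel
 * QPs post each WR here under the sq lock.
 */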
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);
	int err;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	/* caller has already called destroy_qp */
	if (WARN_ON_ONCE(!qp->valid)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		rxe_err_qp(qp, "qp has been destroyed\n");
		return -EINVAL;
	}

	if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		*bad_wr = wr;
		rxe_err_qp(qp, "qp not ready to send\n");
		return -EINVAL;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_sched_task(&qp->send_task);
	} else {
		err = rxe_post_send_kernel(qp, wr, bad_wr);
		if (err)
			return err;
	}

	return 0;
}

/* recv wr */
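/*
 * Common to rxe_post_recv() and rxe_post_srq_recv(); called with the
 * queue's producer lock held.
 */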
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int i;
	unsigned long length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;
	int full;
	int err;

	full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
	if (unlikely(full)) {
		err = -ENOMEM;
		rxe_dbg("queue full\n");
		goto err_out;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		rxe_dbg("bad num_sge > max_sge\n");
		goto err_out;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	if (length > RXE_PORT_MAX_MSG_SZ) {
		err = -EINVAL;
		rxe_dbg("message length too long\n");
		goto err_out;
	}

	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);

	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;
	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);

	return 0;

err_out:
	rxe_dbg("returned err = %d\n", err);
	return err;
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	/* caller has already called destroy_qp */
	if (WARN_ON_ONCE(!qp->valid)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		rxe_err_qp(qp, "qp has been destroyed\n");
		return -EINVAL;
	}

	/* see C10-97.2.1 */
	if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		*bad_wr = wr;
		rxe_dbg_qp(qp, "qp not ready to post recv\n");
		return -EINVAL;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp_state(qp) == IB_QPS_ERR)
		rxe_sched_task(&qp->recv_task);
	spin_unlock_irqrestore(&qp->state_lock, flags);

	return err;
}

/* cq */
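/*
 * The CQ lives in a queue buffer shared with userspace; the response
 * carries the mmap info for it. No CQ creation flags are supported.
 */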
static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct uverbs_attr_bundle *attrs)
{
	struct ib_udata *udata = &attrs->driver_udata;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;
	int err, cleanup_err;

	if (udata) {
		if (udata->outlen < sizeof(*uresp)) {
			err = -EINVAL;
			rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
			goto err_out;
		}
		uresp = udata->outbuf;
	}

	if (attr->flags) {
		err = -EOPNOTSUPP;
		rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
		goto err_out;
	}

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err) {
		rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
		goto err_out;
	}

	err = rxe_add_to_pool(&rxe->cq_pool, cq);
	if (err) {
		rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
		goto err_out;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err) {
		rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	cleanup_err = rxe_cleanup(cq);
	if (cleanup_err)
		rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
	rxe_err_dev(rxe, "returned err = %d\n", err);
	return err;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;
	int err;

	if (udata) {
		if (udata->outlen < sizeof(*uresp)) {
			err = -EINVAL;
			rxe_dbg_cq(cq, "malformed udata\n");
			goto err_out;
		}
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err) {
		rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
		goto err_out;
	}

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err) {
		rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err);
		goto err_out;
	}

	return 0;

err_out:
	rxe_err_cq(cq, "returned err = %d\n", err);
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
		if (!cqe)
			break;	/* queue empty */

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count;

	count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP);

	return (count > wc_cnt) ? wc_cnt : count;
}

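/*
 * Arm the CQ for the next completion event. With
 * IB_CQ_REPORT_MISSED_EVENTS, return 1 if completions are already
 * pending so the caller knows to poll again.
 */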
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int ret = 0;
	int empty;
	unsigned long irq_flags;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	cq->notify |= flags & IB_CQ_SOLICITED_MASK;
	empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int err;

	/* See IBA C11-17: The CI shall return an error if this Verb is
	 * invoked while a Work Queue is still associated with the CQ.
	 */
	if (atomic_read(&cq->num_wq)) {
		err = -EINVAL;
		rxe_dbg_cq(cq, "still in use\n");
		goto err_out;
	}

	err = rxe_cleanup(cq);
	if (err)
		rxe_err_cq(cq, "cleanup failed, err = %d\n", err);

	return 0;

err_out:
	rxe_err_cq(cq, "returned err = %d\n", err);
	return err;
}

/* mr */
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err) {
		rxe_dbg_dev(rxe, "unable to create mr\n");
		goto err_free;
	}

	rxe_get(pd);
	mr->ibmr.pd = ibpd;
	mr->ibmr.device = ibpd->device;

	rxe_mr_init_dma(access, mr);
	rxe_finalize(mr);
	return &mr->ibmr;

err_free:
	kfree(mr);
	rxe_err_pd(pd, "returned err = %d\n", err);
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
				     u64 length, u64 iova, int access,
				     struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;
	int err, cleanup_err;

	if (access & ~RXE_ACCESS_SUPPORTED_MR) {
		rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
			   RXE_ACCESS_SUPPORTED_MR);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err) {
		rxe_dbg_pd(pd, "unable to create mr\n");
		goto err_free;
	}

	rxe_get(pd);
	mr->ibmr.pd = ibpd;
	mr->ibmr.device = ibpd->device;

	err = rxe_mr_init_user(rxe, start, length, access, mr);
	if (err) {
		rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
		goto err_cleanup;
	}

	rxe_finalize(mr);
	return &mr->ibmr;

err_cleanup:
	cleanup_err = rxe_cleanup(mr);
	if (cleanup_err)
		rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
err_free:
	kfree(mr);
	rxe_err_pd(pd, "returned err = %d\n", err);
	return ERR_PTR(err);
}

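/*
 * Returning NULL from rereg_user_mr tells the core that the existing
 * MR was updated in place rather than replaced by a new one.
 */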
static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
				       u64 start, u64 length, u64 iova,
				       int access, struct ib_pd *ibpd,
				       struct ib_udata *udata)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	struct rxe_pd *old_pd = to_rpd(ibmr->pd);
	struct rxe_pd *pd = to_rpd(ibpd);

	/* for now only support the two easy cases:
	 * rereg_pd and rereg_access
	 */
	if (flags & ~RXE_MR_REREG_SUPPORTED) {
		rxe_err_mr(mr, "flags = %#x not supported\n", flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (flags & IB_MR_REREG_PD) {
		rxe_put(old_pd);
		rxe_get(pd);
		mr->ibmr.pd = ibpd;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		if (access & ~RXE_ACCESS_SUPPORTED_MR) {
			rxe_err_mr(mr, "access = %#x not supported\n", access);
			return ERR_PTR(-EOPNOTSUPP);
		}
		mr->access = access;
	}

	return NULL;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;
	int err, cleanup_err;

	if (mr_type != IB_MR_TYPE_MEM_REG) {
		err = -EINVAL;
		rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n",
			   mr_type, err);
		goto err_out;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err)
		goto err_free;

	rxe_get(pd);
	mr->ibmr.pd = ibpd;
	mr->ibmr.device = ibpd->device;

	err = rxe_mr_init_fast(max_num_sg, mr);
	if (err) {
		rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err);
		goto err_cleanup;
	}

	rxe_finalize(mr);
	return &mr->ibmr;

err_cleanup:
	cleanup_err = rxe_cleanup(mr);
	if (cleanup_err)
		rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
err_free:
	kfree(mr);
err_out:
	rxe_err_pd(pd, "returned err = %d\n", err);
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	int err, cleanup_err;

	/* See IBA 10.6.7.2.6 */
	if (atomic_read(&mr->num_mw) > 0) {
		err = -EINVAL;
		rxe_dbg_mr(mr, "mr has mw's bound\n");
		goto err_out;
	}

	cleanup_err = rxe_cleanup(mr);
	if (cleanup_err)
		rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);

	kfree_rcu_mightsleep(mr);
	return 0;

err_out:
	rxe_err_mr(mr, "returned err = %d\n", err);
	return err;
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
	struct net_device *ndev;

	ndev = rxe_ib_device_get_netdev(ib_dev);
	if (!ndev)
		return -ENODEV;

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));

	dev_put(ndev);
	return 0;
}

static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_port_stats = rxe_ib_alloc_hw_port_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_mw = rxe_alloc_mw,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.create_user_ah = rxe_create_ah,
	.dealloc_driver = rxe_dealloc,
	.dealloc_mw = rxe_dealloc_mw,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.device_group = &rxe_attr_group,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.rereg_user_mr = rxe_rereg_user_mr,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
			struct net_device *ndev)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;

	strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    ndev->dev_addr);

	dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
				BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
	if (err)
		return err;

	err = rxe_icrc_init(rxe);
	if (err)
		return err;

	err = ib_register_device(dev, ibdev_name, NULL);
	if (err)
		rxe_dbg_dev(rxe, "failed with error %d\n", err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}
