/*
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <linux/nospec.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

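/*
 * Object model: each open of /dev/infiniband/rdma_cm creates a ucma_file;
 * each RDMA_USER_CM_CMD_CREATE_ID allocates a ucma_context wrapping one
 * rdma_cm_id; multicast joins hang ucma_multicast entries off their owning
 * context. Events are queued on file->event_list by ucma_event_handler()
 * and drained by ucma_get_event().
 */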
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that device is in process of destroying the internal HW
	 * resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

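/* The global mutex protects ctx_idr, multicast_idr, and ctx->closing. */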
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static const struct file_operations ucma_fops;

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

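/*
 * A context starts with a reference count of one, held by its creator.
 * ucma_get_ctx() takes an extra reference for the duration of a command;
 * the final ucma_put_ctx() completes ctx->comp, which the destroy paths
 * wait on before calling rdma_destroy_id().
 */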
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive until its creator explicitly
	 * destroys it.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

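/*
 * The multicast id is allocated with a NULL pointer installed; the fully
 * initialized ucma_multicast is only published via idr_replace() in
 * ucma_process_join() once rdma_join_multicast() has succeeded, so lookups
 * never observe a half-built entry.
 */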
static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context still points to this cm_id does it own it and
	 * can it be queued to be closed. Otherwise the cm_id is an inflight
	 * one that sits on the context's event list, pending to be detached
	 * and reattached to its new context as part of ucma_get_event, and
	 * is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

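/*
 * Returning non-zero from an rdma_cm event handler tells the RDMA CM to
 * destroy the id the event was delivered on. That is why an allocation
 * failure for a connect request returns 1 (dropping the new child id), and
 * why exceeding the listen backlog fails the handler with -ENOMEM instead
 * of silently discarding the request.
 */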
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

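/*
 * A connect request event carries a brand-new cm_id (the child of the
 * listening id). ucma_get_event() allocates a fresh ucma_context for it,
 * re-points the child id's context, and reports the new context's id so
 * userspace can accept or reject on it.
 */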
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = rdma_create_id(current->nsproxy->net_ns,
			       ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space
 * software. These might include pending connect requests which we have not
 * completed processing. We cannot call rdma_destroy_id while holding the
 * lock of the context (file->mut), as it might cause a deadlock. We
 * therefore extract all relevant events from the context pending events
 * list while holding the mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

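/*
 * ucma_destroy_id() races with the device-removal path: if a removal event
 * already marked the context as closing, ucma_close_id() (queued on
 * close_wq, which is flushed below) owns the rdma_destroy_id() call.
 * Otherwise this function drops the creator's reference, waits for all
 * other references to go away, and destroys the id itself.
 */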
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/*
	 * At this point it is guaranteed that there is no inflight closing
	 * task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

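/*
 * The response is sized by the caller-supplied out_len: the loop below
 * packs only as many path records as fit after the fixed-size header, so
 * a short buffer truncates the path list rather than failing.
 */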
static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

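/*
 * A zero or out-of-range cmd.backlog falls back to the max_backlog sysctl
 * (/proc/sys/net/rdma_ucm/max_backlog) registered in ucma_init().
 */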
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (!ctx->cm_id->device) {
		ret = -EINVAL;
		goto out;
	}

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_paths(ctx->cm_id, &opa, 1);
	} else {
		ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Validate optlen before taking a context reference so the error
	 * path cannot leak one.
	 */
	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_lock(&mut);
	idr_replace(&multicast_idr, mc, mc->id);
	mutex_unlock(&mut);

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;
	if (f.file->f_op != &ucma_fops) {
		ret = -EINVAL;
		goto file_put;
	}

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

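/*
 * Command dispatch: userspace drives this device by write(2)-ing a
 * struct rdma_ucm_cmd_hdr immediately followed by the command payload,
 * with hdr.cmd indexing ucma_cmd_table above. A minimal, illustrative
 * (untested) userspace sketch for creating an id:
 *
 *	struct rdma_ucm_create_id create = { .uid = my_uid, .ps = RDMA_PS_TCP,
 *					     .response = (uintptr_t) &resp };
 *	struct rdma_ucm_cmd_hdr hdr = { .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *					.in = sizeof(create),
 *					.out = sizeof(resp) };
 *	char buf[sizeof(hdr) + sizeof(create)];
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), &create, sizeof(create));
 *	write(fd, buf, sizeof(buf));
 *
 * where my_uid and resp (a struct rdma_ucm_create_id_resp) are the
 * caller's. hdr.out advertises how much response space the caller
 * provided; handlers reject a value smaller than their response struct
 * with -ENOSPC.
 */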
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;
	hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue has been flushed, we are safe from any inflight
		 * handlers that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);