Lines Matching +full:sub +full:- +full:mailboxes in drivers/infiniband/hw/mlx5/devx.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
43 struct list_head list; /* headed in ev_file->event_list */
60 struct list_head file_list; /* headed in ev_file->
63 struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
64 * devx_obj_event->obj_sub_list
67 struct list_head event_list; /* headed in ev_file->event_list or in
125 if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx)) in mlx5_ib_devx_create()
126 return -EINVAL; in mlx5_ib_devx_create()
130 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX)) in mlx5_ib_devx_create()
133 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & in mlx5_ib_devx_create()
140 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); in mlx5_ib_devx_create()
156 mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); in mlx5_ib_devx_destroy()
213 opcode = (obj->obj_id >> 32) & 0xffff; in get_dec_obj_type()
220 return (obj->obj_id >> 48); in get_dec_obj_type()
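The two fragments above (devx.c lines 213 and 220), together with the get_enc_obj_id() call at line 1459 further down, imply how the driver packs its 64-bit obj_id: the firmware object id sits in bits 31:0, the creation opcode in bits 47:32, and the object type in bits 63:48. A minimal standalone sketch of that packing, with helper names of my own rather than the driver's:

```c
#include <assert.h>
#include <stdint.h>

/* Bit layout implied by the matched lines:
 * bits 63:48 = object type, bits 47:32 = creation opcode, bits 31:0 = fw object id.
 */
static uint64_t enc_obj_id(uint16_t obj_type, uint16_t opcode, uint32_t fw_id)
{
	return ((uint64_t)obj_type << 48) | ((uint64_t)opcode << 32) | fw_id;
}

static uint16_t dec_obj_type(uint64_t obj_id) { return obj_id >> 48; }
static uint16_t dec_opcode(uint64_t obj_id)   { return (obj_id >> 32) & 0xffff; }
static uint32_t dec_fw_id(uint64_t obj_id)    { return obj_id & 0xffffffff; }

int main(void)
{
	uint64_t id = enc_obj_id(0x13, 0xa00, 0x112233);

	assert(dec_obj_type(id) == 0x13);
	assert(dec_opcode(id) == 0xa00);
	assert(dec_fw_id(id) == 0x112233);
	return 0;
}
```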
267 return eqe->data.qp_srq.type; in get_event_obj_type()
275 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); in get_event_obj_type()
523 struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata); in devx_is_valid_obj_id()
532 to_mcq(uobj->object)->mcq.cqn) == in devx_is_valid_obj_id()
537 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq); in devx_is_valid_obj_id()
540 switch (srq->common.res) { in devx_is_valid_obj_id()
548 if (!dev->mdev->issi) in devx_is_valid_obj_id()
555 to_msrq(uobj->object)->msrq.srqn) == in devx_is_valid_obj_id()
561 struct mlx5_ib_qp *qp = to_mqp(uobj->object); in devx_is_valid_obj_id()
563 if (qp->type == IB_QPT_RAW_PACKET || in devx_is_valid_obj_id()
564 (qp->flags & IB_QP_CREATE_SOURCE_QPN)) { in devx_is_valid_obj_id()
566 &qp->raw_packet_qp; in devx_is_valid_obj_id()
567 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; in devx_is_valid_obj_id()
568 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; in devx_is_valid_obj_id()
571 rq->base.mqp.qpn) == obj_id || in devx_is_valid_obj_id()
573 sq->base.mqp.qpn) == obj_id || in devx_is_valid_obj_id()
575 rq->tirn) == obj_id || in devx_is_valid_obj_id()
577 sq->tisn) == obj_id); in devx_is_valid_obj_id()
580 if (qp->type == MLX5_IB_QPT_DCT) in devx_is_valid_obj_id()
582 qp->dct.mdct.mqp.qpn) == obj_id; in devx_is_valid_obj_id()
584 qp->ibqp.qp_num) == obj_id; in devx_is_valid_obj_id()
589 to_mrwq(uobj->object)->core_qp.qpn) == in devx_is_valid_obj_id()
594 to_mrwq_ind_table(uobj->object)->rqtn) == in devx_is_valid_obj_id()
600 struct devx_obj *devx_uobj = uobj->object; in devx_is_valid_obj_id()
603 devx_uobj->flow_counter_bulk_size) { in devx_is_valid_obj_id()
606 end = devx_uobj->obj_id + in devx_is_valid_obj_id()
607 devx_uobj->flow_counter_bulk_size; in devx_is_valid_obj_id()
608 return devx_uobj->obj_id <= obj_id && end > obj_id; in devx_is_valid_obj_id()
611 return devx_uobj->obj_id == obj_id; in devx_is_valid_obj_id()
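For flow-counter objects (devx.c lines 600-611 above), an obj_id is accepted if it falls anywhere inside the bulk allocated at creation, i.e. within [base, base + bulk_size); otherwise only an exact match on the base id counts. A tiny self-contained sketch of that range test, with illustrative names rather than the driver's:

```c
#include <stdbool.h>
#include <stdint.h>

/* Accept any id inside the bulk [base, base + bulk_size); with a zero bulk
 * size only the base id itself matches, mirroring the fallthrough above.
 */
static bool flow_counter_id_valid(uint32_t base, uint32_t bulk_size, uint32_t id)
{
	if (bulk_size)
		return id >= base && id < base + bulk_size;
	return id == base;
}
```

So a bulk of 128 counters created at base 0x40 makes ids 0x40 through 0xbf valid, and 0xc0 is rejected.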
864 if (c->devx_uid) in devx_get_uid()
865 return c->devx_uid; in devx_get_uid()
867 dev = to_mdev(c->ibucontext.device); in devx_get_uid()
868 if (dev->devx_whitelist_uid) in devx_get_uid()
869 return dev->devx_whitelist_uid; in devx_get_uid()
871 return -EOPNOTSUPP; in devx_get_uid()
874 if (!c->devx_uid) in devx_get_uid()
875 return -EINVAL; in devx_get_uid()
877 return c->devx_uid; in devx_get_uid()
885 if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) && in devx_is_general_cmd()
925 return -EFAULT; in UVERBS_HANDLER()
930 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
932 err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn); in UVERBS_HANDLER()
938 return -EFAULT; in UVERBS_HANDLER()
954 * mailboxes (except tagging them with UID), we expose to the user its UAR
974 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
978 return -EFAULT; in UVERBS_HANDLER()
980 dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true); in UVERBS_HANDLER()
986 return -EFAULT; in UVERBS_HANDLER()
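Lines 925-938 and 954-986 are the kernel side of the DEVX queries that hand a completion EQ number and a UAR index back to userspace. For context, a hedged sketch of how an rdma-core application usually obtains the same resources through the mlx5dv DEVX API; it assumes the context was opened with DEVX enabled (mlx5dv_open_device() with MLX5DV_CONTEXT_FLAGS_DEVX), and which uverbs methods these helpers invoke underneath is my assumption, not something the matched lines show:

```c
#include <stdint.h>
#include <stdio.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Query the EQN for completion vector 0 and allocate a UAR page from
 * userspace; ctx must be a DEVX-enabled mlx5 context (assumption).
 */
static int devx_query_resources(struct ibv_context *ctx)
{
	struct mlx5dv_devx_uar *uar;
	uint32_t eqn;

	if (mlx5dv_devx_query_eqn(ctx, 0, &eqn))
		return -1;

	uar = mlx5dv_devx_alloc_uar(ctx, 0);
	if (!uar)
		return -1;

	printf("eqn %u, uar page_id %u\n", eqn, uar->page_id);
	mlx5dv_devx_free_uar(uar);
	return 0;
}
```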
1007 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1015 return -EINVAL; in UVERBS_HANDLER()
1022 err = mlx5_cmd_exec(dev->mdev, cmd_in, in UVERBS_HANDLER()
1219 struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr; in devx_handle_mkey_indirect()
1224 mkey = &devx_mr->mmkey; in devx_handle_mkey_indirect()
1227 mkey->key = mlx5_idx_to_mkey( in devx_handle_mkey_indirect()
1229 mkey->type = MLX5_MKEY_INDIRECT_DEVX; in devx_handle_mkey_indirect()
1230 mkey->iova = MLX5_GET64(mkc, mkc, start_addr); in devx_handle_mkey_indirect()
1231 mkey->size = MLX5_GET64(mkc, mkc, len); in devx_handle_mkey_indirect()
1232 mkey->pd = MLX5_GET(mkc, mkc, pd); in devx_handle_mkey_indirect()
1233 devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); in devx_handle_mkey_indirect()
1235 return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey, in devx_handle_mkey_indirect()
1250 return -EINVAL; in devx_handle_mkey_create()
1260 obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY; in devx_handle_mkey_create()
1269 struct devx_event_subscription *sub) in devx_cleanup_subscription() argument
1274 if (sub->is_cleaned) in devx_cleanup_subscription()
1277 sub->is_cleaned = 1; in devx_cleanup_subscription()
1278 list_del_rcu(&sub->xa_list); in devx_cleanup_subscription()
1280 if (list_empty(&sub->obj_list)) in devx_cleanup_subscription()
1283 list_del_rcu(&sub->obj_list); in devx_cleanup_subscription()
1285 event = xa_load(&dev->devx_event_table.event_xa, in devx_cleanup_subscription()
1286 sub->xa_key_level1); in devx_cleanup_subscription()
1289 xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2); in devx_cleanup_subscription()
1290 if (list_empty(&xa_val_level2->obj_sub_list)) { in devx_cleanup_subscription()
1291 xa_erase(&event->object_ids, in devx_cleanup_subscription()
1292 sub->xa_key_level2); in devx_cleanup_subscription()
1303 struct devx_obj *obj = uobject->object; in devx_obj_cleanup()
1308 dev = mlx5_udata_to_mdev(&attrs->driver_udata); in devx_obj_cleanup()
1309 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { in devx_obj_cleanup()
1315 xa_erase(&obj->ib_dev->odp_mkeys, in devx_obj_cleanup()
1316 mlx5_base_mkey(obj->devx_mr.mmkey.key)); in devx_obj_cleanup()
1317 synchronize_srcu(&dev->odp_srcu); in devx_obj_cleanup()
1320 if (obj->flags & DEVX_OBJ_FLAGS_DCT) in devx_obj_cleanup()
1321 ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); in devx_obj_cleanup()
1322 else if (obj->flags & DEVX_OBJ_FLAGS_CQ) in devx_obj_cleanup()
1323 ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); in devx_obj_cleanup()
1325 ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, in devx_obj_cleanup()
1326 obj->dinlen, out, sizeof(out)); in devx_obj_cleanup()
1330 devx_event_table = &dev->devx_event_table; in devx_obj_cleanup()
1332 mutex_lock(&devx_event_table->event_xa_lock); in devx_obj_cleanup()
1333 list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list) in devx_obj_cleanup()
1335 mutex_unlock(&devx_event_table->event_xa_lock); in devx_obj_cleanup()
1347 u32 obj_id = mcq->cqn; in devx_cq_comp()
1349 table = &obj->ib_dev->devx_event_table; in devx_cq_comp()
1351 event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP); in devx_cq_comp()
1355 obj_event = xa_load(&event->object_ids, obj_id); in devx_cq_comp()
1359 dispatch_event_fd(&obj_event->obj_sub_list, eqe); in devx_cq_comp()
1376 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1377 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1387 return -EINVAL; in UVERBS_HANDLER()
1394 return -EINVAL; in UVERBS_HANDLER()
1402 return -ENOMEM; in UVERBS_HANDLER()
1414 obj->flags |= DEVX_OBJ_FLAGS_DCT; in UVERBS_HANDLER()
1415 err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in, in UVERBS_HANDLER()
1418 obj->flags |= DEVX_OBJ_FLAGS_CQ; in UVERBS_HANDLER()
1419 obj->core_cq.comp = devx_cq_comp; in UVERBS_HANDLER()
1420 err = mlx5_core_create_cq(dev->mdev, &obj->core_cq, in UVERBS_HANDLER()
1424 err = mlx5_cmd_exec(dev->mdev, cmd_in, in UVERBS_HANDLER()
1443 obj->flow_counter_bulk_size = bulk; in UVERBS_HANDLER()
1446 uobj->object = obj; in UVERBS_HANDLER()
1447 INIT_LIST_HEAD(&obj->event_sub); in UVERBS_HANDLER()
1448 obj->ib_dev = dev; in UVERBS_HANDLER()
1449 devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, in UVERBS_HANDLER()
1451 WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32)); in UVERBS_HANDLER()
1459 obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id); in UVERBS_HANDLER()
1461 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { in UVERBS_HANDLER()
1469 if (obj->flags & DEVX_OBJ_FLAGS_DCT) in UVERBS_HANDLER()
1470 mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); in UVERBS_HANDLER()
1471 else if (obj->flags & DEVX_OBJ_FLAGS_CQ) in UVERBS_HANDLER()
1472 mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); in UVERBS_HANDLER()
1474 mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out, in UVERBS_HANDLER()
1490 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1491 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1497 return -EINVAL; in UVERBS_HANDLER()
1504 return -EINVAL; in UVERBS_HANDLER()
1507 return -EINVAL; in UVERBS_HANDLER()
1516 err = mlx5_cmd_exec(mdev->mdev, cmd_in, in UVERBS_HANDLER()
1535 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1539 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1542 return -EINVAL; in UVERBS_HANDLER()
1549 return -EINVAL; in UVERBS_HANDLER()
1552 return -EINVAL; in UVERBS_HANDLER()
1559 err = mlx5_cmd_exec(mdev->mdev, cmd_in, in UVERBS_HANDLER()
1585 spin_lock_init(&ev_queue->lock); in devx_init_event_queue()
1586 INIT_LIST_HEAD(&ev_queue->event_list); in devx_init_event_queue()
1587 init_waitqueue_head(&ev_queue->poll_wait); in devx_init_event_queue()
1588 atomic_set(&ev_queue->bytes_in_use, 0); in devx_init_event_queue()
1589 ev_queue->is_destroyed = 0; in devx_init_event_queue()
1599 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata); in UVERBS_HANDLER()
1603 devx_init_event_queue(&ev_file->ev_queue); in UVERBS_HANDLER()
1604 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx); in UVERBS_HANDLER()
1615 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1616 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1629 spin_lock_init(&ev_file->lock); in UVERBS_HANDLER()
1630 INIT_LIST_HEAD(&ev_file->event_list); in UVERBS_HANDLER()
1631 init_waitqueue_head(&ev_file->poll_wait); in UVERBS_HANDLER()
1633 ev_file->omit_data = 1; in UVERBS_HANDLER()
1634 INIT_LIST_HEAD(&ev_file->subscribed_events_list); in UVERBS_HANDLER()
1635 ev_file->dev = dev; in UVERBS_HANDLER()
1636 get_device(&dev->ib_dev.dev); in UVERBS_HANDLER()
1644 struct devx_async_cmd_event_file *ev_file = async_data->ev_file; in devx_query_callback()
1645 struct devx_async_event_queue *ev_queue = &ev_file->ev_queue; in devx_query_callback()
1653 spin_lock_irqsave(&ev_queue->lock, flags); in devx_query_callback()
1654 list_add_tail(&async_data->list, &ev_queue->event_list); in devx_query_callback()
1655 spin_unlock_irqrestore(&ev_queue->lock, flags); in devx_query_callback()
1657 wake_up_interruptible(&ev_queue->poll_wait); in devx_query_callback()
1672 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1676 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1681 return -EINVAL; in UVERBS_HANDLER()
1688 return -EINVAL; in UVERBS_HANDLER()
1696 return -EINVAL; in UVERBS_HANDLER()
1706 if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) > in UVERBS_HANDLER()
1708 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use); in UVERBS_HANDLER()
1709 return -EAGAIN; in UVERBS_HANDLER()
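Lines 1706-1709 bound how much asynchronous command output may be queued per event file: the handler optimistically adds cmd_out_len to the in-use counter and rolls the reservation back with -EAGAIN when the total would exceed the cap. A minimal userspace-flavoured sketch of that reserve/rollback pattern; the limit constant here is illustrative, not the driver's:

```c
#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>

#define ASYNC_BYTES_LIMIT (256 * 1024)	/* illustrative cap, not the driver's value */

static atomic_size_t bytes_in_use;

/* Optimistically reserve len bytes of pending output; undo the reservation
 * and report -EAGAIN if it would push the running total past the cap.
 */
static int reserve_async_bytes(size_t len)
{
	if (atomic_fetch_add(&bytes_in_use, len) + len > ASYNC_BYTES_LIMIT) {
		atomic_fetch_sub(&bytes_in_use, len);
		return -EAGAIN;
	}
	return 0;
}

/* Release the reservation once the completion has been consumed. */
static void release_async_bytes(size_t len)
{
	atomic_fetch_sub(&bytes_in_use, len);
}
```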
1715 err = -ENOMEM; in UVERBS_HANDLER()
1719 err = uverbs_copy_from(&async_data->hdr.wr_id, attrs, in UVERBS_HANDLER()
1724 async_data->cmd_out_len = cmd_out_len; in UVERBS_HANDLER()
1725 async_data->mdev = mdev; in UVERBS_HANDLER()
1726 async_data->ev_file = ev_file; in UVERBS_HANDLER()
1729 err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in, in UVERBS_HANDLER()
1732 async_data->hdr.out_data, in UVERBS_HANDLER()
1733 async_data->cmd_out_len, in UVERBS_HANDLER()
1734 devx_query_callback, &async_data->cb_work); in UVERBS_HANDLER()
1744 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use); in UVERBS_HANDLER()
1761 event = xa_load(&devx_event_table->event_xa, key_level1); in subscribe_event_xa_dealloc()
1764 xa_val_level2 = xa_load(&event->object_ids, in subscribe_event_xa_dealloc()
1766 if (list_empty(&xa_val_level2->obj_sub_list)) { in subscribe_event_xa_dealloc()
1767 xa_erase(&event->object_ids, in subscribe_event_xa_dealloc()
1783 event = xa_load(&devx_event_table->event_xa, key_level1); in subscribe_event_xa_alloc()
1787 return -ENOMEM; in subscribe_event_xa_alloc()
1789 INIT_LIST_HEAD(&event->unaffiliated_list); in subscribe_event_xa_alloc()
1790 xa_init(&event->object_ids); in subscribe_event_xa_alloc()
1792 err = xa_insert(&devx_event_table->event_xa, in subscribe_event_xa_alloc()
1805 obj_event = xa_load(&event->object_ids, key_level2); in subscribe_event_xa_alloc()
1810 return -ENOMEM; in subscribe_event_xa_alloc()
1812 err = xa_insert(&event->object_ids, in subscribe_event_xa_alloc()
1820 INIT_LIST_HEAD(&obj_event->obj_sub_list); in subscribe_event_xa_alloc()
1900 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1901 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1905 struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table; in UVERBS_HANDLER()
1919 if (!c->devx_uid) in UVERBS_HANDLER()
1920 return -EINVAL; in UVERBS_HANDLER()
1923 obj = (struct devx_obj *)devx_uobj->object; in UVERBS_HANDLER()
1925 obj_id = get_dec_obj_id(obj->obj_id); in UVERBS_HANDLER()
1949 return -EINVAL; in UVERBS_HANDLER()
1965 return -EINVAL; in UVERBS_HANDLER()
1970 if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj)) in UVERBS_HANDLER()
1971 return -EINVAL; in UVERBS_HANDLER()
1978 mutex_lock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
1997 err = -ENOMEM; in UVERBS_HANDLER()
2001 list_add_tail(&event_sub->event_list, &sub_list); in UVERBS_HANDLER()
2002 uverbs_uobject_get(&ev_file->uobj); in UVERBS_HANDLER()
2004 event_sub->eventfd = in UVERBS_HANDLER()
2007 if (IS_ERR(event_sub->eventfd)) { in UVERBS_HANDLER()
2008 err = PTR_ERR(event_sub->eventfd); in UVERBS_HANDLER()
2009 event_sub->eventfd = NULL; in UVERBS_HANDLER()
2014 event_sub->cookie = cookie; in UVERBS_HANDLER()
2015 event_sub->ev_file = ev_file; in UVERBS_HANDLER()
2017 event_sub->xa_key_level1 = key_level1; in UVERBS_HANDLER()
2018 event_sub->xa_key_level2 = obj_id; in UVERBS_HANDLER()
2019 INIT_LIST_HEAD(&event_sub->obj_list); in UVERBS_HANDLER()
2030 list_del_init(&event_sub->event_list); in UVERBS_HANDLER()
2032 spin_lock_irq(&ev_file->lock); in UVERBS_HANDLER()
2033 list_add_tail_rcu(&event_sub->file_list, in UVERBS_HANDLER()
2034 &ev_file->subscribed_events_list); in UVERBS_HANDLER()
2035 spin_unlock_irq(&ev_file->lock); in UVERBS_HANDLER()
2037 event = xa_load(&devx_event_table->event_xa, in UVERBS_HANDLER()
2038 event_sub->xa_key_level1); in UVERBS_HANDLER()
2042 list_add_tail_rcu(&event_sub->xa_list, in UVERBS_HANDLER()
2043 &event->unaffiliated_list); in UVERBS_HANDLER()
2047 obj_event = xa_load(&event->object_ids, obj_id); in UVERBS_HANDLER()
2049 list_add_tail_rcu(&event_sub->xa_list, in UVERBS_HANDLER()
2050 &obj_event->obj_sub_list); in UVERBS_HANDLER()
2051 list_add_tail_rcu(&event_sub->obj_list, in UVERBS_HANDLER()
2052 &obj->event_sub); in UVERBS_HANDLER()
2055 mutex_unlock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2060 list_del(&event_sub->event_list); in UVERBS_HANDLER()
2063 event_sub->xa_key_level1, in UVERBS_HANDLER()
2067 if (event_sub->eventfd) in UVERBS_HANDLER()
2068 eventfd_ctx_put(event_sub->eventfd); in UVERBS_HANDLER()
2069 uverbs_uobject_put(&event_sub->ev_file->uobj); in UVERBS_HANDLER()
2073 mutex_unlock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2090 return -EFAULT; in devx_umem_get()
2104 obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access); in devx_umem_get()
2105 if (IS_ERR(obj->umem)) in devx_umem_get()
2106 return PTR_ERR(obj->umem); in devx_umem_get()
2108 mlx5_ib_cont_pages(obj->umem, obj->umem->address, in devx_umem_get()
2110 &obj->page_shift, &obj->ncont, NULL); in devx_umem_get()
2113 ib_umem_release(obj->umem); in devx_umem_get()
2114 return -EINVAL; in devx_umem_get()
2117 page_mask = (1 << obj->page_shift) - 1; in devx_umem_get()
2118 obj->page_offset = obj->umem->address & page_mask; in devx_umem_get()
2127 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) + in devx_umem_reg_cmd_alloc()
2128 (MLX5_ST_SZ_BYTES(mtt) * obj->ncont); in devx_umem_reg_cmd_alloc()
2129 cmd->in = uverbs_zalloc(attrs, cmd->inlen); in devx_umem_reg_cmd_alloc()
2130 return PTR_ERR_OR_ZERO(cmd->in); in devx_umem_reg_cmd_alloc()
2140 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem); in devx_umem_reg_cmd_build()
2143 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM); in devx_umem_reg_cmd_build()
2144 MLX5_SET64(umem, umem, num_of_mtt, obj->ncont); in devx_umem_reg_cmd_build()
2145 MLX5_SET(umem, umem, log_page_size, obj->page_shift - in devx_umem_reg_cmd_build()
2147 MLX5_SET(umem, umem, page_offset, obj->page_offset); in devx_umem_reg_cmd_build()
2148 mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt, in devx_umem_reg_cmd_build()
2149 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) | in devx_umem_reg_cmd_build()
2162 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
2163 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
2166 if (!c->devx_uid) in UVERBS_HANDLER()
2167 return -EINVAL; in UVERBS_HANDLER()
2171 return -ENOMEM; in UVERBS_HANDLER()
2173 err = devx_umem_get(dev, &c->ibucontext, attrs, obj); in UVERBS_HANDLER()
2183 MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid); in UVERBS_HANDLER()
2184 err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out, in UVERBS_HANDLER()
2189 obj->mdev = dev->mdev; in UVERBS_HANDLER()
2190 uobj->object = obj; in UVERBS_HANDLER()
2191 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id); in UVERBS_HANDLER()
2199 ib_umem_release(obj->umem); in UVERBS_HANDLER()
2209 struct devx_umem *obj = uobject->object; in devx_umem_cleanup()
2213 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); in devx_umem_cleanup()
2217 ib_umem_release(obj->umem); in devx_umem_cleanup()
2261 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in devx_get_obj_id_from_event()
2264 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff; in devx_get_obj_id_from_event()
2268 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; in devx_get_obj_id_from_event()
2271 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; in devx_get_obj_id_from_event()
2274 obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id); in devx_get_obj_id_from_event()
2288 ev_file = event_sub->ev_file; in deliver_event()
2290 if (ev_file->omit_data) { in deliver_event()
2291 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2292 if (!list_empty(&event_sub->event_list) || in deliver_event()
2293 ev_file->is_destroyed) { in deliver_event()
2294 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2298 list_add_tail(&event_sub->event_list, &ev_file->event_list); in deliver_event()
2299 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2300 wake_up_interruptible(&ev_file->poll_wait); in deliver_event()
2307 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2308 ev_file->is_overflow_err = 1; in deliver_event()
2309 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2310 return -ENOMEM; in deliver_event()
2313 event_data->hdr.cookie = event_sub->cookie; in deliver_event()
2314 memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe)); in deliver_event()
2316 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2317 if (!ev_file->is_destroyed) in deliver_event()
2318 list_add_tail(&event_data->list, &ev_file->event_list); in deliver_event()
2321 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2322 wake_up_interruptible(&ev_file->poll_wait); in deliver_event()
2333 if (item->eventfd) in dispatch_event_fd()
2334 eventfd_signal(item->eventfd, 1); in dispatch_event_fd()
2358 is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type); in devx_event_notifier()
2364 event = xa_load(&table->event_xa, event_type | (obj_type << 16)); in devx_event_notifier()
2371 dispatch_event_fd(&event->unaffiliated_list, data); in devx_event_notifier()
2377 obj_event = xa_load(&event->object_ids, obj_id); in devx_event_notifier()
2383 dispatch_event_fd(&obj_event->obj_sub_list, data); in devx_event_notifier()
2391 struct mlx5_devx_event_table *table = &dev->devx_event_table; in mlx5_ib_devx_init()
2396 dev->devx_whitelist_uid = uid; in mlx5_ib_devx_init()
2397 xa_init(&table->event_xa); in mlx5_ib_devx_init()
2398 mutex_init(&table->event_xa_lock); in mlx5_ib_devx_init()
2399 MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY); in mlx5_ib_devx_init()
2400 mlx5_eq_notifier_register(dev->mdev, &table->devx_nb); in mlx5_ib_devx_init()
2408 struct mlx5_devx_event_table *table = &dev->devx_event_table; in mlx5_ib_devx_cleanup()
2409 struct devx_event_subscription *sub, *tmp; in mlx5_ib_devx_cleanup() local
2414 if (dev->devx_whitelist_uid) { in mlx5_ib_devx_cleanup()
2415 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb); in mlx5_ib_devx_cleanup()
2416 mutex_lock(&dev->devx_event_table.event_xa_lock); in mlx5_ib_devx_cleanup()
2417 xa_for_each(&table->event_xa, id, entry) { in mlx5_ib_devx_cleanup()
2420 sub, tmp, &event->unaffiliated_list, xa_list) in mlx5_ib_devx_cleanup()
2421 devx_cleanup_subscription(dev, sub); in mlx5_ib_devx_cleanup()
2424 mutex_unlock(&dev->devx_event_table.event_xa_lock); in mlx5_ib_devx_cleanup()
2425 xa_destroy(&table->event_xa); in mlx5_ib_devx_cleanup()
2427 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid); in mlx5_ib_devx_cleanup()
2434 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; in devx_async_cmd_event_read()
2435 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_read()
2440 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2442 while (list_empty(&ev_queue->event_list)) { in devx_async_cmd_event_read()
2443 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2445 if (filp->f_flags & O_NONBLOCK) in devx_async_cmd_event_read()
2446 return -EAGAIN; in devx_async_cmd_event_read()
2449 ev_queue->poll_wait, in devx_async_cmd_event_read()
2450 (!list_empty(&ev_queue->event_list) || in devx_async_cmd_event_read()
2451 ev_queue->is_destroyed))) { in devx_async_cmd_event_read()
2452 return -ERESTARTSYS; in devx_async_cmd_event_read()
2455 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2456 if (ev_queue->is_destroyed) { in devx_async_cmd_event_read()
2457 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2458 return -EIO; in devx_async_cmd_event_read()
2462 event = list_entry(ev_queue->event_list.next, in devx_async_cmd_event_read()
2464 eventsz = event->cmd_out_len + in devx_async_cmd_event_read()
2468 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2469 return -ENOSPC; in devx_async_cmd_event_read()
2472 list_del(ev_queue->event_list.next); in devx_async_cmd_event_read()
2473 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2475 if (copy_to_user(buf, &event->hdr, eventsz)) in devx_async_cmd_event_read()
2476 ret = -EFAULT; in devx_async_cmd_event_read()
2480 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use); in devx_async_cmd_event_read()
2488 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; in devx_async_cmd_event_poll()
2489 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_poll()
2492 poll_wait(filp, &ev_queue->poll_wait, wait); in devx_async_cmd_event_poll()
2494 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_poll()
2495 if (ev_queue->is_destroyed) in devx_async_cmd_event_poll()
2497 else if (!list_empty(&ev_queue->event_list)) in devx_async_cmd_event_poll()
2499 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_poll()
2515 struct devx_async_event_file *ev_file = filp->private_data; in devx_async_event_read()
2523 omit_data = ev_file->omit_data; in devx_async_event_read()
2525 spin_lock_irq(&ev_file->lock); in devx_async_event_read()
2527 if (ev_file->is_overflow_err) { in devx_async_event_read()
2528 ev_file->is_overflow_err = 0; in devx_async_event_read()
2529 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2530 return -EOVERFLOW; in devx_async_event_read()
2534 while (list_empty(&ev_file->event_list)) { in devx_async_event_read()
2535 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2537 if (filp->f_flags & O_NONBLOCK) in devx_async_event_read()
2538 return -EAGAIN; in devx_async_event_read()
2540 if (wait_event_interruptible(ev_file->poll_wait, in devx_async_event_read()
2541 (!list_empty(&ev_file->event_list) || in devx_async_event_read()
2542 ev_file->is_destroyed))) { in devx_async_event_read()
2543 return -ERESTARTSYS; in devx_async_event_read()
2546 spin_lock_irq(&ev_file->lock); in devx_async_event_read()
2547 if (ev_file->is_destroyed) { in devx_async_event_read()
2548 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2549 return -EIO; in devx_async_event_read()
2554 event_sub = list_first_entry(&ev_file->event_list, in devx_async_event_read()
2557 eventsz = sizeof(event_sub->cookie); in devx_async_event_read()
2558 event_data = &event_sub->cookie; in devx_async_event_read()
2560 event = list_first_entry(&ev_file->event_list, in devx_async_event_read()
2564 event_data = &event->hdr; in devx_async_event_read()
2568 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2569 return -EINVAL; in devx_async_event_read()
2573 list_del_init(&event_sub->event_list); in devx_async_event_read()
2575 list_del(&event->list); in devx_async_event_read()
2577 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2581 ret = -EFAULT; in devx_async_event_read()
2593 struct devx_async_event_file *ev_file = filp->private_data; in devx_async_event_poll()
2596 poll_wait(filp, &ev_file->poll_wait, wait); in devx_async_event_poll()
2598 spin_lock_irq(&ev_file->lock); in devx_async_event_poll()
2599 if (ev_file->is_destroyed) in devx_async_event_poll()
2601 else if (!list_empty(&ev_file->event_list)) in devx_async_event_poll()
2603 spin_unlock_irq(&ev_file->lock); in devx_async_event_poll()
2613 if (event_sub->eventfd) in devx_free_subscription()
2614 eventfd_ctx_put(event_sub->eventfd); in devx_free_subscription()
2615 uverbs_uobject_put(&event_sub->ev_file->uobj); in devx_free_subscription()
2633 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_destroy_uobj()
2636 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_destroy_uobj()
2637 ev_queue->is_destroyed = 1; in devx_async_cmd_event_destroy_uobj()
2638 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_destroy_uobj()
2639 wake_up_interruptible(&ev_queue->poll_wait); in devx_async_cmd_event_destroy_uobj()
2641 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx); in devx_async_cmd_event_destroy_uobj()
2643 spin_lock_irq(&comp_ev_file->ev_queue.lock); in devx_async_cmd_event_destroy_uobj()
2645 &comp_ev_file->ev_queue.event_list, list) { in devx_async_cmd_event_destroy_uobj()
2646 list_del(&entry->list); in devx_async_cmd_event_destroy_uobj()
2649 spin_unlock_irq(&comp_ev_file->ev_queue.lock); in devx_async_cmd_event_destroy_uobj()
2660 struct mlx5_ib_dev *dev = ev_file->dev; in devx_async_event_destroy_uobj()
2662 spin_lock_irq(&ev_file->lock); in devx_async_event_destroy_uobj()
2663 ev_file->is_destroyed = 1; in devx_async_event_destroy_uobj()
2666 if (ev_file->omit_data) { in devx_async_event_destroy_uobj()
2669 list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list, in devx_async_event_destroy_uobj()
2671 list_del_init(&event_sub->event_list); in devx_async_event_destroy_uobj()
2676 list_for_each_entry_safe(entry, tmp, &ev_file->event_list, in devx_async_event_destroy_uobj()
2678 list_del(&entry->list); in devx_async_event_destroy_uobj()
2683 spin_unlock_irq(&ev_file->lock); in devx_async_event_destroy_uobj()
2684 wake_up_interruptible(&ev_file->poll_wait); in devx_async_event_destroy_uobj()
2686 mutex_lock(&dev->devx_event_table.event_xa_lock); in devx_async_event_destroy_uobj()
2689 &ev_file->subscribed_events_list, file_list) { in devx_async_event_destroy_uobj()
2691 list_del_rcu(&event_sub->file_list); in devx_async_event_destroy_uobj()
2693 call_rcu(&event_sub->rcu, devx_free_subscription); in devx_async_event_destroy_uobj()
2695 mutex_unlock(&dev->devx_event_table.event_xa_lock); in devx_async_event_destroy_uobj()
2697 put_device(&dev->ib_dev.dev); in devx_async_event_destroy_uobj()
2910 return MLX5_CAP_GEN(dev->mdev, log_max_uctx); in devx_is_supported()