// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"
#include "uverbs.h"
#include "core_priv.h"

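/*
 * Cleanup callback for QP uobjects: invoked by the uverbs core when a QP
 * object is removed, either by an explicit user destroy or during context
 * teardown (the "why" argument distinguishes the two cases).
 */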
static int uverbs_free_qp(struct ib_uobject *uobject,
			  enum rdma_remove_reason why,
			  struct uverbs_attr_bundle *attrs)
{
	struct ib_qp *qp = uobject->object;
	struct ib_uqp_object *uqp =
		container_of(uobject, struct ib_uqp_object, uevent.uobject);
	int ret;

	/*
	 * If this is a user triggered destroy then do not allow destruction
	 * until the user cleans up all the mcast bindings. Unlike in other
	 * places we forcibly clean up the mcast attachments for !DESTROY
	 * because the mcast attaches are not uobjects and will not be
	 * destroyed by anything else during cleanup processing.
	 */
	if (why == RDMA_REMOVE_DESTROY) {
		if (!list_empty(&uqp->mcast_list))
			return -EBUSY;
	} else if (qp == qp->real_qp) {
		ib_uverbs_detach_umcast(qp, uqp);
	}

	ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	if (uqp->uxrcd)
		atomic_dec(&uqp->uxrcd->refcnt);

	ib_uverbs_release_uevent(&uqp->uevent);
	return ret;
}

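/*
 * Validate the QP create flags against the requested QP type. Aside from
 * the SQ_SIG_ALL bit (handled separately) and driver QPs, create flags are
 * only meaningful for raw packet and UD QPs, and the FCS/CVLAN stripping
 * flags are further restricted to raw packet QPs.
 */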
static int check_creation_flags(enum ib_qp_type qp_type,
				u32 create_flags)
{
	create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;

	if (!create_flags || qp_type == IB_QPT_DRIVER)
		return 0;

	if (qp_type != IB_QPT_RAW_PACKET && qp_type != IB_QPT_UD)
		return -EINVAL;

	if ((create_flags & IB_UVERBS_QP_CREATE_SCATTER_FCS ||
	     create_flags & IB_UVERBS_QP_CREATE_CVLAN_STRIPPING) &&
	     qp_type != IB_QPT_RAW_PACKET)
		return -EINVAL;

	return 0;
}

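/*
 * Copy QP capabilities between the uverbs request/response layout and
 * struct ib_qp_init_attr: req == true fills the init attributes from the
 * user-supplied caps, req == false reports the caps the driver actually
 * granted back to the user.
 */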
static void set_caps(struct ib_qp_init_attr *attr,
		     struct ib_uverbs_qp_cap *cap, bool req)
{
	if (req) {
		attr->cap.max_send_wr = cap->max_send_wr;
		attr->cap.max_recv_wr = cap->max_recv_wr;
		attr->cap.max_send_sge = cap->max_send_sge;
		attr->cap.max_recv_sge = cap->max_recv_sge;
		attr->cap.max_inline_data = cap->max_inline_data;
	} else {
		cap->max_send_wr = attr->cap.max_send_wr;
		cap->max_recv_wr = attr->cap.max_recv_wr;
		cap->max_send_sge = attr->cap.max_send_sge;
		cap->max_recv_sge = attr->cap.max_recv_sge;
		cap->max_inline_data = attr->cap.max_inline_data;
	}
}

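/*
 * Ioctl handler for UVERBS_METHOD_QP_CREATE. Parses the caps, user handle
 * and QP type, resolves the per-type mandatory/forbidden object handles
 * (XRCD for XRC_TGT, otherwise PD plus CQs, SRQ or RWQ indirection table),
 * validates the create flags and finally creates the QP, copying the
 * resulting caps and QP number back to userspace.
 */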
static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uqp_object *obj = container_of(
		uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_QP_HANDLE),
		typeof(*obj), uevent.uobject);
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_qp_cap cap = {};
	struct ib_rwq_ind_table *rwq_ind_tbl = NULL;
	struct ib_qp *qp;
	struct ib_pd *pd = NULL;
	struct ib_srq *srq = NULL;
	struct ib_cq *recv_cq = NULL;
	struct ib_cq *send_cq = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = NULL;
	struct ib_device *device;
	u64 user_handle;
	int ret;

	ret = uverbs_copy_from_or_zero(&cap, attrs,
				       UVERBS_ATTR_CREATE_QP_CAP);
	if (!ret)
		ret = uverbs_copy_from(&user_handle, attrs,
				       UVERBS_ATTR_CREATE_QP_USER_HANDLE);
	if (!ret)
		ret = uverbs_get_const(&attr.qp_type, attrs,
				       UVERBS_ATTR_CREATE_QP_TYPE);
	if (ret)
		return ret;

	switch (attr.qp_type) {
	case IB_QPT_XRC_TGT:
		if (uverbs_attr_is_valid(attrs,
				UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
		    uverbs_attr_is_valid(attrs,
				UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE) ||
		    uverbs_attr_is_valid(attrs,
				UVERBS_ATTR_CREATE_QP_PD_HANDLE) ||
		    uverbs_attr_is_valid(attrs,
				UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE))
			return -EINVAL;

		xrcd_uobj = uverbs_attr_get_uobject(attrs,
				UVERBS_ATTR_CREATE_QP_XRCD_HANDLE);
		if (IS_ERR(xrcd_uobj))
			return PTR_ERR(xrcd_uobj);

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd)
			return -EINVAL;
		device = xrcd->device;
		break;
	case IB_UVERBS_QPT_RAW_PACKET:
		if (!capable(CAP_NET_RAW))
			return -EPERM;
		fallthrough;
	case IB_UVERBS_QPT_RC:
	case IB_UVERBS_QPT_UC:
	case IB_UVERBS_QPT_UD:
	case IB_UVERBS_QPT_XRC_INI:
	case IB_UVERBS_QPT_DRIVER:
		if (uverbs_attr_is_valid(attrs,
				UVERBS_ATTR_CREATE_QP_XRCD_HANDLE) ||
		    (uverbs_attr_is_valid(attrs,
				UVERBS_ATTR_CREATE_QP_SRQ_HANDLE) &&
		     attr.qp_type == IB_QPT_XRC_INI))
			return -EINVAL;

		pd = uverbs_attr_get_obj(attrs,
					 UVERBS_ATTR_CREATE_QP_PD_HANDLE);
		if (IS_ERR(pd))
			return PTR_ERR(pd);

		rwq_ind_tbl = uverbs_attr_get_obj(attrs,
				UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE);
		if (!IS_ERR(rwq_ind_tbl)) {
			if (cap.max_recv_wr || cap.max_recv_sge ||
			    uverbs_attr_is_valid(attrs,
					UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
			    uverbs_attr_is_valid(attrs,
					UVERBS_ATTR_CREATE_QP_SRQ_HANDLE))
				return -EINVAL;

			/* send_cq is optional */
			if (cap.max_send_wr) {
				send_cq = uverbs_attr_get_obj(attrs,
					UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
				if (IS_ERR(send_cq))
					return PTR_ERR(send_cq);
			}
			attr.rwq_ind_tbl = rwq_ind_tbl;
		} else {
			send_cq = uverbs_attr_get_obj(attrs,
					UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
			if (IS_ERR(send_cq))
				return PTR_ERR(send_cq);

			if (attr.qp_type != IB_QPT_XRC_INI) {
				recv_cq = uverbs_attr_get_obj(attrs,
					UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE);
				if (IS_ERR(recv_cq))
					return PTR_ERR(recv_cq);
			}
		}

		device = pd->device;
		break;
	default:
		return -EINVAL;
	}

	ret = uverbs_get_flags32(&attr.create_flags, attrs,
				 UVERBS_ATTR_CREATE_QP_FLAGS,
				 IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				 IB_UVERBS_QP_CREATE_SCATTER_FCS |
				 IB_UVERBS_QP_CREATE_CVLAN_STRIPPING |
				 IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING |
				 IB_UVERBS_QP_CREATE_SQ_SIG_ALL);
	if (ret)
		return ret;

	ret = check_creation_flags(attr.qp_type, attr.create_flags);
	if (ret)
		return ret;

	if (uverbs_attr_is_valid(attrs,
				 UVERBS_ATTR_CREATE_QP_SOURCE_QPN)) {
		ret = uverbs_copy_from(&attr.source_qpn, attrs,
				       UVERBS_ATTR_CREATE_QP_SOURCE_QPN);
		if (ret)
			return ret;
		attr.create_flags |= IB_QP_CREATE_SOURCE_QPN;
	}

	srq = uverbs_attr_get_obj(attrs,
				  UVERBS_ATTR_CREATE_QP_SRQ_HANDLE);
	if (!IS_ERR(srq)) {
		if ((srq->srq_type == IB_SRQT_XRC &&
		     attr.qp_type != IB_QPT_XRC_TGT) ||
		    (srq->srq_type != IB_SRQT_XRC &&
		     attr.qp_type == IB_QPT_XRC_TGT))
			return -EINVAL;
		attr.srq = srq;
	}

	obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
					UVERBS_ATTR_CREATE_QP_EVENT_FD);
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);
	obj->uevent.uobject.user_handle = user_handle;
	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.send_cq = send_cq;
	attr.recv_cq = recv_cq;
	attr.xrcd = xrcd;
	if (attr.create_flags & IB_UVERBS_QP_CREATE_SQ_SIG_ALL) {
		/*
		 * This creation flag is uverbs-only and must be masked out
		 * before calling the driver. It was added to avoid an extra
		 * user attribute just for this when using ioctl.
		 */
		attr.create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
		attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	} else {
		attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	}

	set_caps(&attr, &cap, true);
	mutex_init(&obj->mcast_lock);

	if (attr.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
				   obj);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (attr.qp_type != IB_QPT_XRC_TGT) {
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (attr.rwq_ind_tbl)
			atomic_inc(&attr.rwq_ind_tbl->usecnt);
	} else {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = obj;
	}

	obj->uevent.uobject.object = qp;
	uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_QP_HANDLE);

	if (attr.qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			return ret;
	}

	set_caps(&attr, &cap, false);
	ret = uverbs_copy_to_struct_or_zero(attrs,
					    UVERBS_ATTR_CREATE_QP_RESP_CAP,
					    &cap, sizeof(cap));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
			     &qp->qp_num,
			     sizeof(qp->qp_num));

	return ret;
err_put:
	if (obj->uevent.event_file)
		uverbs_uobject_put(&obj->uevent.event_file->uobj);
	return ret;
}

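/*
 * Attribute specification for UVERBS_METHOD_QP_CREATE. Only the new QP
 * handle, user handle, caps, QP type, response caps and response QP number
 * are mandatory; the remaining object handles are optional because their
 * validity depends on the QP type and is checked in the handler above.
 */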
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QP_CREATE,
	UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_HANDLE,
			UVERBS_OBJECT_QP,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_XRCD_HANDLE,
			UVERBS_OBJECT_XRCD,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_PD_HANDLE,
			UVERBS_OBJECT_PD,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SRQ_HANDLE,
			UVERBS_OBJECT_SRQ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE,
			UVERBS_OBJECT_CQ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE,
			UVERBS_OBJECT_CQ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE,
			UVERBS_OBJECT_RWQ_IND_TBL,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_USER_HANDLE,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_CAP,
			   UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
					      max_inline_data),
			   UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_QP_TYPE,
			     enum ib_uverbs_qp_type,
			     UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_QP_FLAGS,
			     enum ib_uverbs_qp_create_flags,
			     UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_SOURCE_QPN,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL),
	UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_QP_EVENT_FD,
		       UVERBS_OBJECT_ASYNC_EVENT,
		       UVERBS_ACCESS_READ,
		       UA_OPTIONAL),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_CAP,
			    UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
					       max_inline_data),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_UHW());

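/*
 * Ioctl handler for UVERBS_METHOD_QP_DESTROY. The actual destruction is
 * performed by uverbs_free_qp() via the UVERBS_ACCESS_DESTROY machinery;
 * this handler only reports the number of asynchronous events that were
 * delivered for the QP.
 */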
static int UVERBS_HANDLER(UVERBS_METHOD_QP_DESTROY)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj =
		uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_QP_HANDLE);
	struct ib_uqp_object *obj =
		container_of(uobj, struct ib_uqp_object, uevent.uobject);
	struct ib_uverbs_destroy_qp_resp resp = {
		.events_reported = obj->uevent.events_reported
	};

	return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_QP_RESP, &resp,
			      sizeof(resp));
}

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QP_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_QP_HANDLE,
			UVERBS_OBJECT_QP,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_QP_RESP,
			    UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_qp_resp),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_QP,
	UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp),
	&UVERBS_METHOD(UVERBS_METHOD_QP_CREATE),
	&UVERBS_METHOD(UVERBS_METHOD_QP_DESTROY));

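/*
 * Expose the QP object tree to the uverbs uapi, but only when the driver
 * implements destroy_qp.
 */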
const struct uapi_definition uverbs_def_obj_qp[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
				      UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
	{}
};