// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "srq.h"

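/* Return the address of WQE number @n in the SRQ's fragmented buffer. */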
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
        return mlx5_frag_buf_get_wqe(&srq->fbc, n);
}

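/*
 * Forward a hardware SRQ event to the consumer's event handler, translating
 * the mlx5 event type (limit reached or catastrophic error) into the
 * corresponding IB event.
 */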
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
        struct ib_event event;
        struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

        if (ibsrq->event_handler) {
                event.device = ibsrq->device;
                event.element.srq = ibsrq;
                switch (type) {
                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        event.event = IB_EVENT_SRQ_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
                                type, srq->srqn);
                        return;
                }

                ibsrq->event_handler(&event, ibsrq->srq_context);
        }
}

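/*
 * Set up the userspace-backed half of SRQ creation: copy and validate the
 * create request from udata, pin the user-supplied WQE buffer, and map the
 * user doorbell record.
 */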
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                           struct mlx5_srq_attr *in,
                           struct ib_udata *udata, int buf_size)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_create_srq ucmd = {};
        struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);
        size_t ucmdlen;
        int err;
        u32 uidx = MLX5_IB_DEFAULT_UIDX;

        ucmdlen = min(udata->inlen, sizeof(ucmd));

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
                mlx5_ib_dbg(dev, "failed copy udata\n");
                return -EFAULT;
        }

        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;

        if (udata->inlen > sizeof(ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(ucmd),
                                 udata->inlen - sizeof(ucmd)))
                return -EINVAL;

        if (in->type != IB_SRQT_BASIC) {
                err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);
                if (err)
                        return err;
        }

        srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

        srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
        if (IS_ERR(srq->umem)) {
                mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
                err = PTR_ERR(srq->umem);
                return err;
        }
        in->umem = srq->umem;

        err = mlx5_ib_db_map_user(ucontext, ucmd.db_addr, &srq->db);
        if (err) {
                mlx5_ib_dbg(dev, "map doorbell failed\n");
                goto err_umem;
        }

        in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
        if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
            in->type != IB_SRQT_BASIC)
                in->user_index = uidx;

        return 0;

err_umem:
        ib_umem_release(srq->umem);

        return err;
}

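/*
 * Set up a kernel-owned SRQ: allocate the doorbell record and the fragmented
 * WQE buffer, chain all WQEs into the free list, and fill the physical
 * address array handed to firmware.
 */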
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                             struct mlx5_srq_attr *in, int buf_size)
{
        int err;
        int i;
        struct mlx5_wqe_srq_next_seg *next;

        err = mlx5_db_alloc(dev->mdev, &srq->db);
        if (err) {
                mlx5_ib_warn(dev, "alloc dbell rec failed\n");
                return err;
        }

        if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
                                     dev->mdev->priv.numa_node)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
                goto err_db;
        }

        mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
                      &srq->fbc);

        srq->head = 0;
        srq->tail = srq->msrq.max - 1;
        srq->wqe_ctr = 0;

        for (i = 0; i < srq->msrq.max; i++) {
                next = get_wqe(srq, i);
                next->next_wqe_index =
                        cpu_to_be16((i + 1) & (srq->msrq.max - 1));
        }

        mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
        in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
        if (!in->pas) {
                err = -ENOMEM;
                goto err_buf;
        }
        mlx5_fill_page_frag_array(&srq->buf, in->pas);

        srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
        if (!srq->wrid) {
                err = -ENOMEM;
                goto err_in;
        }
        srq->wq_sig = 0;

        in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
            in->type != IB_SRQT_BASIC)
                in->user_index = MLX5_IB_DEFAULT_UIDX;

        return 0;

err_in:
        kvfree(in->pas);

err_buf:
        mlx5_frag_buf_free(dev->mdev, &srq->buf);

err_db:
        mlx5_db_free(dev->mdev, &srq->db);
        return err;
}

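/* Undo create_srq_user(): unmap the user doorbell and release the umem. */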
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                             struct ib_udata *udata)
{
        mlx5_ib_db_unmap_user(
                rdma_udata_to_drv_context(
                        udata,
                        struct mlx5_ib_ucontext,
                        ibucontext),
                &srq->db);
        ib_umem_release(srq->umem);
}

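/* Undo create_srq_kernel(): free the wrid array, WQE buffer and doorbell. */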
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
        kvfree(srq->wrid);
        mlx5_frag_buf_free(dev->mdev, &srq->buf);
        mlx5_db_free(dev->mdev, &srq->db);
}

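/*
 * Create an SRQ. Validates the requested size against device limits, sizes
 * the WQE descriptors, delegates buffer setup to the user or kernel path,
 * and issues the firmware create command.
 */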
int mlx5_ib_create_srq(struct ib_srq *ib_srq,
                       struct ib_srq_init_attr *init_attr,
                       struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
        struct mlx5_ib_srq *srq = to_msrq(ib_srq);
        size_t desc_size;
        size_t buf_size;
        int err;
        struct mlx5_srq_attr in = {};
        __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

        if (init_attr->srq_type != IB_SRQT_BASIC &&
            init_attr->srq_type != IB_SRQT_XRC &&
            init_attr->srq_type != IB_SRQT_TM)
                return -EOPNOTSUPP;

        /* Sanity check SRQ size before proceeding */
        if (init_attr->attr.max_wr >= max_srq_wqes) {
                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
                            init_attr->attr.max_wr,
                            max_srq_wqes);
                return -EINVAL;
        }

        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
        srq->msrq.max_gs = init_attr->attr.max_sge;

        desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
        if (desc_size == 0 || srq->msrq.max_gs > desc_size)
                return -EINVAL;

        desc_size = roundup_pow_of_two(desc_size);
        desc_size = max_t(size_t, 32, desc_size);
        if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
                return -EINVAL;

        srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                                     sizeof(struct mlx5_wqe_data_seg);
        srq->msrq.wqe_shift = ilog2(desc_size);
        buf_size = srq->msrq.max * desc_size;
        if (buf_size < desc_size)
                return -EINVAL;

        in.type = init_attr->srq_type;

        if (udata)
                err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
        else
                err = create_srq_kernel(dev, srq, &in, buf_size);

        if (err) {
                mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
                             udata ? "user" : "kernel", err);
                return err;
        }

        in.log_size = ilog2(srq->msrq.max);
        in.wqe_shift = srq->msrq.wqe_shift - 4;
        if (srq->wq_sig)
                in.flags |= MLX5_SRQ_FLAG_WQ_SIG;

        if (init_attr->srq_type == IB_SRQT_XRC && init_attr->ext.xrc.xrcd)
                in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
        else
                in.xrcd = dev->devr.xrcdn0;

        if (init_attr->srq_type == IB_SRQT_TM) {
                in.tm_log_list_size =
                        ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
                if (in.tm_log_list_size >
                    MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
                        mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
                        err = -EINVAL;
                        goto err_usr_kern_srq;
                }
                in.flags |= MLX5_SRQ_FLAG_RNDV;
        }

        if (ib_srq_has_cq(init_attr->srq_type))
                in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
        else
                in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;

        in.pd = to_mpd(ib_srq->pd)->pdn;
        in.db_record = srq->db.dma;
        err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
        kvfree(in.pas);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
                goto err_usr_kern_srq;
        }

        mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

        srq->msrq.event = mlx5_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

        if (udata) {
                struct mlx5_ib_create_srq_resp resp = {
                        .srqn = srq->msrq.srqn,
                };

                if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
                                     sizeof(resp)))) {
                        mlx5_ib_dbg(dev, "copy to user failed\n");
                        err = -EFAULT;
                        goto err_core;
                }
        }

        init_attr->attr.max_wr = srq->msrq.max - 1;

        return 0;

err_core:
        mlx5_cmd_destroy_srq(dev, &srq->msrq);

err_usr_kern_srq:
        if (udata)
                destroy_srq_user(ib_srq->pd, srq, udata);
        else
                destroy_srq_kernel(dev, srq);

        return err;
}

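/*
 * Modify an SRQ. Resizing (IB_SRQ_MAX_WR) is not supported; IB_SRQ_LIMIT
 * re-arms the SRQ limit event with the requested watermark.
 */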
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        int ret;

        /* We don't support resizing SRQs yet */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                if (attr->srq_limit >= srq->msrq.max)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
        }

        return 0;
}

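/* Query the SRQ state from firmware and report the current limit watermark. */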
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        int ret;
        struct mlx5_srq_attr *out;

        out = kzalloc(sizeof(*out), GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
        if (ret)
                goto out_box;

        srq_attr->srq_limit = out->lwm;
        srq_attr->max_wr = srq->msrq.max - 1;
        srq_attr->max_sge = srq->msrq.max_gs;

out_box:
        kfree(out);
        return ret;
}

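/* Destroy the SRQ in firmware, then release its user or kernel resources. */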
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(srq->device);
        struct mlx5_ib_srq *msrq = to_msrq(srq);
        int ret;

        ret = mlx5_cmd_destroy_srq(dev, &msrq->msrq);
        if (ret)
                return ret;

        if (udata)
                destroy_srq_user(srq->pd, msrq, udata);
        else
                destroy_srq_kernel(dev, msrq);
        return 0;
}

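/* Return a WQE to the SRQ free list by linking it after the current tail. */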
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
        struct mlx5_wqe_srq_next_seg *next;

        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        next = get_wqe(srq, srq->tail);
        next->next_wqe_index = cpu_to_be16(wqe_index);
        srq->tail = wqe_index;

        spin_unlock(&srq->lock);
}

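/*
 * Post a chain of receive WRs to the SRQ. Each WR consumes one WQE from the
 * free list; the doorbell record is updated once after the last WR, behind a
 * write barrier that orders descriptor writes before the doorbell update.
 */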
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                          const struct ib_recv_wr **bad_wr)
{
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        struct mlx5_wqe_srq_next_seg *next;
        struct mlx5_wqe_data_seg *scat;
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_core_dev *mdev = dev->mdev;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                err = -EIO;
                *bad_wr = wr;
                goto out;
        }

        for (nreq = 0; wr; nreq++, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                srq->wrid[srq->head] = wr->wr_id;

                next = get_wqe(srq, srq->head);
                srq->head = be16_to_cpu(next->next_wqe_index);
                scat = (struct mlx5_wqe_data_seg *)(next + 1);

                for (i = 0; i < wr->num_sge; i++) {
                        scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
                        scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
                        scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
                }

                if (i < srq->msrq.max_avail_gather) {
                        scat[i].byte_count = 0;
                        scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
                        scat[i].addr = 0;
                }
        }

        if (likely(nreq)) {
                srq->wqe_ctr += nreq;

                /* Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                *srq->db.db = cpu_to_be32(srq->wqe_ctr);
        }
out:
        spin_unlock_irqrestore(&srq->lock, flags);

        return err;
}