/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d "
		       "on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

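/*
 * A CQE is owned by software when its ownership bit matches the phase
 * implied by the consumer index; for 64-byte CQEs the ownership bit
 * lives in the second half of the entry.
 */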
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

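/* Allocate a kernel-owned CQE buffer and set up its MTT translation. */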
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);

	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
				    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

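/*
 * Pin a user-space CQ buffer, then build and populate the MTT that lets
 * the HCA access it.
 */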
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    (*umem)->page_shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
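/*
 * Create a CQ.  For user CQs the buffer and doorbell come from the caller's
 * ucontext; for kernel CQs they are allocated here.  On success the CQ
 * number is copied back to user space.
 */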
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0,
			    !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
	if (err)
		goto err_dbmap;

	if (context)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

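/* Allocate a kernel buffer to hold the CQ contents after a pending resize. */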
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

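/* Pin the user-supplied buffer that will back the CQ after a resize. */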
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				   int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

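/* Count CQEs that are present in the CQ but not yet consumed by software. */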
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

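/*
 * Copy CQEs that arrived before the RESIZE CQE from the old buffer into
 * the resize buffer, fixing up each entry's ownership bit for its new slot.
 */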
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

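/*
 * Resize a CQ: allocate the new buffer (user or kernel), ask the HCA to
 * switch to it, then hand off to the new buffer and free the old one.
 */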
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

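/* Translate a hardware error CQE syndrome into an IB work-completion status. */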
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
		       cqe->vendor_err_syndrome,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

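/*
 * The hardware checksum is trusted only for non-fragmented IPv4 packets
 * without IP options, with a good IP checksum, carrying TCP or UDP, and
 * with a complete 0xffff L4 checksum.
 */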
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4      |
				      MLX4_CQE_STATUS_IPV4F     |
				      MLX4_CQE_STATUS_IPV4OPT   |
				      MLX4_CQE_STATUS_IPV6      |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4        |
			    MLX4_CQE_STATUS_IPOK))              &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP       |
				      MLX4_CQE_STATUS_TCP))     &&
		checksum == cpu_to_be16(0xffff);
}

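/*
 * For proxy QPs (SR-IOV), the real completion information is carried in a
 * tunnel header at the start of the receive buffer; fill the work
 * completion from that header instead of from the CQE.
 */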
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index	= be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp	= be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags   |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->slid = 0;
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}

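/* Fabricate FLUSH_ERR completions for WQEs still posted on one work queue. */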
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0;  i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}

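/*
 * Process a single CQE: locate the QP (and SRQ, if any) it belongs to,
 * retire the matching WQE and fill in the work completion.  Returns
 * -EAGAIN when no software-owned CQE is available.
 */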
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num       = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq	  = &(*cur_qp)->rq;
		tail	  = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode    = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IB_WC_RDMA_READ;
			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode    = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode    = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode    = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode    = IB_WC_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode    = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						  (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,
					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->slid = 0;
			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
					MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->slid = be16_to_cpu(cqe->rlid);
			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}

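/*
 * Poll up to @num_entries completions.  If the device is in internal-error
 * state, report software-generated flush errors instead of touching the
 * hardware CQ.
 */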
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

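/* Request a completion notification: solicited-only or for any completion. */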
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

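/*
 * Remove all CQEs belonging to the given QP from the CQ, used when the QP
 * has been moved to reset or is being destroyed.  The caller is expected
 * to hold cq->lock.
 */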
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}