1 /*
2  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/log2.h>
35 #include <linux/etherdevice.h>
36 #include <net/ip.h>
37 #include <linux/slab.h>
38 #include <linux/netdevice.h>
39 
40 #include <rdma/ib_cache.h>
41 #include <rdma/ib_pack.h>
42 #include <rdma/ib_addr.h>
43 #include <rdma/ib_mad.h>
44 #include <rdma/uverbs_ioctl.h>
45 
46 #include <linux/mlx4/driver.h>
47 #include <linux/mlx4/qp.h>
48 
49 #include "mlx4_ib.h"
50 #include <rdma/mlx4-abi.h>
51 
52 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
53 			     struct mlx4_ib_cq *recv_cq);
54 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
55 			       struct mlx4_ib_cq *recv_cq);
56 static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
57 			      struct ib_udata *udata);
58 
59 enum {
60 	MLX4_IB_ACK_REQ_FREQ	= 8,
61 };
62 
63 enum {
64 	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
65 	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
66 	MLX4_IB_LINK_TYPE_IB		= 0,
67 	MLX4_IB_LINK_TYPE_ETH		= 1
68 };
69 
70 enum {
71 	MLX4_IB_MIN_SQ_STRIDE	= 6,
72 	MLX4_IB_CACHE_LINE_SIZE	= 64,
73 };
74 
75 enum {
76 	MLX4_RAW_QP_MTU		= 7,
77 	MLX4_RAW_QP_MSGMAX	= 31,
78 };
79 
80 #ifndef ETH_ALEN
81 #define ETH_ALEN        6
82 #endif
83 
84 static const __be32 mlx4_ib_opcode[] = {
85 	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
86 	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
87 	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
88 	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
89 	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
90 	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
91 	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
92 	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
93 	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
94 	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
95 	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
96 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
97 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
98 };
99 
100 enum mlx4_ib_source_type {
101 	MLX4_IB_QP_SRC	= 0,
102 	MLX4_IB_RWQ_SRC	= 1,
103 };
104 
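/*
 * On the master function, a QP is a tunnel QP if its QPN falls within the
 * range reserved for tunnel special QPs (eight per function); on slave
 * functions this always returns 0.
 */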
105 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
106 {
107 	if (!mlx4_is_master(dev->dev))
108 		return 0;
109 
110 	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
111 	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
112 		8 * MLX4_MFUNC_MAX;
113 }
114 
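/*
 * Returns nonzero for special QPs: the real QP0/QP1 pair on a native device
 * or the PPF, the per-port proxy QP0/QP1 on multi-function devices, and
 * RoCEv2 GSI QPs (flagged with MLX4_IB_ROCE_V2_GSI_QP).
 */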
115 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
116 {
117 	int proxy_sqp = 0;
118 	int real_sqp = 0;
119 	int i;
120 	/* PPF or Native -- real SQP */
121 	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
122 		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
123 		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
124 	if (real_sqp)
125 		return 1;
126 	/* VF or PF -- proxy SQP */
127 	if (mlx4_is_mfunc(dev->dev)) {
128 		for (i = 0; i < dev->dev->caps.num_ports; i++) {
129 			if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
130 			    qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
131 				proxy_sqp = 1;
132 				break;
133 			}
134 		}
135 	}
136 	if (proxy_sqp)
137 		return 1;
138 
139 	return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
140 }
141 
142 /* used for INIT/CLOSE port logic */
143 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
144 {
145 	int proxy_qp0 = 0;
146 	int real_qp0 = 0;
147 	int i;
148 	/* PPF or Native -- real QP0 */
149 	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
150 		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
151 		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
152 	if (real_qp0)
153 		return 1;
154 	/* VF or PF -- proxy QP0 */
155 	if (mlx4_is_mfunc(dev->dev)) {
156 		for (i = 0; i < dev->dev->caps.num_ports; i++) {
157 			if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
158 				proxy_qp0 = 1;
159 				break;
160 			}
161 		}
162 	}
163 	return proxy_qp0;
164 }
165 
166 static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
167 {
168 	return mlx4_buf_offset(&qp->buf, offset);
169 }
170 
171 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
172 {
173 	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
174 }
175 
176 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
177 {
178 	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
179 }
180 
181 /*
182  * Stamp a SQ WQE so that it is invalid if prefetched by marking the
183  * first four bytes of every 64 byte chunk with 0xffffffff, except for
184  * the very first chunk of the WQE.
185  */
186 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
187 {
188 	__be32 *wqe;
189 	int i;
190 	int s;
191 	void *buf;
192 	struct mlx4_wqe_ctrl_seg *ctrl;
193 
194 	buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
195 	ctrl = (struct mlx4_wqe_ctrl_seg *)buf;
196 	s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
197 	for (i = 64; i < s; i += 64) {
198 		wqe = buf + i;
199 		*wqe = cpu_to_be32(0xffffffff);
200 	}
201 }
202 
203 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
204 {
205 	struct ib_event event;
206 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
207 
208 	if (type == MLX4_EVENT_TYPE_PATH_MIG)
209 		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
210 
211 	if (ibqp->event_handler) {
212 		event.device     = ibqp->device;
213 		event.element.qp = ibqp;
214 		switch (type) {
215 		case MLX4_EVENT_TYPE_PATH_MIG:
216 			event.event = IB_EVENT_PATH_MIG;
217 			break;
218 		case MLX4_EVENT_TYPE_COMM_EST:
219 			event.event = IB_EVENT_COMM_EST;
220 			break;
221 		case MLX4_EVENT_TYPE_SQ_DRAINED:
222 			event.event = IB_EVENT_SQ_DRAINED;
223 			break;
224 		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
225 			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
226 			break;
227 		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
228 			event.event = IB_EVENT_QP_FATAL;
229 			break;
230 		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
231 			event.event = IB_EVENT_PATH_MIG_ERR;
232 			break;
233 		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
234 			event.event = IB_EVENT_QP_REQ_ERR;
235 			break;
236 		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
237 			event.event = IB_EVENT_QP_ACCESS_ERR;
238 			break;
239 		default:
240 			pr_warn("Unexpected event type %d "
241 			       "on QP %06x\n", type, qp->qpn);
242 			return;
243 		}
244 
245 		ibqp->event_handler(&event, ibqp->qp_context);
246 	}
247 }
248 
249 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
250 {
251 	pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n",
252 			    type, qp->qpn);
253 }
254 
255 static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
256 {
257 	/*
258 	 * UD WQEs must have a datagram segment.
259 	 * RC and UC WQEs might have a remote address segment.
260 	 * MLX WQEs need two extra inline data segments (for the UD
261 	 * header and space for the ICRC).
262 	 */
263 	switch (type) {
264 	case MLX4_IB_QPT_UD:
265 		return sizeof (struct mlx4_wqe_ctrl_seg) +
266 			sizeof (struct mlx4_wqe_datagram_seg) +
267 			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
268 	case MLX4_IB_QPT_PROXY_SMI_OWNER:
269 	case MLX4_IB_QPT_PROXY_SMI:
270 	case MLX4_IB_QPT_PROXY_GSI:
271 		return sizeof (struct mlx4_wqe_ctrl_seg) +
272 			sizeof (struct mlx4_wqe_datagram_seg) + 64;
273 	case MLX4_IB_QPT_TUN_SMI_OWNER:
274 	case MLX4_IB_QPT_TUN_GSI:
275 		return sizeof (struct mlx4_wqe_ctrl_seg) +
276 			sizeof (struct mlx4_wqe_datagram_seg);
277 
278 	case MLX4_IB_QPT_UC:
279 		return sizeof (struct mlx4_wqe_ctrl_seg) +
280 			sizeof (struct mlx4_wqe_raddr_seg);
281 	case MLX4_IB_QPT_RC:
282 		return sizeof (struct mlx4_wqe_ctrl_seg) +
283 			sizeof (struct mlx4_wqe_masked_atomic_seg) +
284 			sizeof (struct mlx4_wqe_raddr_seg);
285 	case MLX4_IB_QPT_SMI:
286 	case MLX4_IB_QPT_GSI:
287 		return sizeof (struct mlx4_wqe_ctrl_seg) +
288 			ALIGN(MLX4_IB_UD_HEADER_SIZE +
289 			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
290 					   MLX4_INLINE_ALIGN) *
291 			      sizeof (struct mlx4_wqe_inline_seg),
292 			      sizeof (struct mlx4_wqe_data_seg)) +
293 			ALIGN(4 +
294 			      sizeof (struct mlx4_wqe_inline_seg),
295 			      sizeof (struct mlx4_wqe_data_seg));
296 	default:
297 		return sizeof (struct mlx4_wqe_ctrl_seg);
298 	}
299 }
300 
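/*
 * Size the receive queue: validate the requested WR/SGE counts against device
 * limits and round them up to powers of two.  Kernel QPs report back values
 * clamped to the device caps; the userspace path keeps its historical return
 * values so the ABI is not broken.
 */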
301 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
302 		       bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
303 		       u32 inl_recv_sz)
304 {
305 	/* Sanity check RQ size before proceeding */
306 	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
307 	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
308 		return -EINVAL;
309 
310 	if (!has_rq) {
311 		if (cap->max_recv_wr || inl_recv_sz)
312 			return -EINVAL;
313 
314 		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
315 	} else {
316 		u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg *
317 			sizeof(struct mlx4_wqe_data_seg);
318 		u32 wqe_size;
319 
320 		/* HW requires >= 1 RQ entry with >= 1 gather entry */
321 		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge ||
322 				inl_recv_sz > max_inl_recv_sz))
323 			return -EINVAL;
324 
325 		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
326 		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
327 		wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
328 		qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
329 	}
330 
331 	/* leave userspace return values as they were, so as not to break ABI */
332 	if (is_user) {
333 		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
334 		cap->max_recv_sge = qp->rq.max_gs;
335 	} else {
336 		cap->max_recv_wr  = qp->rq.max_post =
337 			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
338 		cap->max_recv_sge = min(qp->rq.max_gs,
339 					min(dev->dev->caps.max_sq_sg,
340 					    dev->dev->caps.max_rq_sg));
341 	}
342 
343 	return 0;
344 }
345 
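/*
 * Size the send queue of a kernel QP: derive the WQE stride from the larger
 * of the S/G and inline layouts plus per-transport overhead, reserve spare
 * WQEs of headroom for HW prefetch, and lay out the RQ and SQ rings in the
 * QP buffer with the larger-stride ring first.
 */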
346 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
347 			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
348 {
349 	int s;
350 
351 	/* Sanity check SQ size before proceeding */
352 	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
353 	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
354 	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
355 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
356 		return -EINVAL;
357 
358 	/*
359 	 * For MLX transport we need 2 extra S/G entries:
360 	 * one for the header and one for the checksum at the end
361 	 */
362 	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
363 	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
364 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
365 		return -EINVAL;
366 
367 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
368 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
369 		send_wqe_overhead(type, qp->flags);
370 
371 	if (s > dev->dev->caps.max_sq_desc_sz)
372 		return -EINVAL;
373 
374 	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
375 
376 	/*
377 	 * We need to leave 2 KB + 1 WR of headroom in the SQ to
378 	 * allow HW to prefetch.
379 	 */
380 	qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
381 	qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
382 					    qp->sq_spare_wqes);
383 
384 	qp->sq.max_gs =
385 		(min(dev->dev->caps.max_sq_desc_sz,
386 		     (1 << qp->sq.wqe_shift)) -
387 		 send_wqe_overhead(type, qp->flags)) /
388 		sizeof (struct mlx4_wqe_data_seg);
389 
390 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
391 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
392 	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
393 		qp->rq.offset = 0;
394 		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
395 	} else {
396 		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
397 		qp->sq.offset = 0;
398 	}
399 
400 	cap->max_send_wr  = qp->sq.max_post =
401 		qp->sq.wqe_cnt - qp->sq_spare_wqes;
402 	cap->max_send_sge = min(qp->sq.max_gs,
403 				min(dev->dev->caps.max_sq_sg,
404 				    dev->dev->caps.max_rq_sg));
405 	/* We don't support inline sends for kernel QPs (yet) */
406 	cap->max_inline_data = 0;
407 
408 	return 0;
409 }
410 
411 static int set_user_sq_size(struct mlx4_ib_dev *dev,
412 			    struct mlx4_ib_qp *qp,
413 			    struct mlx4_ib_create_qp *ucmd)
414 {
415 	/* Sanity check SQ size before proceeding */
416 	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
417 	    ucmd->log_sq_stride >
418 		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
419 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
420 		return -EINVAL;
421 
422 	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
423 	qp->sq.wqe_shift = ucmd->log_sq_stride;
424 
425 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
426 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
427 
428 	return 0;
429 }
430 
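/*
 * Allocate and DMA-map one receive buffer per RQ WQE to hold the proxy SQP
 * header of each tunnelled packet; on failure, unwind any mappings already
 * created.
 */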
431 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
432 {
433 	int i;
434 
435 	qp->sqp_proxy_rcv =
436 		kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
437 			      GFP_KERNEL);
438 	if (!qp->sqp_proxy_rcv)
439 		return -ENOMEM;
440 	for (i = 0; i < qp->rq.wqe_cnt; i++) {
441 		qp->sqp_proxy_rcv[i].addr =
442 			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
443 				GFP_KERNEL);
444 		if (!qp->sqp_proxy_rcv[i].addr)
445 			goto err;
446 		qp->sqp_proxy_rcv[i].map =
447 			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
448 					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
449 					  DMA_FROM_DEVICE);
450 		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
451 			kfree(qp->sqp_proxy_rcv[i].addr);
452 			goto err;
453 		}
454 	}
455 	return 0;
456 
457 err:
458 	while (i > 0) {
459 		--i;
460 		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
461 				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
462 				    DMA_FROM_DEVICE);
463 		kfree(qp->sqp_proxy_rcv[i].addr);
464 	}
465 	kfree(qp->sqp_proxy_rcv);
466 	qp->sqp_proxy_rcv = NULL;
467 	return -ENOMEM;
468 }
469 
470 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
471 {
472 	int i;
473 
474 	for (i = 0; i < qp->rq.wqe_cnt; i++) {
475 		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
476 				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
477 				    DMA_FROM_DEVICE);
478 		kfree(qp->sqp_proxy_rcv[i].addr);
479 	}
480 	kfree(qp->sqp_proxy_rcv);
481 }
482 
483 static bool qp_has_rq(struct ib_qp_init_attr *attr)
484 {
485 	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
486 		return false;
487 
488 	return !attr->srq;
489 }
490 
491 static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
492 {
493 	int i;
494 	for (i = 0; i < dev->caps.num_ports; i++) {
495 		if (qpn == dev->caps.spec_qps[i].qp0_proxy)
496 			return !!dev->caps.spec_qps[i].qp0_qkey;
497 	}
498 	return 0;
499 }
500 
501 static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
502 				    struct mlx4_ib_qp *qp)
503 {
504 	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
505 	mlx4_counter_free(dev->dev, qp->counter_index->index);
506 	list_del(&qp->counter_index->list);
507 	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
508 
509 	kfree(qp->counter_index);
510 	qp->counter_index = NULL;
511 }
512 
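/*
 * Fill the RSS context from the user's hash configuration.  Only the Toeplitz
 * hash function is accepted (and only when the device supports it), and each
 * IPv4/IPv6/TCP/UDP field pair must be requested symmetrically (both SRC and
 * DST), with L4 hashing requiring a matching L3 selection.
 */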
513 static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
514 		      struct ib_qp_init_attr *init_attr,
515 		      struct mlx4_ib_create_qp_rss *ucmd)
516 {
517 	rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num |
518 		(init_attr->rwq_ind_tbl->log_ind_tbl_size << 24);
519 
520 	if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&
521 	    (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) {
522 		memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,
523 		       MLX4_EN_RSS_KEY_SIZE);
524 	} else {
525 		pr_debug("RX Hash function is not supported\n");
526 		return (-EOPNOTSUPP);
527 	}
528 
529 	if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4	|
530 					  MLX4_IB_RX_HASH_DST_IPV4	|
531 					  MLX4_IB_RX_HASH_SRC_IPV6	|
532 					  MLX4_IB_RX_HASH_DST_IPV6	|
533 					  MLX4_IB_RX_HASH_SRC_PORT_TCP	|
534 					  MLX4_IB_RX_HASH_DST_PORT_TCP	|
535 					  MLX4_IB_RX_HASH_SRC_PORT_UDP	|
536 					  MLX4_IB_RX_HASH_DST_PORT_UDP  |
537 					  MLX4_IB_RX_HASH_INNER)) {
538 		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
539 			 ucmd->rx_hash_fields_mask);
540 		return (-EOPNOTSUPP);
541 	}
542 
543 	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
544 	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
545 		rss_ctx->flags = MLX4_RSS_IPV4;
546 	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
547 		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
548 		pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n");
549 		return (-EOPNOTSUPP);
550 	}
551 
552 	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
553 	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
554 		rss_ctx->flags |= MLX4_RSS_IPV6;
555 	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
556 		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
557 		pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n");
558 		return (-EOPNOTSUPP);
559 	}
560 
561 	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
562 	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
563 		if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
564 			pr_debug("RX Hash fields_mask for UDP is not supported\n");
565 			return (-EOPNOTSUPP);
566 		}
567 
568 		if (rss_ctx->flags & MLX4_RSS_IPV4)
569 			rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
570 		if (rss_ctx->flags & MLX4_RSS_IPV6)
571 			rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
572 		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
573 			pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
574 			return (-EOPNOTSUPP);
575 		}
576 	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
577 		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
578 		pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n");
579 		return (-EOPNOTSUPP);
580 	}
581 
582 	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
583 	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
584 		if (rss_ctx->flags & MLX4_RSS_IPV4)
585 			rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
586 		if (rss_ctx->flags & MLX4_RSS_IPV6)
587 			rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
588 		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
589 			pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
590 			return (-EOPNOTSUPP);
591 		}
592 	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
593 		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
594 		pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
595 		return (-EOPNOTSUPP);
596 	}
597 
598 	if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
599 		if (dev->dev->caps.tunnel_offload_mode ==
600 		    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
601 			/*
602 			 * Hash according to inner headers if they exist, otherwise
603 			 * according to outer headers.
604 			 */
605 			rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
606 		} else {
607 			pr_debug("RSS Hash for inner headers isn't supported\n");
608 			return (-EOPNOTSUPP);
609 		}
610 	}
611 
612 	return 0;
613 }
614 
615 static int create_qp_rss(struct mlx4_ib_dev *dev,
616 			 struct ib_qp_init_attr *init_attr,
617 			 struct mlx4_ib_create_qp_rss *ucmd,
618 			 struct mlx4_ib_qp *qp)
619 {
620 	int qpn;
621 	int err;
622 
623 	qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
624 
625 	err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
626 	if (err)
627 		return err;
628 
629 	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
630 	if (err)
631 		goto err_qpn;
632 
633 	INIT_LIST_HEAD(&qp->gid_list);
634 	INIT_LIST_HEAD(&qp->steering_rules);
635 
636 	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
637 	qp->state = IB_QPS_RESET;
638 
639 	/* Set dummy send resources to be compatible with HV and PRM */
640 	qp->sq_no_prefetch = 1;
641 	qp->sq.wqe_cnt = 1;
642 	qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
643 	qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
644 	qp->mtt = (to_mqp(
645 		   (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt;
646 
647 	qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL);
648 	if (!qp->rss_ctx) {
649 		err = -ENOMEM;
650 		goto err_qp_alloc;
651 	}
652 
653 	err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
654 	if (err)
655 		goto err;
656 
657 	return 0;
658 
659 err:
660 	kfree(qp->rss_ctx);
661 
662 err_qp_alloc:
663 	mlx4_qp_remove(dev->dev, &qp->mqp);
664 	mlx4_qp_free(dev->dev, &qp->mqp);
665 
666 err_qpn:
667 	mlx4_qp_release_range(dev->dev, qpn, 1);
668 	return err;
669 }
670 
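/*
 * Validate the user command for an RSS QP (Raw Packet type only, no send
 * side, no create flags, no unknown trailing input) before handing off to
 * create_qp_rss().
 */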
671 static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
672 				  struct ib_qp_init_attr *init_attr,
673 				  struct ib_udata *udata)
674 {
675 	struct mlx4_ib_create_qp_rss ucmd = {};
676 	size_t required_cmd_sz;
677 	int err;
678 
679 	if (!udata) {
680 		pr_debug("RSS QP with NULL udata\n");
681 		return -EINVAL;
682 	}
683 
684 	if (udata->outlen)
685 		return -EOPNOTSUPP;
686 
687 	required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
688 					sizeof(ucmd.reserved1);
689 	if (udata->inlen < required_cmd_sz) {
690 		pr_debug("invalid inlen\n");
691 		return -EINVAL;
692 	}
693 
694 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
695 		pr_debug("copy failed\n");
696 		return -EFAULT;
697 	}
698 
699 	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
700 		return -EOPNOTSUPP;
701 
702 	if (ucmd.comp_mask || ucmd.reserved1)
703 		return -EOPNOTSUPP;
704 
705 	if (udata->inlen > sizeof(ucmd) &&
706 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
707 				 udata->inlen - sizeof(ucmd))) {
708 		pr_debug("inlen is not supported\n");
709 		return -EOPNOTSUPP;
710 	}
711 
712 	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
713 		pr_debug("RSS QP with unsupported QP type %d\n",
714 			 init_attr->qp_type);
715 		return -EOPNOTSUPP;
716 	}
717 
718 	if (init_attr->create_flags) {
719 		pr_debug("RSS QP doesn't support create flags\n");
720 		return -EOPNOTSUPP;
721 	}
722 
723 	if (init_attr->send_cq || init_attr->cap.max_send_wr) {
724 		pr_debug("RSS QP with unsupported send attributes\n");
725 		return -EOPNOTSUPP;
726 	}
727 
728 	qp->pri.vid = 0xFFFF;
729 	qp->alt.vid = 0xFFFF;
730 
731 	err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
732 	if (err)
733 		return err;
734 
735 	qp->ibqp.qp_num = qp->mqp.qpn;
736 	return 0;
737 }
738 
739 /*
740  * This function allocates a WQN from a range which is consecutive and aligned
741  * to its size. If the range is full, it creates a new range and allocates a
742  * WQN from it. The new range will be used for subsequent allocations.
743  */
744 static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
745 			     struct mlx4_ib_qp *qp, int range_size, int *wqn)
746 {
747 	struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
748 	struct mlx4_wqn_range *range;
749 	int err = 0;
750 
751 	mutex_lock(&context->wqn_ranges_mutex);
752 
753 	range = list_first_entry_or_null(&context->wqn_ranges_list,
754 					 struct mlx4_wqn_range, list);
755 
756 	if (!range || (range->refcount == range->size) || range->dirty) {
757 		range = kzalloc(sizeof(*range), GFP_KERNEL);
758 		if (!range) {
759 			err = -ENOMEM;
760 			goto out;
761 		}
762 
763 		err = mlx4_qp_reserve_range(dev->dev, range_size,
764 					    range_size, &range->base_wqn, 0,
765 					    qp->mqp.usage);
766 		if (err) {
767 			kfree(range);
768 			goto out;
769 		}
770 
771 		range->size = range_size;
772 		list_add(&range->list, &context->wqn_ranges_list);
773 	} else if (range_size != 1) {
774 		/*
775 		 * Requesting a new range (size > 1) while the last range is still
776 		 * open is not valid.
777 		 */
778 		err = -EINVAL;
779 		goto out;
780 	}
781 
782 	qp->wqn_range = range;
783 
784 	*wqn = range->base_wqn + range->refcount;
785 
786 	range->refcount++;
787 
788 out:
789 	mutex_unlock(&context->wqn_ranges_mutex);
790 
791 	return err;
792 }
793 
794 static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
795 				struct mlx4_ib_qp *qp, bool dirty_release)
796 {
797 	struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
798 	struct mlx4_wqn_range *range;
799 
800 	mutex_lock(&context->wqn_ranges_mutex);
801 
802 	range = qp->wqn_range;
803 
804 	range->refcount--;
805 	if (!range->refcount) {
806 		mlx4_qp_release_range(dev->dev, range->base_wqn,
807 				      range->size);
808 		list_del(&range->list);
809 		kfree(range);
810 	} else if (dirty_release) {
811 	/*
812 	 * A range that has had one of its WQNs destroyed won't be able to be
813 	 * reused for further WQN allocations.
814 	 * The next created WQ will allocate a new range.
815 	 */
816 		range->dirty = true;
817 	}
818 
819 	mutex_unlock(&context->wqn_ranges_mutex);
820 }
821 
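/*
 * Create a userspace receive work queue: copy and validate the user command,
 * register the WQ buffer and doorbell, allocate a WQN from the context's
 * range allocator, and attach the new QP to the device and CQ lists used by
 * the reset flow.
 */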
822 static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
823 		     struct ib_udata *udata, struct mlx4_ib_qp *qp)
824 {
825 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
826 	int qpn;
827 	int err;
828 	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
829 		udata, struct mlx4_ib_ucontext, ibucontext);
830 	struct mlx4_ib_cq *mcq;
831 	unsigned long flags;
832 	int range_size;
833 	struct mlx4_ib_create_wq wq;
834 	size_t copy_len;
835 	int shift;
836 	int n;
837 
838 	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
839 
840 	spin_lock_init(&qp->sq.lock);
841 	spin_lock_init(&qp->rq.lock);
842 	INIT_LIST_HEAD(&qp->gid_list);
843 	INIT_LIST_HEAD(&qp->steering_rules);
844 
845 	qp->state = IB_QPS_RESET;
846 
847 	copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
848 
849 	if (ib_copy_from_udata(&wq, udata, copy_len)) {
850 		err = -EFAULT;
851 		goto err;
852 	}
853 
854 	if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
855 	    wq.reserved[2]) {
856 		pr_debug("user command isn't supported\n");
857 		err = -EOPNOTSUPP;
858 		goto err;
859 	}
860 
861 	if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
862 		pr_debug("WQN range size must be equal or smaller than %d\n",
863 			 dev->dev->caps.max_rss_tbl_sz);
864 		err = -EOPNOTSUPP;
865 		goto err;
866 	}
867 	range_size = 1 << wq.log_range_size;
868 
869 	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
870 		qp->flags |= MLX4_IB_QP_SCATTER_FCS;
871 
872 	err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
873 	if (err)
874 		goto err;
875 
876 	qp->sq_no_prefetch = 1;
877 	qp->sq.wqe_cnt = 1;
878 	qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
879 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
880 		       (qp->sq.wqe_cnt << qp->sq.wqe_shift);
881 
882 	qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
883 	if (IS_ERR(qp->umem)) {
884 		err = PTR_ERR(qp->umem);
885 		goto err;
886 	}
887 
888 	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
889 	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
890 
891 	if (err)
892 		goto err_buf;
893 
894 	err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
895 	if (err)
896 		goto err_mtt;
897 
898 	err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
899 	if (err)
900 		goto err_mtt;
901 	qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
902 
903 	err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
904 	if (err)
905 		goto err_wrid;
906 
907 	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
908 	if (err)
909 		goto err_qpn;
910 
911 	/*
912 	 * Hardware wants QPN written in big-endian order (after
913 	 * shifting) for send doorbell.  Precompute this value to save
914 	 * a little bit when posting sends.
915 	 */
916 	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
917 
918 	qp->mqp.event = mlx4_ib_wq_event;
919 
920 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
921 	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
922 			 to_mcq(init_attr->recv_cq));
923 	/* Maintain device to QPs access, needed for further handling
924 	 * via reset flow
925 	 */
926 	list_add_tail(&qp->qps_list, &dev->qp_list);
927 	/* Maintain CQ to QPs access, needed for further handling
928 	 * via reset flow
929 	 */
930 	mcq = to_mcq(init_attr->send_cq);
931 	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
932 	mcq = to_mcq(init_attr->recv_cq);
933 	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
934 	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
935 			   to_mcq(init_attr->recv_cq));
936 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
937 	return 0;
938 
939 err_qpn:
940 	mlx4_ib_release_wqn(context, qp, 0);
941 err_wrid:
942 	mlx4_ib_db_unmap_user(context, &qp->db);
943 
944 err_mtt:
945 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
946 err_buf:
947 	ib_umem_release(qp->umem);
948 err:
949 	return err;
950 }
951 
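/*
 * Common creation path for kernel and userspace QPs: resolve the effective
 * mlx4 QP type (including proxy/tunnel types under SR-IOV), size and allocate
 * the work queue buffers and doorbell, reserve or derive the QPN, and
 * register the QP on the device and CQ lists used by the reset flow.
 */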
952 static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
953 			    struct ib_udata *udata, int sqpn,
954 			    struct mlx4_ib_qp *qp)
955 {
956 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
957 	int qpn;
958 	int err;
959 	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
960 		udata, struct mlx4_ib_ucontext, ibucontext);
961 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
962 	struct mlx4_ib_cq *mcq;
963 	unsigned long flags;
964 
965 	/* When tunneling special qps, we use a plain UD qp */
966 	if (sqpn) {
967 		if (mlx4_is_mfunc(dev->dev) &&
968 		    (!mlx4_is_master(dev->dev) ||
969 		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
970 			if (init_attr->qp_type == IB_QPT_GSI)
971 				qp_type = MLX4_IB_QPT_PROXY_GSI;
972 			else {
973 				if (mlx4_is_master(dev->dev) ||
974 				    qp0_enabled_vf(dev->dev, sqpn))
975 					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
976 				else
977 					qp_type = MLX4_IB_QPT_PROXY_SMI;
978 			}
979 		}
980 		qpn = sqpn;
981 		/* add extra sg entry for tunneling */
982 		init_attr->cap.max_recv_sge++;
983 	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
984 		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
985 			container_of(init_attr,
986 				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
987 		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
988 		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
989 		    !mlx4_is_master(dev->dev))
990 			return -EINVAL;
991 		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
992 			qp_type = MLX4_IB_QPT_TUN_GSI;
993 		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
994 			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
995 					     tnl_init->port))
996 			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
997 		else
998 			qp_type = MLX4_IB_QPT_TUN_SMI;
999 		/* we are definitely in the PPF here, since we are creating
1000 		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
1001 		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
1002 			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
1003 		sqpn = qpn;
1004 	}
1005 
1006 	if (init_attr->qp_type == IB_QPT_SMI ||
1007 	    init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI ||
1008 	    qp_type == MLX4_IB_QPT_GSI ||
1009 	    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
1010 			MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
1011 		qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
1012 		if (!qp->sqp)
1013 			return -ENOMEM;
1014 	}
1015 
1016 	qp->mlx4_ib_qp_type = qp_type;
1017 
1018 	spin_lock_init(&qp->sq.lock);
1019 	spin_lock_init(&qp->rq.lock);
1020 	INIT_LIST_HEAD(&qp->gid_list);
1021 	INIT_LIST_HEAD(&qp->steering_rules);
1022 
1023 	qp->state = IB_QPS_RESET;
1024 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
1025 		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
1026 
1027 	if (udata) {
1028 		struct mlx4_ib_create_qp ucmd;
1029 		size_t copy_len;
1030 		int shift;
1031 		int n;
1032 
1033 		copy_len = sizeof(struct mlx4_ib_create_qp);
1034 
1035 		if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
1036 			err = -EFAULT;
1037 			goto err;
1038 		}
1039 
1040 		qp->inl_recv_sz = ucmd.inl_recv_sz;
1041 
1042 		if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
1043 			if (!(dev->dev->caps.flags &
1044 			      MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
1045 				pr_debug("scatter FCS is unsupported\n");
1046 				err = -EOPNOTSUPP;
1047 				goto err;
1048 			}
1049 
1050 			qp->flags |= MLX4_IB_QP_SCATTER_FCS;
1051 		}
1052 
1053 		err = set_rq_size(dev, &init_attr->cap, udata,
1054 				  qp_has_rq(init_attr), qp, qp->inl_recv_sz);
1055 		if (err)
1056 			goto err;
1057 
1058 		qp->sq_no_prefetch = ucmd.sq_no_prefetch;
1059 
1060 		err = set_user_sq_size(dev, qp, &ucmd);
1061 		if (err)
1062 			goto err;
1063 
1064 		qp->umem =
1065 			ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
1066 		if (IS_ERR(qp->umem)) {
1067 			err = PTR_ERR(qp->umem);
1068 			goto err;
1069 		}
1070 
1071 		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
1072 		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
1073 
1074 		if (err)
1075 			goto err_buf;
1076 
1077 		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
1078 		if (err)
1079 			goto err_mtt;
1080 
1081 		if (qp_has_rq(init_attr)) {
1082 			err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
1083 			if (err)
1084 				goto err_mtt;
1085 		}
1086 		qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
1087 	} else {
1088 		err = set_rq_size(dev, &init_attr->cap, udata,
1089 				  qp_has_rq(init_attr), qp, 0);
1090 		if (err)
1091 			goto err;
1092 
1093 		qp->sq_no_prefetch = 0;
1094 
1095 		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
1096 			qp->flags |= MLX4_IB_QP_LSO;
1097 
1098 		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
1099 			if (dev->steering_support ==
1100 			    MLX4_STEERING_MODE_DEVICE_MANAGED)
1101 				qp->flags |= MLX4_IB_QP_NETIF;
1102 			else {
1103 				err = -EINVAL;
1104 				goto err;
1105 			}
1106 		}
1107 
1108 		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
1109 		if (err)
1110 			goto err;
1111 
1112 		if (qp_has_rq(init_attr)) {
1113 			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
1114 			if (err)
1115 				goto err;
1116 
1117 			*qp->db.db = 0;
1118 		}
1119 
1120 		if (mlx4_buf_alloc(dev->dev, qp->buf_size,  PAGE_SIZE * 2,
1121 				   &qp->buf)) {
1122 			err = -ENOMEM;
1123 			goto err_db;
1124 		}
1125 
1126 		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
1127 				    &qp->mtt);
1128 		if (err)
1129 			goto err_buf;
1130 
1131 		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
1132 		if (err)
1133 			goto err_mtt;
1134 
1135 		qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
1136 					     sizeof(u64), GFP_KERNEL);
1137 		qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
1138 					     sizeof(u64), GFP_KERNEL);
1139 		if (!qp->sq.wrid || !qp->rq.wrid) {
1140 			err = -ENOMEM;
1141 			goto err_wrid;
1142 		}
1143 		qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
1144 	}
1145 
1146 	if (sqpn) {
1147 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1148 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
1149 			if (alloc_proxy_bufs(pd->device, qp)) {
1150 				err = -ENOMEM;
1151 				goto err_wrid;
1152 			}
1153 		}
1154 	} else {
1155 		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
1156 		 * otherwise, the WQE BlueFlame setup flow wrongly causes
1157 		 * VLAN insertion. */
1158 		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
1159 			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
1160 						    (init_attr->cap.max_send_wr ?
1161 						     MLX4_RESERVE_ETH_BF_QP : 0) |
1162 						    (init_attr->cap.max_recv_wr ?
1163 						     MLX4_RESERVE_A0_QP : 0),
1164 						    qp->mqp.usage);
1165 		else
1166 			if (qp->flags & MLX4_IB_QP_NETIF)
1167 				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
1168 			else
1169 				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
1170 							    &qpn, 0, qp->mqp.usage);
1171 		if (err)
1172 			goto err_proxy;
1173 	}
1174 
1175 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
1176 		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
1177 
1178 	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
1179 	if (err)
1180 		goto err_qpn;
1181 
1182 	if (init_attr->qp_type == IB_QPT_XRC_TGT)
1183 		qp->mqp.qpn |= (1 << 23);
1184 
1185 	/*
1186 	 * Hardware wants QPN written in big-endian order (after
1187 	 * shifting) for send doorbell.  Precompute this value to save
1188 	 * a little bit when posting sends.
1189 	 */
1190 	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
1191 
1192 	qp->mqp.event = mlx4_ib_qp_event;
1193 
1194 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
1195 	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
1196 			 to_mcq(init_attr->recv_cq));
1197 	/* Maintain device to QPs access, needed for further handling
1198 	 * via reset flow
1199 	 */
1200 	list_add_tail(&qp->qps_list, &dev->qp_list);
1201 	/* Maintain CQ to QPs access, needed for further handling
1202 	 * via reset flow
1203 	 */
1204 	mcq = to_mcq(init_attr->send_cq);
1205 	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
1206 	mcq = to_mcq(init_attr->recv_cq);
1207 	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
1208 	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
1209 			   to_mcq(init_attr->recv_cq));
1210 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
1211 	return 0;
1212 
1213 err_qpn:
1214 	if (!sqpn) {
1215 		if (qp->flags & MLX4_IB_QP_NETIF)
1216 			mlx4_ib_steer_qp_free(dev, qpn, 1);
1217 		else
1218 			mlx4_qp_release_range(dev->dev, qpn, 1);
1219 	}
1220 err_proxy:
1221 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1222 		free_proxy_bufs(pd->device, qp);
1223 err_wrid:
1224 	if (udata) {
1225 		if (qp_has_rq(init_attr))
1226 			mlx4_ib_db_unmap_user(context, &qp->db);
1227 	} else {
1228 		kvfree(qp->sq.wrid);
1229 		kvfree(qp->rq.wrid);
1230 	}
1231 
1232 err_mtt:
1233 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
1234 
1235 err_buf:
1236 	if (!qp->umem)
1237 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
1238 	ib_umem_release(qp->umem);
1239 
1240 err_db:
1241 	if (!udata && qp_has_rq(init_attr))
1242 		mlx4_db_free(dev->dev, &qp->db);
1243 
1244 err:
1245 	kfree(qp->sqp);
1246 	return err;
1247 }
1248 
1249 static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
1250 {
1251 	switch (state) {
1252 	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
1253 	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
1254 	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
1255 	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
1256 	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
1257 	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
1258 	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
1259 	default:		return -1;
1260 	}
1261 }
1262 
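/*
 * Lock a QP's send and receive CQs in a consistent order (lower CQN first) to
 * avoid ABBA deadlocks; when both point at the same CQ it is locked once.
 */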
1263 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
1264 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1265 {
1266 	if (send_cq == recv_cq) {
1267 		spin_lock(&send_cq->lock);
1268 		__acquire(&recv_cq->lock);
1269 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1270 		spin_lock(&send_cq->lock);
1271 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1272 	} else {
1273 		spin_lock(&recv_cq->lock);
1274 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1275 	}
1276 }
1277 
1278 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
1279 	__releases(&send_cq->lock) __releases(&recv_cq->lock)
1280 {
1281 	if (send_cq == recv_cq) {
1282 		__release(&recv_cq->lock);
1283 		spin_unlock(&send_cq->lock);
1284 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1285 		spin_unlock(&recv_cq->lock);
1286 		spin_unlock(&send_cq->lock);
1287 	} else {
1288 		spin_unlock(&send_cq->lock);
1289 		spin_unlock(&recv_cq->lock);
1290 	}
1291 }
1292 
1293 static void del_gid_entries(struct mlx4_ib_qp *qp)
1294 {
1295 	struct mlx4_ib_gid_entry *ge, *tmp;
1296 
1297 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1298 		list_del(&ge->list);
1299 		kfree(ge);
1300 	}
1301 }
1302 
1303 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
1304 {
1305 	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
1306 		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
1307 	else
1308 		return to_mpd(qp->ibqp.pd);
1309 }
1310 
1311 static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
1312 		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
1313 {
1314 	switch (qp->ibqp.qp_type) {
1315 	case IB_QPT_XRC_TGT:
1316 		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
1317 		*recv_cq = *send_cq;
1318 		break;
1319 	case IB_QPT_XRC_INI:
1320 		*send_cq = to_mcq(qp->ibqp.send_cq);
1321 		*recv_cq = *send_cq;
1322 		break;
1323 	default:
1324 		*recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
1325 						     to_mcq(qp->ibwq.cq);
1326 		*send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) :
1327 						     *recv_cq;
1328 		break;
1329 	}
1330 }
1331 
1332 static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1333 {
1334 	if (qp->state != IB_QPS_RESET) {
1335 		int i;
1336 
1337 		for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size);
1338 		     i++) {
1339 			struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i];
1340 			struct mlx4_ib_qp *wq =	to_mqp((struct ib_qp *)ibwq);
1341 
1342 			mutex_lock(&wq->mutex);
1343 
1344 			wq->rss_usecnt--;
1345 
1346 			mutex_unlock(&wq->mutex);
1347 		}
1348 
1349 		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
1350 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
1351 			pr_warn("modify QP %06x to RESET failed.\n",
1352 				qp->mqp.qpn);
1353 	}
1354 
1355 	mlx4_qp_remove(dev->dev, &qp->mqp);
1356 	mlx4_qp_free(dev->dev, &qp->mqp);
1357 	mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
1358 	del_gid_entries(qp);
1359 }
1360 
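/*
 * Common QP teardown: move the QP to RESET if needed, release any registered
 * MAC/VLAN resources, unlink the QP from the device and CQ lists under the
 * reset-flow lock, clean the CQs for kernel QPs, and free the QPN, MTT,
 * buffers and doorbell.
 */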
1361 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
1362 			      enum mlx4_ib_source_type src,
1363 			      struct ib_udata *udata)
1364 {
1365 	struct mlx4_ib_cq *send_cq, *recv_cq;
1366 	unsigned long flags;
1367 
1368 	if (qp->state != IB_QPS_RESET) {
1369 		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
1370 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
1371 			pr_warn("modify QP %06x to RESET failed.\n",
1372 			       qp->mqp.qpn);
1373 		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
1374 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
1375 			qp->pri.smac = 0;
1376 			qp->pri.smac_port = 0;
1377 		}
1378 		if (qp->alt.smac) {
1379 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
1380 			qp->alt.smac = 0;
1381 		}
1382 		if (qp->pri.vid < 0x1000) {
1383 			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
1384 			qp->pri.vid = 0xFFFF;
1385 			qp->pri.candidate_vid = 0xFFFF;
1386 			qp->pri.update_vid = 0;
1387 		}
1388 		if (qp->alt.vid < 0x1000) {
1389 			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
1390 			qp->alt.vid = 0xFFFF;
1391 			qp->alt.candidate_vid = 0xFFFF;
1392 			qp->alt.update_vid = 0;
1393 		}
1394 	}
1395 
1396 	get_cqs(qp, src, &send_cq, &recv_cq);
1397 
1398 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
1399 	mlx4_ib_lock_cqs(send_cq, recv_cq);
1400 
1401 	/* del from lists under both locks above to protect reset flow paths */
1402 	list_del(&qp->qps_list);
1403 	list_del(&qp->cq_send_list);
1404 	list_del(&qp->cq_recv_list);
1405 	if (!udata) {
1406 		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1407 				 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
1408 		if (send_cq != recv_cq)
1409 			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1410 	}
1411 
1412 	mlx4_qp_remove(dev->dev, &qp->mqp);
1413 
1414 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
1415 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
1416 
1417 	mlx4_qp_free(dev->dev, &qp->mqp);
1418 
1419 	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
1420 		if (qp->flags & MLX4_IB_QP_NETIF)
1421 			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
1422 		else if (src == MLX4_IB_RWQ_SRC)
1423 			mlx4_ib_release_wqn(
1424 				rdma_udata_to_drv_context(
1425 					udata,
1426 					struct mlx4_ib_ucontext,
1427 					ibucontext),
1428 				qp, 1);
1429 		else
1430 			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
1431 	}
1432 
1433 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
1434 
1435 	if (udata) {
1436 		if (qp->rq.wqe_cnt) {
1437 			struct mlx4_ib_ucontext *mcontext =
1438 				rdma_udata_to_drv_context(
1439 					udata,
1440 					struct mlx4_ib_ucontext,
1441 					ibucontext);
1442 
1443 			mlx4_ib_db_unmap_user(mcontext, &qp->db);
1444 		}
1445 	} else {
1446 		kvfree(qp->sq.wrid);
1447 		kvfree(qp->rq.wrid);
1448 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1449 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
1450 			free_proxy_bufs(&dev->ib_dev, qp);
1451 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
1452 		if (qp->rq.wqe_cnt)
1453 			mlx4_db_free(dev->dev, &qp->db);
1454 	}
1455 	ib_umem_release(qp->umem);
1456 
1457 	del_gid_entries(qp);
1458 }
1459 
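/*
 * Map a QP0/QP1 create request to a firmware QP number: the real special QPN
 * when no proxying is needed (native device, or the PPF creating SR-IOV
 * SQPs), otherwise the per-port proxy QPN.
 */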
1460 static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
1461 {
1462 	/* Native or PPF */
1463 	if (!mlx4_is_mfunc(dev->dev) ||
1464 	    (mlx4_is_master(dev->dev) &&
1465 	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
1466 		return  dev->dev->phys_caps.base_sqpn +
1467 			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
1468 			attr->port_num - 1;
1469 	}
1470 	/* PF or VF -- creating proxies */
1471 	if (attr->qp_type == IB_QPT_SMI)
1472 		return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy;
1473 	else
1474 		return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
1475 }
1476 
1477 static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
1478 			      struct ib_qp_init_attr *init_attr,
1479 			      struct ib_udata *udata)
1480 {
1481 	int err;
1482 	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
1483 	u16 xrcdn = 0;
1484 
1485 	if (init_attr->rwq_ind_tbl)
1486 		return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);
1487 
1488 	/*
1489 	 * We only support LSO, vendor flag1, and multicast loopback blocking,
1490 	 * and only for kernel UD QPs.
1491 	 */
1492 	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
1493 					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
1494 					MLX4_IB_SRIOV_TUNNEL_QP |
1495 					MLX4_IB_SRIOV_SQP |
1496 					MLX4_IB_QP_NETIF |
1497 					MLX4_IB_QP_CREATE_ROCE_V2_GSI))
1498 		return -EINVAL;
1499 
1500 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
1501 		if (init_attr->qp_type != IB_QPT_UD)
1502 			return -EINVAL;
1503 	}
1504 
1505 	if (init_attr->create_flags) {
1506 		if (udata && init_attr->create_flags & ~(sup_u_create_flags))
1507 			return -EINVAL;
1508 
1509 		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
1510 						 MLX4_IB_QP_CREATE_ROCE_V2_GSI  |
1511 						 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
1512 		     init_attr->qp_type != IB_QPT_UD) ||
1513 		    (init_attr->create_flags & MLX4_IB_SRIOV_SQP &&
1514 		     init_attr->qp_type > IB_QPT_GSI) ||
1515 		    (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
1516 		     init_attr->qp_type != IB_QPT_GSI))
1517 			return -EINVAL;
1518 	}
1519 
1520 	switch (init_attr->qp_type) {
1521 	case IB_QPT_XRC_TGT:
1522 		pd = to_mxrcd(init_attr->xrcd)->pd;
1523 		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1524 		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
1525 		fallthrough;
1526 	case IB_QPT_XRC_INI:
1527 		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1528 			return -ENOSYS;
1529 		init_attr->recv_cq = init_attr->send_cq;
1530 		fallthrough;
1531 	case IB_QPT_RC:
1532 	case IB_QPT_UC:
1533 	case IB_QPT_RAW_PACKET:
1534 	case IB_QPT_UD:
1535 		qp->pri.vid = 0xFFFF;
1536 		qp->alt.vid = 0xFFFF;
1537 		err = create_qp_common(pd, init_attr, udata, 0, qp);
1538 		if (err)
1539 			return err;
1540 
1541 		qp->ibqp.qp_num = qp->mqp.qpn;
1542 		qp->xrcdn = xrcdn;
1543 		break;
1544 	case IB_QPT_SMI:
1545 	case IB_QPT_GSI:
1546 	{
1547 		int sqpn;
1548 
1549 		if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
1550 			int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
1551 							1, 1, &sqpn, 0,
1552 							MLX4_RES_USAGE_DRIVER);
1553 
1554 			if (res)
1555 				return res;
1556 		} else {
1557 			sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
1558 		}
1559 
1560 		qp->pri.vid = 0xFFFF;
1561 		qp->alt.vid = 0xFFFF;
1562 		err = create_qp_common(pd, init_attr, udata, sqpn, qp);
1563 		if (err)
1564 			return err;
1565 
1566 		qp->port	= init_attr->port_num;
1567 		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
1568 			init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
1569 		break;
1570 	}
1571 	default:
1572 		/* Don't support raw QPs */
1573 		return -EOPNOTSUPP;
1574 	}
1575 	return 0;
1576 }
1577 
1578 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
1579 				struct ib_qp_init_attr *init_attr,
1580 				struct ib_udata *udata) {
1581 	struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
1582 	struct mlx4_ib_dev *dev = to_mdev(device);
1583 	struct mlx4_ib_qp *qp;
1584 	int ret;
1585 
1586 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1587 	if (!qp)
1588 		return ERR_PTR(-ENOMEM);
1589 
1590 	mutex_init(&qp->mutex);
1591 	ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
1592 	if (ret) {
1593 		kfree(qp);
1594 		return ERR_PTR(ret);
1595 	}
1596 
1597 	if (init_attr->qp_type == IB_QPT_GSI &&
1598 	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
1599 		struct mlx4_ib_sqp *sqp = qp->sqp;
1600 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
1601 
1602 		if (is_eth &&
1603 		    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
1604 			init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI;
1605 			sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
1606 
1607 			if (IS_ERR(sqp->roce_v2_gsi)) {
1608 				pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
1609 				sqp->roce_v2_gsi = NULL;
1610 			} else {
1611 				to_mqp(sqp->roce_v2_gsi)->flags |=
1612 					MLX4_IB_ROCE_V2_GSI_QP;
1613 			}
1614 
1615 			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
1616 		}
1617 	}
1618 	return &qp->ibqp;
1619 }
1620 
1621 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
1622 {
1623 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
1624 	struct mlx4_ib_qp *mqp = to_mqp(qp);
1625 
1626 	if (is_qp0(dev, mqp))
1627 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
1628 
1629 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
1630 	    dev->qp1_proxy[mqp->port - 1] == mqp) {
1631 		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
1632 		dev->qp1_proxy[mqp->port - 1] = NULL;
1633 		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
1634 	}
1635 
1636 	if (mqp->counter_index)
1637 		mlx4_ib_free_qp_counter(dev, mqp);
1638 
1639 	if (qp->rwq_ind_tbl) {
1640 		destroy_qp_rss(dev, mqp);
1641 	} else {
1642 		destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
1643 	}
1644 
1645 	kfree(mqp->sqp);
1646 	kfree(mqp);
1647 
1648 	return 0;
1649 }
1650 
1651 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
1652 {
1653 	struct mlx4_ib_qp *mqp = to_mqp(qp);
1654 
1655 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
1656 		struct mlx4_ib_sqp *sqp = mqp->sqp;
1657 
1658 		if (sqp->roce_v2_gsi)
1659 			ib_destroy_qp(sqp->roce_v2_gsi);
1660 	}
1661 
1662 	return _mlx4_ib_destroy_qp(qp, udata);
1663 }
1664 
1665 static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
1666 {
1667 	switch (type) {
1668 	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
1669 	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
1670 	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
1671 	case MLX4_IB_QPT_XRC_INI:
1672 	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
1673 	case MLX4_IB_QPT_SMI:
1674 	case MLX4_IB_QPT_GSI:
1675 	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;
1676 
1677 	case MLX4_IB_QPT_PROXY_SMI_OWNER:
1678 	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
1679 						MLX4_QP_ST_MLX : -1);
1680 	case MLX4_IB_QPT_PROXY_SMI:
1681 	case MLX4_IB_QPT_TUN_SMI:
1682 	case MLX4_IB_QPT_PROXY_GSI:
1683 	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
1684 						MLX4_QP_ST_UD : -1);
1685 	default:			return -1;
1686 	}
1687 }
1688 
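/*
 * Translate IB remote access flags into the MLX4 QPC bits (RRE/RAE/RWE).
 * Attributes not carried in attr_mask fall back to the values cached on
 * the QP; when the destination RD-atomic depth is zero, only remote
 * write access is preserved.
 */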
1689 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
1690 				   int attr_mask)
1691 {
1692 	u8 dest_rd_atomic;
1693 	u32 access_flags;
1694 	u32 hw_access_flags = 0;
1695 
1696 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1697 		dest_rd_atomic = attr->max_dest_rd_atomic;
1698 	else
1699 		dest_rd_atomic = qp->resp_depth;
1700 
1701 	if (attr_mask & IB_QP_ACCESS_FLAGS)
1702 		access_flags = attr->qp_access_flags;
1703 	else
1704 		access_flags = qp->atomic_rd_en;
1705 
1706 	if (!dest_rd_atomic)
1707 		access_flags &= IB_ACCESS_REMOTE_WRITE;
1708 
1709 	if (access_flags & IB_ACCESS_REMOTE_READ)
1710 		hw_access_flags |= MLX4_QP_BIT_RRE;
1711 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1712 		hw_access_flags |= MLX4_QP_BIT_RAE;
1713 	if (access_flags & IB_ACCESS_REMOTE_WRITE)
1714 		hw_access_flags |= MLX4_QP_BIT_RWE;
1715 
1716 	return cpu_to_be32(hw_access_flags);
1717 }
1718 
1719 static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
1720 			    int attr_mask)
1721 {
1722 	if (attr_mask & IB_QP_PKEY_INDEX)
1723 		sqp->pkey_index = attr->pkey_index;
1724 	if (attr_mask & IB_QP_QKEY)
1725 		sqp->qkey = attr->qkey;
1726 	if (attr_mask & IB_QP_SQ_PSN)
1727 		sqp->send_psn = attr->sq_psn;
1728 }
1729 
1730 static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
1731 {
1732 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
1733 }
1734 
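/*
 * Fill a QP address path from an address handle.  Clamps the static rate
 * to what the device supports, programs the GRH/SGID fields, and, for
 * RoCE, records candidate VLAN and source-MAC registrations in smac_info
 * so they can be committed or rolled back once the modify-QP completes.
 */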
1735 static int _mlx4_set_path(struct mlx4_ib_dev *dev,
1736 			  const struct rdma_ah_attr *ah,
1737 			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
1738 			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
1739 {
1740 	int vidx;
1741 	int smac_index;
1742 	int err;
1743 
1744 	path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
1745 	path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
1746 	if (rdma_ah_get_static_rate(ah)) {
1747 		path->static_rate = rdma_ah_get_static_rate(ah) +
1748 				    MLX4_STAT_RATE_OFFSET;
1749 		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
1750 		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
1751 			--path->static_rate;
1752 	} else
1753 		path->static_rate = 0;
1754 
1755 	if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
1756 		const struct ib_global_route *grh = rdma_ah_read_grh(ah);
1757 		int real_sgid_index =
1758 			mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr);
1759 
1760 		if (real_sgid_index < 0)
1761 			return real_sgid_index;
1762 		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
1763 			pr_err("sgid_index (%u) too large. max is %d\n",
1764 			       real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
1765 			return -1;
1766 		}
1767 
1768 		path->grh_mylmc |= 1 << 7;
1769 		path->mgid_index = real_sgid_index;
1770 		path->hop_limit  = grh->hop_limit;
1771 		path->tclass_flowlabel =
1772 			cpu_to_be32((grh->traffic_class << 20) |
1773 				    (grh->flow_label));
1774 		memcpy(path->rgid, grh->dgid.raw, 16);
1775 	}
1776 
1777 	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
1778 		if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH))
1779 			return -1;
1780 
1781 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1782 			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3);
1783 
1784 		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
1785 		if (vlan_tag < 0x1000) {
1786 			if (smac_info->vid < 0x1000) {
1787 				/* both valid vlan ids */
1788 				if (smac_info->vid != vlan_tag) {
1789 					/* different VIDs.  unreg old and reg new */
1790 					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
1791 					if (err)
1792 						return err;
1793 					smac_info->candidate_vid = vlan_tag;
1794 					smac_info->candidate_vlan_index = vidx;
1795 					smac_info->candidate_vlan_port = port;
1796 					smac_info->update_vid = 1;
1797 					path->vlan_index = vidx;
1798 				} else {
1799 					path->vlan_index = smac_info->vlan_index;
1800 				}
1801 			} else {
1802 				/* no current vlan tag in qp */
1803 				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
1804 				if (err)
1805 					return err;
1806 				smac_info->candidate_vid = vlan_tag;
1807 				smac_info->candidate_vlan_index = vidx;
1808 				smac_info->candidate_vlan_port = port;
1809 				smac_info->update_vid = 1;
1810 				path->vlan_index = vidx;
1811 			}
1812 			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
1813 			path->fl = 1 << 6;
1814 		} else {
1815 			/* have current vlan tag. unregister it at modify-qp success */
1816 			if (smac_info->vid < 0x1000) {
1817 				smac_info->candidate_vid = 0xFFFF;
1818 				smac_info->update_vid = 1;
1819 			}
1820 		}
1821 
1822 		/* get smac_index for RoCE use.
1823 		 * If no smac was yet assigned, register one.
1824 		 * If one was already assigned, but the new mac differs,
1825 		 * unregister the old one and register the new one.
1826 		 */
1827 		if ((!smac_info->smac && !smac_info->smac_port) ||
1828 		    smac_info->smac != smac) {
1829 			/* register candidate now, unreg if needed, after success */
1830 			smac_index = mlx4_register_mac(dev->dev, port, smac);
1831 			if (smac_index >= 0) {
1832 				smac_info->candidate_smac_index = smac_index;
1833 				smac_info->candidate_smac = smac;
1834 				smac_info->candidate_smac_port = port;
1835 			} else {
1836 				return -EINVAL;
1837 			}
1838 		} else {
1839 			smac_index = smac_info->smac_index;
1840 		}
1841 		memcpy(path->dmac, ah->roce.dmac, 6);
1842 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
1843 		/* put MAC table smac index for IBoE */
1844 		path->grh_mylmc = (u8) (smac_index) | 0x80;
1845 	} else {
1846 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
1847 			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2);
1848 	}
1849 
1850 	return 0;
1851 }
1852 
1853 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
1854 			 enum ib_qp_attr_mask qp_attr_mask,
1855 			 struct mlx4_ib_qp *mqp,
1856 			 struct mlx4_qp_path *path, u8 port,
1857 			 u16 vlan_id, u8 *smac)
1858 {
1859 	return _mlx4_set_path(dev, &qp->ah_attr,
1860 			      mlx4_mac_to_u64(smac),
1861 			      vlan_id,
1862 			      path, &mqp->pri, port);
1863 }
1864 
1865 static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
1866 			     const struct ib_qp_attr *qp,
1867 			     enum ib_qp_attr_mask qp_attr_mask,
1868 			     struct mlx4_ib_qp *mqp,
1869 			     struct mlx4_qp_path *path, u8 port)
1870 {
1871 	return _mlx4_set_path(dev, &qp->alt_ah_attr,
1872 			      0,
1873 			      0xffff,
1874 			      path, &mqp->alt, port);
1875 }
1876 
1877 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1878 {
1879 	struct mlx4_ib_gid_entry *ge, *tmp;
1880 
1881 	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1882 		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
1883 			ge->added = 1;
1884 			ge->port = qp->port;
1885 		}
1886 	}
1887 }
1888 
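/*
 * For Ethernet UD QPs, make sure the primary path carries a source-MAC
 * index: if the QP has no source MAC yet, register the port's current
 * MAC as a candidate and store its index in grh_mylmc.
 */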
1889 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
1890 				    struct mlx4_ib_qp *qp,
1891 				    struct mlx4_qp_context *context)
1892 {
1893 	u64 u64_mac;
1894 	int smac_index;
1895 
1896 	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
1897 
1898 	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
1899 	if (!qp->pri.smac && !qp->pri.smac_port) {
1900 		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
1901 		if (smac_index >= 0) {
1902 			qp->pri.candidate_smac_index = smac_index;
1903 			qp->pri.candidate_smac = u64_mac;
1904 			qp->pri.candidate_smac_port = qp->port;
1905 			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
1906 		} else {
1907 			return -ENOENT;
1908 		}
1909 	}
1910 	return 0;
1911 }
1912 
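/*
 * Allocate a dedicated flow counter used for loopback source checking.
 * Only relevant for Ethernet QPs created with BLOCK_MULTICAST_LOOPBACK
 * on devices that support LB_SRC_CHK; otherwise this is a no-op.
 */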
1913 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1914 {
1915 	struct counter_index *new_counter_index;
1916 	int err;
1917 	u32 tmp_idx;
1918 
1919 	if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
1920 	    IB_LINK_LAYER_ETHERNET ||
1921 	    !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
1922 	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
1923 		return 0;
1924 
1925 	err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER);
1926 	if (err)
1927 		return err;
1928 
1929 	new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
1930 	if (!new_counter_index) {
1931 		mlx4_counter_free(dev->dev, tmp_idx);
1932 		return -ENOMEM;
1933 	}
1934 
1935 	new_counter_index->index = tmp_idx;
1936 	new_counter_index->allocated = 1;
1937 	qp->counter_index = new_counter_index;
1938 
1939 	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
1940 	list_add_tail(&new_counter_index->list,
1941 		      &dev->counters_table[qp->port - 1].counters_list);
1942 	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
1943 
1944 	return 0;
1945 }
1946 
1947 enum {
1948 	MLX4_QPC_ROCE_MODE_1 = 0,
1949 	MLX4_QPC_ROCE_MODE_2 = 2,
1950 	MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
1951 };
1952 
1953 static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
1954 {
1955 	switch (gid_type) {
1956 	case IB_GID_TYPE_ROCE:
1957 		return MLX4_QPC_ROCE_MODE_1;
1958 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
1959 		return MLX4_QPC_ROCE_MODE_2;
1960 	default:
1961 		return MLX4_QPC_ROCE_MODE_UNDEFINED;
1962 	}
1963 }
1964 
1965 /*
1966  * Go over all of an RSS QP's children (WQs) and apply their HW state according
1967  * to their logical state, if this RSS QP is the first RSS QP associated with the WQ.
1968  */
1969 static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
1970 			    struct ib_udata *udata)
1971 {
1972 	int err = 0;
1973 	int i;
1974 
1975 	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
1976 		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
1977 		struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
1978 
1979 		mutex_lock(&wq->mutex);
1980 
1981 		/* mlx4_ib restriction:
1982 		 * a WQ is associated with a port according to the RSS QP it
1983 		 * is associated with.
1984 		 * If the WQ is already associated with a different port by
1985 		 * another RSS QP, return a failure.
1986 		 */
1987 		if ((wq->rss_usecnt > 0) && (wq->port != port_num)) {
1988 			err = -EINVAL;
1989 			mutex_unlock(&wq->mutex);
1990 			break;
1991 		}
1992 		wq->port = port_num;
1993 		if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
1994 			err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata);
1995 			if (err) {
1996 				mutex_unlock(&wq->mutex);
1997 				break;
1998 			}
1999 		}
2000 		wq->rss_usecnt++;
2001 
2002 		mutex_unlock(&wq->mutex);
2003 	}
2004 
2005 	if (i && err) {
2006 		int j;
2007 
2008 		for (j = (i - 1); j >= 0; j--) {
2009 			struct ib_wq *ibwq = ind_tbl->ind_tbl[j];
2010 			struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
2011 
2012 			mutex_lock(&wq->mutex);
2013 
2014 			if ((wq->rss_usecnt == 1) &&
2015 			    (ibwq->state == IB_WQS_RDY))
2016 				if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET,
2017 						       udata))
2018 					pr_warn("failed to reverse WQN=0x%06x\n",
2019 						ibwq->wq_num);
2020 			wq->rss_usecnt--;
2021 
2022 			mutex_unlock(&wq->mutex);
2023 		}
2024 	}
2025 
2026 	return err;
2027 }
2028 
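/*
 * Undo bringup_rss_rwqs(): drop each WQ's RSS reference and move the WQ
 * back to RESET when this was the last RSS QP using it.
 */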
2029 static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
2030 				struct ib_udata *udata)
2031 {
2032 	int i;
2033 
2034 	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
2035 		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
2036 		struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
2037 
2038 		mutex_lock(&wq->mutex);
2039 
2040 		if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
2041 			if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata))
2042 				pr_warn("failed to reverse WQN=%x\n",
2043 					ibwq->wq_num);
2044 		wq->rss_usecnt--;
2045 
2046 		mutex_unlock(&wq->mutex);
2047 	}
2048 }
2049 
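/*
 * Write the RSS parameters (base QPN, hash flags and Toeplitz key) into
 * the region of the QP context that normally holds the primary path.
 */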
2050 static void fill_qp_rss_context(struct mlx4_qp_context *context,
2051 				struct mlx4_ib_qp *qp)
2052 {
2053 	struct mlx4_rss_context *rss_context;
2054 
2055 	rss_context = (void *)context + offsetof(struct mlx4_qp_context,
2056 			pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
2057 
2058 	rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz);
2059 	rss_context->default_qpn =
2060 		cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff);
2061 	if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6))
2062 		rss_context->base_qpn_udp = rss_context->default_qpn;
2063 	rss_context->flags = qp->rss_ctx->flags;
2064 	/* Currently support just toeplitz */
2065 	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
2066 
2067 	memcpy(rss_context->rss_key, qp->rss_ctx->rss_key,
2068 	       MLX4_EN_RSS_KEY_SIZE);
2069 }
2070 
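/*
 * Common modify-QP worker shared by regular QPs and RWQ sources: build
 * the mlx4_qp_context for the requested state transition, issue the
 * firmware command, and then commit or roll back any candidate MAC/VLAN
 * registrations made while building the paths.
 */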
2071 static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
2072 			       const struct ib_qp_attr *attr, int attr_mask,
2073 			       enum ib_qp_state cur_state,
2074 			       enum ib_qp_state new_state,
2075 			       struct ib_udata *udata)
2076 {
2077 	struct ib_srq  *ibsrq;
2078 	const struct ib_gid_attr *gid_attr = NULL;
2079 	struct ib_rwq_ind_table *rwq_ind_tbl;
2080 	enum ib_qp_type qp_type;
2081 	struct mlx4_ib_dev *dev;
2082 	struct mlx4_ib_qp *qp;
2083 	struct mlx4_ib_pd *pd;
2084 	struct mlx4_ib_cq *send_cq, *recv_cq;
2085 	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2086 		udata, struct mlx4_ib_ucontext, ibucontext);
2087 	struct mlx4_qp_context *context;
2088 	enum mlx4_qp_optpar optpar = 0;
2089 	int sqd_event;
2090 	int steer_qp = 0;
2091 	int err = -EINVAL;
2092 	int counter_index;
2093 
2094 	if (src_type == MLX4_IB_RWQ_SRC) {
2095 		struct ib_wq *ibwq;
2096 
2097 		ibwq	    = (struct ib_wq *)src;
2098 		ibsrq	    = NULL;
2099 		rwq_ind_tbl = NULL;
2100 		qp_type     = IB_QPT_RAW_PACKET;
2101 		qp	    = to_mqp((struct ib_qp *)ibwq);
2102 		dev	    = to_mdev(ibwq->device);
2103 		pd	    = to_mpd(ibwq->pd);
2104 	} else {
2105 		struct ib_qp *ibqp;
2106 
2107 		ibqp	    = (struct ib_qp *)src;
2108 		ibsrq	    = ibqp->srq;
2109 		rwq_ind_tbl = ibqp->rwq_ind_tbl;
2110 		qp_type     = ibqp->qp_type;
2111 		qp	    = to_mqp(ibqp);
2112 		dev	    = to_mdev(ibqp->device);
2113 		pd	    = get_pd(qp);
2114 	}
2115 
2116 	/* APM is not supported under RoCE */
2117 	if (attr_mask & IB_QP_ALT_PATH &&
2118 	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
2119 	    IB_LINK_LAYER_ETHERNET)
2120 		return -ENOTSUPP;
2121 
2122 	context = kzalloc(sizeof *context, GFP_KERNEL);
2123 	if (!context)
2124 		return -ENOMEM;
2125 
2126 	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
2127 				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
2128 
2129 	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
2130 		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
2131 	else {
2132 		optpar |= MLX4_QP_OPTPAR_PM_STATE;
2133 		switch (attr->path_mig_state) {
2134 		case IB_MIG_MIGRATED:
2135 			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
2136 			break;
2137 		case IB_MIG_REARM:
2138 			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
2139 			break;
2140 		case IB_MIG_ARMED:
2141 			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
2142 			break;
2143 		}
2144 	}
2145 
2146 	if (qp->inl_recv_sz)
2147 		context->param3 |= cpu_to_be32(1 << 25);
2148 
2149 	if (qp->flags & MLX4_IB_QP_SCATTER_FCS)
2150 		context->param3 |= cpu_to_be32(1 << 29);
2151 
2152 	if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI)
2153 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
2154 	else if (qp_type == IB_QPT_RAW_PACKET)
2155 		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
2156 	else if (qp_type == IB_QPT_UD) {
2157 		if (qp->flags & MLX4_IB_QP_LSO)
2158 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
2159 					      ilog2(dev->dev->caps.max_gso_sz);
2160 		else
2161 			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
2162 	} else if (attr_mask & IB_QP_PATH_MTU) {
2163 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
2164 			pr_err("path MTU (%u) is invalid\n",
2165 			       attr->path_mtu);
2166 			goto out;
2167 		}
2168 		context->mtu_msgmax = (attr->path_mtu << 5) |
2169 			ilog2(dev->dev->caps.max_msg_sz);
2170 	}
2171 
2172 	if (!rwq_ind_tbl) { /* PRM RSS receive side should be left zeros */
2173 		if (qp->rq.wqe_cnt)
2174 			context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
2175 		context->rq_size_stride |= qp->rq.wqe_shift - 4;
2176 	}
2177 
2178 	if (qp->sq.wqe_cnt)
2179 		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
2180 	context->sq_size_stride |= qp->sq.wqe_shift - 4;
2181 
2182 	if (new_state == IB_QPS_RESET && qp->counter_index)
2183 		mlx4_ib_free_qp_counter(dev, qp);
2184 
2185 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2186 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
2187 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
2188 		if (qp_type == IB_QPT_RAW_PACKET)
2189 			context->param3 |= cpu_to_be32(1 << 30);
2190 	}
2191 
2192 	if (ucontext)
2193 		context->usr_page = cpu_to_be32(
2194 			mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
2195 	else
2196 		context->usr_page = cpu_to_be32(
2197 			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
2198 
2199 	if (attr_mask & IB_QP_DEST_QPN)
2200 		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
2201 
2202 	if (attr_mask & IB_QP_PORT) {
2203 		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
2204 		    !(attr_mask & IB_QP_AV)) {
2205 			mlx4_set_sched(&context->pri_path, attr->port_num);
2206 			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
2207 		}
2208 	}
2209 
2210 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2211 		err = create_qp_lb_counter(dev, qp);
2212 		if (err)
2213 			goto out;
2214 
2215 		counter_index =
2216 			dev->counters_table[qp->port - 1].default_counter;
2217 		if (qp->counter_index)
2218 			counter_index = qp->counter_index->index;
2219 
2220 		if (counter_index != -1) {
2221 			context->pri_path.counter_index = counter_index;
2222 			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
2223 			if (qp->counter_index) {
2224 				context->pri_path.fl |=
2225 					MLX4_FL_ETH_SRC_CHECK_MC_LB;
2226 				context->pri_path.vlan_control |=
2227 					MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
2228 			}
2229 		} else
2230 			context->pri_path.counter_index =
2231 				MLX4_SINK_COUNTER_INDEX(dev->dev);
2232 
2233 		if (qp->flags & MLX4_IB_QP_NETIF) {
2234 			mlx4_ib_steer_qp_reg(dev, qp, 1);
2235 			steer_qp = 1;
2236 		}
2237 
2238 		if (qp_type == IB_QPT_GSI) {
2239 			enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
2240 				IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE;
2241 			u8 qpc_roce_mode = gid_type_to_qpc(gid_type);
2242 
2243 			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
2244 		}
2245 	}
2246 
2247 	if (attr_mask & IB_QP_PKEY_INDEX) {
2248 		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
2249 			context->pri_path.disable_pkey_check = 0x40;
2250 		context->pri_path.pkey_index = attr->pkey_index;
2251 		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
2252 	}
2253 
2254 	if (attr_mask & IB_QP_AV) {
2255 		u8 port_num = mlx4_is_bonded(dev->dev) ? 1 :
2256 			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2257 		u16 vlan = 0xffff;
2258 		u8 smac[ETH_ALEN];
2259 		int is_eth =
2260 			rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
2261 			rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
2262 
2263 		if (is_eth) {
2264 			gid_attr = attr->ah_attr.grh.sgid_attr;
2265 			err = rdma_read_gid_l2_fields(gid_attr, &vlan,
2266 						      &smac[0]);
2267 			if (err)
2268 				goto out;
2269 		}
2270 
2271 		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
2272 				  port_num, vlan, smac))
2273 			goto out;
2274 
2275 		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
2276 			   MLX4_QP_OPTPAR_SCHED_QUEUE);
2277 
2278 		if (is_eth &&
2279 		    (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
2280 			u8 qpc_roce_mode = gid_type_to_qpc(gid_attr->gid_type);
2281 
2282 			if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
2283 				err = -EINVAL;
2284 				goto out;
2285 			}
2286 			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
2287 		}
2288 
2289 	}
2290 
2291 	if (attr_mask & IB_QP_TIMEOUT) {
2292 		context->pri_path.ackto |= attr->timeout << 3;
2293 		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
2294 	}
2295 
2296 	if (attr_mask & IB_QP_ALT_PATH) {
2297 		if (attr->alt_port_num == 0 ||
2298 		    attr->alt_port_num > dev->dev->caps.num_ports)
2299 			goto out;
2300 
2301 		if (attr->alt_pkey_index >=
2302 		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
2303 			goto out;
2304 
2305 		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
2306 				      &context->alt_path,
2307 				      attr->alt_port_num))
2308 			goto out;
2309 
2310 		context->alt_path.pkey_index = attr->alt_pkey_index;
2311 		context->alt_path.ackto = attr->alt_timeout << 3;
2312 		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
2313 	}
2314 
2315 	context->pd = cpu_to_be32(pd->pdn);
2316 
2317 	if (!rwq_ind_tbl) {
2318 		context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
2319 		get_cqs(qp, src_type, &send_cq, &recv_cq);
2320 	} else { /* Set dummy CQs to be compatible with HV and PRM */
2321 		send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq);
2322 		recv_cq = send_cq;
2323 	}
2324 	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
2325 	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
2326 
2327 	/* Set "fast registration enabled" for all kernel QPs */
2328 	if (!ucontext)
2329 		context->params1 |= cpu_to_be32(1 << 11);
2330 
2331 	if (attr_mask & IB_QP_RNR_RETRY) {
2332 		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
2333 		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
2334 	}
2335 
2336 	if (attr_mask & IB_QP_RETRY_CNT) {
2337 		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
2338 		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
2339 	}
2340 
2341 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2342 		if (attr->max_rd_atomic)
2343 			context->params1 |=
2344 				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
2345 		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
2346 	}
2347 
2348 	if (attr_mask & IB_QP_SQ_PSN)
2349 		context->next_send_psn = cpu_to_be32(attr->sq_psn);
2350 
2351 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2352 		if (attr->max_dest_rd_atomic)
2353 			context->params2 |=
2354 				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
2355 		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
2356 	}
2357 
2358 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
2359 		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
2360 		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
2361 	}
2362 
2363 	if (ibsrq)
2364 		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
2365 
2366 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2367 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
2368 		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
2369 	}
2370 	if (attr_mask & IB_QP_RQ_PSN)
2371 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
2372 
2373 	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
2374 	if (attr_mask & IB_QP_QKEY) {
2375 		if (qp->mlx4_ib_qp_type &
2376 		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
2377 			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
2378 		else {
2379 			if (mlx4_is_mfunc(dev->dev) &&
2380 			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
2381 			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
2382 			    MLX4_RESERVED_QKEY_BASE) {
2383 				pr_err("Cannot use reserved QKEY"
2384 				       " 0x%x (range 0xffff0000..0xffffffff"
2385 				       " is reserved)\n", attr->qkey);
2386 				err = -EINVAL;
2387 				goto out;
2388 			}
2389 			context->qkey = cpu_to_be32(attr->qkey);
2390 		}
2391 		optpar |= MLX4_QP_OPTPAR_Q_KEY;
2392 	}
2393 
2394 	if (ibsrq)
2395 		context->srqn = cpu_to_be32(1 << 24 |
2396 					    to_msrq(ibsrq)->msrq.srqn);
2397 
2398 	if (qp->rq.wqe_cnt &&
2399 	    cur_state == IB_QPS_RESET &&
2400 	    new_state == IB_QPS_INIT)
2401 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
2402 
2403 	if (cur_state == IB_QPS_INIT &&
2404 	    new_state == IB_QPS_RTR  &&
2405 	    (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI ||
2406 	     qp_type == IB_QPT_UD || qp_type == IB_QPT_RAW_PACKET)) {
2407 		context->pri_path.sched_queue = (qp->port - 1) << 6;
2408 		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
2409 		    qp->mlx4_ib_qp_type &
2410 		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
2411 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
2412 			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
2413 				context->pri_path.fl = 0x80;
2414 		} else {
2415 			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
2416 				context->pri_path.fl = 0x80;
2417 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
2418 		}
2419 		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
2420 		    IB_LINK_LAYER_ETHERNET) {
2421 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
2422 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
2423 				context->pri_path.feup = 1 << 7; /* don't fsm */
2424 			/* handle smac_index */
2425 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
2426 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
2427 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
2428 				err = handle_eth_ud_smac_index(dev, qp, context);
2429 				if (err) {
2430 					err = -EINVAL;
2431 					goto out;
2432 				}
2433 				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
2434 					dev->qp1_proxy[qp->port - 1] = qp;
2435 			}
2436 		}
2437 	}
2438 
2439 	if (qp_type == IB_QPT_RAW_PACKET) {
2440 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
2441 					MLX4_IB_LINK_TYPE_ETH;
2442 		if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2443 			/* set QP to receive both tunneled & non-tunneled packets */
2444 			if (!rwq_ind_tbl)
2445 				context->srqn = cpu_to_be32(7 << 28);
2446 		}
2447 	}
2448 
2449 	if (qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
2450 		int is_eth = rdma_port_get_link_layer(
2451 				&dev->ib_dev, qp->port) ==
2452 				IB_LINK_LAYER_ETHERNET;
2453 		if (is_eth) {
2454 			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
2455 			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
2456 		}
2457 	}
2458 
2459 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
2460 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
2461 		sqd_event = 1;
2462 	else
2463 		sqd_event = 0;
2464 
2465 	if (!ucontext &&
2466 	    cur_state == IB_QPS_RESET &&
2467 	    new_state == IB_QPS_INIT)
2468 		context->rlkey_roce_mode |= (1 << 4);
2469 
2470 	/*
2471 	 * Before passing a kernel QP to the HW, make sure that the
2472 	 * ownership bits of the send queue are set and the SQ
2473 	 * headroom is stamped so that the hardware doesn't start
2474 	 * processing stale work requests.
2475 	 */
2476 	if (!ucontext &&
2477 	    cur_state == IB_QPS_RESET &&
2478 	    new_state == IB_QPS_INIT) {
2479 		struct mlx4_wqe_ctrl_seg *ctrl;
2480 		int i;
2481 
2482 		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
2483 			ctrl = get_send_wqe(qp, i);
2484 			ctrl->owner_opcode = cpu_to_be32(1 << 31);
2485 			ctrl->qpn_vlan.fence_size =
2486 				1 << (qp->sq.wqe_shift - 4);
2487 			stamp_send_wqe(qp, i);
2488 		}
2489 	}
2490 
2491 	if (rwq_ind_tbl	&&
2492 	    cur_state == IB_QPS_RESET &&
2493 	    new_state == IB_QPS_INIT) {
2494 		fill_qp_rss_context(context, qp);
2495 		context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
2496 	}
2497 
2498 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
2499 			     to_mlx4_state(new_state), context, optpar,
2500 			     sqd_event, &qp->mqp);
2501 	if (err)
2502 		goto out;
2503 
2504 	qp->state = new_state;
2505 
2506 	if (attr_mask & IB_QP_ACCESS_FLAGS)
2507 		qp->atomic_rd_en = attr->qp_access_flags;
2508 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2509 		qp->resp_depth = attr->max_dest_rd_atomic;
2510 	if (attr_mask & IB_QP_PORT) {
2511 		qp->port = attr->port_num;
2512 		update_mcg_macs(dev, qp);
2513 	}
2514 	if (attr_mask & IB_QP_ALT_PATH)
2515 		qp->alt_port = attr->alt_port_num;
2516 
2517 	if (is_sqp(dev, qp))
2518 		store_sqp_attrs(qp->sqp, attr, attr_mask);
2519 
2520 	/*
2521 	 * If we moved QP0 to RTR, bring the IB link up; if we moved
2522 	 * QP0 to RESET or ERROR, bring the link back down.
2523 	 */
2524 	if (is_qp0(dev, qp)) {
2525 		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
2526 			if (mlx4_INIT_PORT(dev->dev, qp->port))
2527 				pr_warn("INIT_PORT failed for port %d\n",
2528 				       qp->port);
2529 
2530 		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
2531 		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
2532 			mlx4_CLOSE_PORT(dev->dev, qp->port);
2533 	}
2534 
2535 	/*
2536 	 * If we moved a kernel QP to RESET, clean up all old CQ
2537 	 * entries and reinitialize the QP.
2538 	 */
2539 	if (new_state == IB_QPS_RESET) {
2540 		if (!ucontext) {
2541 			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
2542 					 ibsrq ? to_msrq(ibsrq) : NULL);
2543 			if (send_cq != recv_cq)
2544 				mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
2545 
2546 			qp->rq.head = 0;
2547 			qp->rq.tail = 0;
2548 			qp->sq.head = 0;
2549 			qp->sq.tail = 0;
2550 			qp->sq_next_wqe = 0;
2551 			if (qp->rq.wqe_cnt)
2552 				*qp->db.db  = 0;
2553 
2554 			if (qp->flags & MLX4_IB_QP_NETIF)
2555 				mlx4_ib_steer_qp_reg(dev, qp, 0);
2556 		}
2557 		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
2558 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2559 			qp->pri.smac = 0;
2560 			qp->pri.smac_port = 0;
2561 		}
2562 		if (qp->alt.smac) {
2563 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2564 			qp->alt.smac = 0;
2565 		}
2566 		if (qp->pri.vid < 0x1000) {
2567 			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
2568 			qp->pri.vid = 0xFFFF;
2569 			qp->pri.candidate_vid = 0xFFFF;
2570 			qp->pri.update_vid = 0;
2571 		}
2572 
2573 		if (qp->alt.vid < 0x1000) {
2574 			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
2575 			qp->alt.vid = 0xFFFF;
2576 			qp->alt.candidate_vid = 0xFFFF;
2577 			qp->alt.update_vid = 0;
2578 		}
2579 	}
2580 out:
2581 	if (err && qp->counter_index)
2582 		mlx4_ib_free_qp_counter(dev, qp);
2583 	if (err && steer_qp)
2584 		mlx4_ib_steer_qp_reg(dev, qp, 0);
2585 	kfree(context);
2586 	if (qp->pri.candidate_smac ||
2587 	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
2588 		if (err) {
2589 			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
2590 		} else {
2591 			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
2592 				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2593 			qp->pri.smac = qp->pri.candidate_smac;
2594 			qp->pri.smac_index = qp->pri.candidate_smac_index;
2595 			qp->pri.smac_port = qp->pri.candidate_smac_port;
2596 		}
2597 		qp->pri.candidate_smac = 0;
2598 		qp->pri.candidate_smac_index = 0;
2599 		qp->pri.candidate_smac_port = 0;
2600 	}
2601 	if (qp->alt.candidate_smac) {
2602 		if (err) {
2603 			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
2604 		} else {
2605 			if (qp->alt.smac)
2606 				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2607 			qp->alt.smac = qp->alt.candidate_smac;
2608 			qp->alt.smac_index = qp->alt.candidate_smac_index;
2609 			qp->alt.smac_port = qp->alt.candidate_smac_port;
2610 		}
2611 		qp->alt.candidate_smac = 0;
2612 		qp->alt.candidate_smac_index = 0;
2613 		qp->alt.candidate_smac_port = 0;
2614 	}
2615 
2616 	if (qp->pri.update_vid) {
2617 		if (err) {
2618 			if (qp->pri.candidate_vid < 0x1000)
2619 				mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
2620 						     qp->pri.candidate_vid);
2621 		} else {
2622 			if (qp->pri.vid < 0x1000)
2623 				mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
2624 						     qp->pri.vid);
2625 			qp->pri.vid = qp->pri.candidate_vid;
2626 			qp->pri.vlan_port = qp->pri.candidate_vlan_port;
2627 			qp->pri.vlan_index =  qp->pri.candidate_vlan_index;
2628 		}
2629 		qp->pri.candidate_vid = 0xFFFF;
2630 		qp->pri.update_vid = 0;
2631 	}
2632 
2633 	if (qp->alt.update_vid) {
2634 		if (err) {
2635 			if (qp->alt.candidate_vid < 0x1000)
2636 				mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
2637 						     qp->alt.candidate_vid);
2638 		} else {
2639 			if (qp->alt.vid < 0x1000)
2640 				mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
2641 						     qp->alt.vid);
2642 			qp->alt.vid = qp->alt.candidate_vid;
2643 			qp->alt.vlan_port = qp->alt.candidate_vlan_port;
2644 			qp->alt.vlan_index =  qp->alt.candidate_vlan_index;
2645 		}
2646 		qp->alt.candidate_vid = 0xFFFF;
2647 		qp->alt.update_vid = 0;
2648 	}
2649 
2650 	return err;
2651 }
2652 
2653 enum {
2654 	MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE	|
2655 					      IB_QP_PORT),
2656 };
2657 
2658 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2659 			      int attr_mask, struct ib_udata *udata)
2660 {
2661 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
2662 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
2663 	enum ib_qp_state cur_state, new_state;
2664 	int err = -EINVAL;

2665 	mutex_lock(&qp->mutex);
2666 
2667 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
2668 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
2669 
2670 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
2671 				attr_mask)) {
2672 		pr_debug("qpn 0x%x: invalid attribute mask specified "
2673 			 "for transition %d to %d. qp_type %d,"
2674 			 " attr_mask 0x%x\n",
2675 			 ibqp->qp_num, cur_state, new_state,
2676 			 ibqp->qp_type, attr_mask);
2677 		goto out;
2678 	}
2679 
2680 	if (ibqp->rwq_ind_tbl) {
2681 		if (!(((cur_state == IB_QPS_RESET) &&
2682 		       (new_state == IB_QPS_INIT)) ||
2683 		      ((cur_state == IB_QPS_INIT)  &&
2684 		       (new_state == IB_QPS_RTR)))) {
2685 			pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n",
2686 				 ibqp->qp_num, cur_state, new_state);
2687 
2688 			err = -EOPNOTSUPP;
2689 			goto out;
2690 		}
2691 
2692 		if (attr_mask & ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK) {
2693 			pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n",
2694 				 ibqp->qp_num, attr_mask, cur_state, new_state);
2695 
2696 			err = -EOPNOTSUPP;
2697 			goto out;
2698 		}
2699 	}
2700 
2701 	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
2702 		if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
2703 			if ((ibqp->qp_type == IB_QPT_RC) ||
2704 			    (ibqp->qp_type == IB_QPT_UD) ||
2705 			    (ibqp->qp_type == IB_QPT_UC) ||
2706 			    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
2707 			    (ibqp->qp_type == IB_QPT_XRC_INI)) {
2708 				attr->port_num = mlx4_ib_bond_next_port(dev);
2709 			}
2710 		} else {
2711 			/* no sense in changing port_num
2712 			 * when ports are bonded */
2713 			attr_mask &= ~IB_QP_PORT;
2714 		}
2715 	}
2716 
2717 	if ((attr_mask & IB_QP_PORT) &&
2718 	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
2719 		pr_debug("qpn 0x%x: invalid port number (%d) specified "
2720 			 "for transition %d to %d. qp_type %d\n",
2721 			 ibqp->qp_num, attr->port_num, cur_state,
2722 			 new_state, ibqp->qp_type);
2723 		goto out;
2724 	}
2725 
2726 	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
2727 	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
2728 	     IB_LINK_LAYER_ETHERNET))
2729 		goto out;
2730 
2731 	if (attr_mask & IB_QP_PKEY_INDEX) {
2732 		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2733 		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
2734 			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
2735 				 "for transition %d to %d. qp_type %d\n",
2736 				 ibqp->qp_num, attr->pkey_index, cur_state,
2737 				 new_state, ibqp->qp_type);
2738 			goto out;
2739 		}
2740 	}
2741 
2742 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
2743 	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
2744 		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
2745 			 "Transition %d to %d. qp_type %d\n",
2746 			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
2747 			 new_state, ibqp->qp_type);
2748 		goto out;
2749 	}
2750 
2751 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
2752 	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
2753 		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
2754 			 "Transition %d to %d. qp_type %d\n",
2755 			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
2756 			 new_state, ibqp->qp_type);
2757 		goto out;
2758 	}
2759 
2760 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
2761 		err = 0;
2762 		goto out;
2763 	}
2764 
2765 	if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
2766 		err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num,
2767 				       udata);
2768 		if (err)
2769 			goto out;
2770 	}
2771 
2772 	err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
2773 				  cur_state, new_state, udata);
2774 
2775 	if (ibqp->rwq_ind_tbl && err)
2776 		bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata);
2777 
2778 	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
2779 		attr->port_num = 1;
2780 
2781 out:
2782 	mutex_unlock(&qp->mutex);
2783 	return err;
2784 }
2785 
2786 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2787 		      int attr_mask, struct ib_udata *udata)
2788 {
2789 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
2790 	int ret;
2791 
2792 	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
2793 
2794 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
2795 		struct mlx4_ib_sqp *sqp = mqp->sqp;
2796 		int err = 0;
2797 
2798 		if (sqp->roce_v2_gsi)
2799 			err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
2800 		if (err)
2801 			pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
2802 			       err);
2803 	}
2804 	return ret;
2805 }
2806 
2807 static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
2808 {
2809 	int i;
2810 	for (i = 0; i < dev->caps.num_ports; i++) {
2811 		if (qpn == dev->caps.spec_qps[i].qp0_proxy ||
2812 		    qpn == dev->caps.spec_qps[i].qp0_tunnel) {
2813 			*qkey = dev->caps.spec_qps[i].qp0_qkey;
2814 			return 0;
2815 		}
2816 	}
2817 	return -EINVAL;
2818 }
2819 
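/*
 * Build the MLX-format UD header for proxy/tunnel QP0 sends under
 * SR-IOV: a forced-loopback LRH/BTH/DETH header packed into one or two
 * inline segments, depending on whether it crosses a 64-byte boundary.
 */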
2820 static int build_sriov_qp0_header(struct mlx4_ib_qp *qp,
2821 				  const struct ib_ud_wr *wr,
2822 				  void *wqe, unsigned *mlx_seg_len)
2823 {
2824 	struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device);
2825 	struct mlx4_ib_sqp *sqp = qp->sqp;
2826 	struct ib_device *ib_dev = qp->ibqp.device;
2827 	struct mlx4_wqe_mlx_seg *mlx = wqe;
2828 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
2829 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
2830 	u16 pkey;
2831 	u32 qkey;
2832 	int send_size;
2833 	int header_size;
2834 	int spc;
2835 	int err;
2836 	int i;
2837 
2838 	if (wr->wr.opcode != IB_WR_SEND)
2839 		return -EINVAL;
2840 
2841 	send_size = 0;
2842 
2843 	for (i = 0; i < wr->wr.num_sge; ++i)
2844 		send_size += wr->wr.sg_list[i].length;
2845 
2846 	/* for proxy-qp0 sends, need to add in size of tunnel header */
2847 	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
2848 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
2849 		send_size += sizeof (struct mlx4_ib_tunnel_header);
2850 
2851 	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
2852 
2853 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
2854 		sqp->ud_header.lrh.service_level =
2855 			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
2856 		sqp->ud_header.lrh.destination_lid =
2857 			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2858 		sqp->ud_header.lrh.source_lid =
2859 			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
2860 	}
2861 
2862 	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
2863 
2864 	/* force loopback */
2865 	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
2866 	mlx->rlid = sqp->ud_header.lrh.destination_lid;
2867 
2868 	sqp->ud_header.lrh.virtual_lane    = 0;
2869 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2870 	err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey);
2871 	if (err)
2872 		return err;
2873 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
2874 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
2875 		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2876 	else
2877 		sqp->ud_header.bth.destination_qpn =
2878 			cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel);
2879 
2880 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
2881 	if (mlx4_is_master(mdev->dev)) {
2882 		if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey))
2883 			return -EINVAL;
2884 	} else {
2885 		if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey))
2886 			return -EINVAL;
2887 	}
2888 	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
2889 	sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn);
2890 
2891 	sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
2892 	sqp->ud_header.immediate_present = 0;
2893 
2894 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
2895 
2896 	/*
2897 	 * Inline data segments may not cross a 64 byte boundary.  If
2898 	 * our UD header is bigger than the space available up to the
2899 	 * next 64 byte boundary in the WQE, use two inline data
2900 	 * segments to hold the UD header.
2901 	 */
2902 	spc = MLX4_INLINE_ALIGN -
2903 	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
2904 	if (header_size <= spc) {
2905 		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
2906 		memcpy(inl + 1, sqp->header_buf, header_size);
2907 		i = 1;
2908 	} else {
2909 		inl->byte_count = cpu_to_be32(1 << 31 | spc);
2910 		memcpy(inl + 1, sqp->header_buf, spc);
2911 
2912 		inl = (void *) (inl + 1) + spc;
2913 		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
2914 		/*
2915 		 * Need a barrier here to make sure all the data is
2916 		 * visible before the byte_count field is set.
2917 		 * Otherwise the HCA prefetcher could grab the 64-byte
2918 		 * chunk with this inline segment and get a valid (!=
2919 		 * 0xffffffff) byte count but stale data, and end up
2920 		 * generating a packet with bad headers.
2921 		 *
2922 		 * The first inline segment's byte_count field doesn't
2923 		 * need a barrier, because it comes after a
2924 		 * control/MLX segment and therefore is at an offset
2925 		 * of 16 mod 64.
2926 		 */
2927 		wmb();
2928 		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
2929 		i = 2;
2930 	}
2931 
2932 	*mlx_seg_len =
2933 	ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
2934 	return 0;
2935 }
2936 
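/*
 * Look up the cached SL-to-VL mapping for the port; each byte of the
 * table holds two 4-bit VL entries, so the SL selects a byte and then
 * a nibble.
 */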
2937 static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
2938 {
2939 	union sl2vl_tbl_to_u64 tmp_vltab;
2940 	u8 vl;
2941 
2942 	if (sl > 15)
2943 		return 0xf;
2944 	tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
2945 	vl = tmp_vltab.sl8[sl >> 1];
2946 	if (sl & 1)
2947 		vl &= 0x0f;
2948 	else
2949 		vl >>= 4;
2950 	return vl;
2951 }
2952 
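/*
 * Fetch a GID and its type from the driver's per-port GID table at the
 * given hardware index; returns -ENOENT if the slot holds the zero GID.
 */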
2953 static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
2954 				int index, union ib_gid *gid,
2955 				enum ib_gid_type *gid_type)
2956 {
2957 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
2958 	struct mlx4_port_gid_table *port_gid_table;
2959 	unsigned long flags;
2960 
2961 	port_gid_table = &iboe->gids[port_num - 1];
2962 	spin_lock_irqsave(&iboe->lock, flags);
2963 	memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
2964 	*gid_type = port_gid_table->gids[index].gid_type;
2965 	spin_unlock_irqrestore(&iboe->lock, flags);
2966 	if (rdma_is_zero_gid(gid))
2967 		return -ENOENT;
2968 
2969 	return 0;
2970 }
2971 
2972 #define MLX4_ROCEV2_QP1_SPORT 0xC000
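/*
 * Build the MLX-format UD header (LRH/GRH or IP/UDP, BTH, DETH) for
 * sends on special QPs, covering IB, RoCE v1 and RoCE v2 address
 * formats, and pack it into one or two inline segments.
 */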
2973 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
2974 			    void *wqe, unsigned *mlx_seg_len)
2975 {
2976 	struct mlx4_ib_sqp *sqp = qp->sqp;
2977 	struct ib_device *ib_dev = qp->ibqp.device;
2978 	struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
2979 	struct mlx4_wqe_mlx_seg *mlx = wqe;
2980 	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
2981 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
2982 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
2983 	union ib_gid sgid;
2984 	u16 pkey;
2985 	int send_size;
2986 	int header_size;
2987 	int spc;
2988 	int i;
2989 	int err = 0;
2990 	u16 vlan = 0xffff;
2991 	bool is_eth;
2992 	bool is_vlan = false;
2993 	bool is_grh;
2994 	bool is_udp = false;
2995 	int ip_version = 0;
2996 
2997 	send_size = 0;
2998 	for (i = 0; i < wr->wr.num_sge; ++i)
2999 		send_size += wr->wr.sg_list[i].length;
3000 
3001 	is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET;
3002 	is_grh = mlx4_ib_ah_grh_present(ah);
3003 	if (is_eth) {
3004 		enum ib_gid_type gid_type;
3005 		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
3006 			/* When multi-function is enabled, the ib_core gid
3007 			 * indexes don't necessarily match the hw ones, so
3008 			 * we must use our own cache */
3009 			err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
3010 							   be32_to_cpu(ah->av.ib.port_pd) >> 24,
3011 							   ah->av.ib.gid_index, &sgid.raw[0]);
3012 			if (err)
3013 				return err;
3014 		} else  {
3015 			err = fill_gid_by_hw_index(ibdev, qp->port,
3016 						   ah->av.ib.gid_index, &sgid,
3017 						   &gid_type);
3018 			if (!err) {
3019 				is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
3020 				if (is_udp) {
3021 					if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
3022 						ip_version = 4;
3023 					else
3024 						ip_version = 6;
3025 					is_grh = false;
3026 				}
3027 			} else {
3028 				return err;
3029 			}
3030 		}
3031 		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
3032 			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
3033 			is_vlan = true;
3034 		}
3035 	}
3036 	err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
3037 			  ip_version, is_udp, 0, &sqp->ud_header);
3038 	if (err)
3039 		return err;
3040 
3041 	if (!is_eth) {
3042 		sqp->ud_header.lrh.service_level =
3043 			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
3044 		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
3045 		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
3046 	}
3047 
3048 	if (is_grh || (ip_version == 6)) {
3049 		sqp->ud_header.grh.traffic_class =
3050 			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
3051 		sqp->ud_header.grh.flow_label    =
3052 			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
3053 		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
3054 		if (is_eth) {
3055 			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
3056 		} else {
3057 			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
3058 				/* When multi-function is enabled, the ib_core gid
3059 				 * indexes don't necessarily match the hw ones, so
3060 				 * we must use our own cache
3061 				 */
3062 				sqp->ud_header.grh.source_gid.global
3063 					.subnet_prefix =
3064 					cpu_to_be64(atomic64_read(
3065 						&(to_mdev(ib_dev)
3066 							  ->sriov
3067 							  .demux[qp->port - 1]
3068 							  .subnet_prefix)));
3069 				sqp->ud_header.grh.source_gid.global
3070 					.interface_id =
3071 					to_mdev(ib_dev)
3072 						->sriov.demux[qp->port - 1]
3073 						.guid_cache[ah->av.ib.gid_index];
3074 			} else {
3075 				sqp->ud_header.grh.source_gid =
3076 					ah->ibah.sgid_attr->gid;
3077 			}
3078 		}
3079 		memcpy(sqp->ud_header.grh.destination_gid.raw,
3080 		       ah->av.ib.dgid, 16);
3081 	}
3082 
3083 	if (ip_version == 4) {
3084 		sqp->ud_header.ip4.tos =
3085 			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
3086 		sqp->ud_header.ip4.id = 0;
3087 		sqp->ud_header.ip4.frag_off = htons(IP_DF);
3088 		sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;
3089 
3090 		memcpy(&sqp->ud_header.ip4.saddr,
3091 		       sgid.raw + 12, 4);
3092 		memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
3093 		sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
3094 	}
3095 
3096 	if (is_udp) {
3097 		sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
3098 		sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
3099 		sqp->ud_header.udp.csum = 0;
3100 	}
3101 
3102 	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
3103 
3104 	if (!is_eth) {
3105 		mlx->flags |=
3106 			cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
3107 				    (sqp->ud_header.lrh.destination_lid ==
3108 						     IB_LID_PERMISSIVE ?
3109 					     MLX4_WQE_MLX_SLR :
3110 					     0) |
3111 				    (sqp->ud_header.lrh.service_level << 8));
3112 		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
3113 			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
3114 		mlx->rlid = sqp->ud_header.lrh.destination_lid;
3115 	}
3116 
3117 	switch (wr->wr.opcode) {
3118 	case IB_WR_SEND:
3119 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
3120 		sqp->ud_header.immediate_present = 0;
3121 		break;
3122 	case IB_WR_SEND_WITH_IMM:
3123 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
3124 		sqp->ud_header.immediate_present = 1;
3125 		sqp->ud_header.immediate_data    = wr->wr.ex.imm_data;
3126 		break;
3127 	default:
3128 		return -EINVAL;
3129 	}
3130 
3131 	if (is_eth) {
3132 		struct in6_addr in6;
3133 		u16 ether_type;
3134 		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
3135 
3136 		ether_type = (!is_udp) ? ETH_P_IBOE:
3137 			(ip_version == 4 ? ETH_P_IP : ETH_P_IPV6);
3138 
3139 		mlx->sched_prio = cpu_to_be16(pcp);
3140 
3141 		ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
3142 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
3143 		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
3144 		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
3145 		memcpy(&in6, sgid.raw, sizeof(in6));
3146 
3147 
3148 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
3149 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
3150 		if (!is_vlan) {
3151 			sqp->ud_header.eth.type = cpu_to_be16(ether_type);
3152 		} else {
3153 			sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
3154 			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
3155 		}
3156 	} else {
3157 		sqp->ud_header.lrh.virtual_lane =
3158 			!qp->ibqp.qp_num ?
3159 				15 :
3160 				sl_to_vl(to_mdev(ib_dev),
3161 					 sqp->ud_header.lrh.service_level,
3162 					 qp->port);
3163 		if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
3164 			return -EINVAL;
3165 		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
3166 			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
3167 	}
3168 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
3169 	if (!qp->ibqp.qp_num)
3170 		err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index,
3171 					 &pkey);
3172 	else
3173 		err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index,
3174 					 &pkey);
3175 	if (err)
3176 		return err;
3177 
3178 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
3179 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
3180 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
3181 	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
3182 					       sqp->qkey : wr->remote_qkey);
3183 	sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
3184 
3185 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
3186 
3187 	if (0) {
3188 		pr_err("built UD header of size %d:\n", header_size);
3189 		for (i = 0; i < header_size / 4; ++i) {
3190 			if (i % 8 == 0)
3191 				pr_err("  [%02x] ", i * 4);
3192 			pr_cont(" %08x",
3193 				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
3194 			if ((i + 1) % 8 == 0)
3195 				pr_cont("\n");
3196 		}
3197 		pr_err("\n");
3198 	}
3199 
3200 	/*
3201 	 * Inline data segments may not cross a 64 byte boundary.  If
3202 	 * our UD header is bigger than the space available up to the
3203 	 * next 64 byte boundary in the WQE, use two inline data
3204 	 * segments to hold the UD header.
3205 	 */
3206 	spc = MLX4_INLINE_ALIGN -
3207 		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
3208 	if (header_size <= spc) {
3209 		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
3210 		memcpy(inl + 1, sqp->header_buf, header_size);
3211 		i = 1;
3212 	} else {
3213 		inl->byte_count = cpu_to_be32(1 << 31 | spc);
3214 		memcpy(inl + 1, sqp->header_buf, spc);
3215 
3216 		inl = (void *) (inl + 1) + spc;
3217 		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
3218 		/*
3219 		 * Need a barrier here to make sure all the data is
3220 		 * visible before the byte_count field is set.
3221 		 * Otherwise the HCA prefetcher could grab the 64-byte
3222 		 * chunk with this inline segment and get a valid (!=
3223 		 * 0xffffffff) byte count but stale data, and end up
3224 		 * generating a packet with bad headers.
3225 		 *
3226 		 * The first inline segment's byte_count field doesn't
3227 		 * need a barrier, because it comes after a
3228 		 * control/MLX segment and therefore is at an offset
3229 		 * of 16 mod 64.
3230 		 */
3231 		wmb();
3232 		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
3233 		i = 2;
3234 	}
3235 
3236 	*mlx_seg_len =
3237 		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
3238 	return 0;
3239 }
3240 
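/*
 * Check whether posting nreq more WQEs would overflow the work queue.
 * The head/tail indices are re-read under the CQ lock so that
 * completions being polled concurrently are taken into account.
 */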
3241 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
3242 {
3243 	unsigned cur;
3244 	struct mlx4_ib_cq *cq;
3245 
3246 	cur = wq->head - wq->tail;
3247 	if (likely(cur + nreq < wq->max_post))
3248 		return 0;
3249 
3250 	cq = to_mcq(ib_cq);
3251 	spin_lock(&cq->lock);
3252 	cur = wq->head - wq->tail;
3253 	spin_unlock(&cq->lock);
3254 
3255 	return cur + nreq >= wq->max_post;
3256 }
3257 
3258 static __be32 convert_access(int acc)
3259 {
3260 	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
3261 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
3262 	       (acc & IB_ACCESS_REMOTE_WRITE  ?
3263 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
3264 	       (acc & IB_ACCESS_REMOTE_READ   ?
3265 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
3266 	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
3267 		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
3268 }
3269 
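/*
 * Build the fast-registration segment for an IB_WR_REG_MR work request
 * from the memory region's page-list DMA address, IOVA, length and
 * page size.
 */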
3270 static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
3271 			const struct ib_reg_wr *wr)
3272 {
3273 	struct mlx4_ib_mr *mr = to_mmr(wr->mr);
3274 
3275 	fseg->flags		= convert_access(wr->access);
3276 	fseg->mem_key		= cpu_to_be32(wr->key);
3277 	fseg->buf_list		= cpu_to_be64(mr->page_map);
3278 	fseg->start_addr	= cpu_to_be64(mr->ibmr.iova);
3279 	fseg->reg_len		= cpu_to_be64(mr->ibmr.length);
3280 	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
3281 	fseg->page_size		= cpu_to_be32(ilog2(mr->ibmr.page_size));
3282 	fseg->reserved[0]	= 0;
3283 	fseg->reserved[1]	= 0;
3284 }
3285 
3286 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
3287 {
3288 	memset(iseg, 0, sizeof(*iseg));
3289 	iseg->mem_key = cpu_to_be32(rkey);
3290 }
3291 
3292 static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
3293 					  u64 remote_addr, u32 rkey)
3294 {
3295 	rseg->raddr    = cpu_to_be64(remote_addr);
3296 	rseg->rkey     = cpu_to_be32(rkey);
3297 	rseg->reserved = 0;
3298 }
3299 
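/*
 * Fill the atomic segment according to the opcode: compare-and-swap
 * uses the swap and compare values, masked fetch-and-add uses the add
 * value and its mask, and plain fetch-and-add uses the add value alone.
 */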
3300 static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
3301 			   const struct ib_atomic_wr *wr)
3302 {
3303 	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
3304 		aseg->swap_add = cpu_to_be64(wr->swap);
3305 		aseg->compare  = cpu_to_be64(wr->compare_add);
3306 	} else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
3307 		aseg->swap_add = cpu_to_be64(wr->compare_add);
3308 		aseg->compare  = cpu_to_be64(wr->compare_add_mask);
3309 	} else {
3310 		aseg->swap_add = cpu_to_be64(wr->compare_add);
3311 		aseg->compare  = 0;
3312 	}
3313 
3314 }
3315 
3316 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
3317 				  const struct ib_atomic_wr *wr)
3318 {
3319 	aseg->swap_add		= cpu_to_be64(wr->swap);
3320 	aseg->swap_add_mask	= cpu_to_be64(wr->swap_mask);
3321 	aseg->compare		= cpu_to_be64(wr->compare_add);
3322 	aseg->compare_mask	= cpu_to_be64(wr->compare_add_mask);
3323 }
3324 
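/*
 * Copy the address vector, destination QPN and Q_Key into the UD
 * datagram segment; the MAC and VLAN taken from the AH are needed
 * for RoCE transmits.
 */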
3325 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
3326 			     const struct ib_ud_wr *wr)
3327 {
3328 	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
3329 	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
3330 	dseg->qkey = cpu_to_be32(wr->remote_qkey);
3331 	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
3332 	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
3333 }
3334 
3335 static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
3336 				    struct mlx4_wqe_datagram_seg *dseg,
3337 				    const struct ib_ud_wr *wr,
3338 				    enum mlx4_ib_qp_type qpt)
3339 {
3340 	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
3341 	struct mlx4_av sqp_av = {0};
3342 	int port = *((u8 *) &av->ib.port_pd) & 0x3;
3343 
3344 	/* force loopback */
3345 	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
3346 	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
3347 	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
3348 			cpu_to_be32(0xf0000000);
3349 
3350 	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
3351 	if (qpt == MLX4_IB_QPT_PROXY_GSI)
3352 		dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel);
3353 	else
3354 		dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel);
3355 	/* Use QKEY from the QP context, which is set by master */
3356 	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
3357 }
3358 
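/*
 * For tunneled (SR-IOV proxy) special QPs, inline a tunnel header that
 * carries the original address information (AV, remote QPN, P_Key
 * index, Q_Key, MAC and VLAN).  Like the UD header above, it is split
 * into two inline segments when it would cross a 64-byte boundary.
 */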
3359 static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
3360 				unsigned *mlx_seg_len)
3361 {
3362 	struct mlx4_wqe_inline_seg *inl = wqe;
3363 	struct mlx4_ib_tunnel_header hdr;
3364 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
3365 	int spc;
3366 	int i;
3367 
3368 	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
3369 	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
3370 	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
3371 	hdr.qkey = cpu_to_be32(wr->remote_qkey);
3372 	memcpy(hdr.mac, ah->av.eth.mac, 6);
3373 	hdr.vlan = ah->av.eth.vlan;
3374 
3375 	spc = MLX4_INLINE_ALIGN -
3376 		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
3377 	if (sizeof (hdr) <= spc) {
3378 		memcpy(inl + 1, &hdr, sizeof (hdr));
3379 		wmb();
3380 		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
3381 		i = 1;
3382 	} else {
3383 		memcpy(inl + 1, &hdr, spc);
3384 		wmb();
3385 		inl->byte_count = cpu_to_be32(1 << 31 | spc);
3386 
3387 		inl = (void *) (inl + 1) + spc;
3388 		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
3389 		wmb();
3390 		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
3391 		i = 2;
3392 	}
3393 
3394 	*mlx_seg_len =
3395 		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
3396 }
3397 
3398 static void set_mlx_icrc_seg(void *dseg)
3399 {
3400 	u32 *t = dseg;
3401 	struct mlx4_wqe_inline_seg *iseg = dseg;
3402 
3403 	t[1] = 0;
3404 
3405 	/*
3406 	 * Need a barrier here before writing the byte_count field to
3407 	 * make sure that all the data is visible before the
3408 	 * byte_count field is set.  Otherwise, if the segment begins
3409 	 * a new cacheline, the HCA prefetcher could grab the 64-byte
3410 	 * chunk and get a valid (!= 0xffffffff) byte count but
3411 	 * stale data, and end up sending the wrong data.
3412 	 */
3413 	wmb();
3414 
3415 	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
3416 }
3417 
3418 static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
3419 {
3420 	dseg->lkey       = cpu_to_be32(sg->lkey);
3421 	dseg->addr       = cpu_to_be64(sg->addr);
3422 
3423 	/*
3424 	 * Need a barrier here before writing the byte_count field to
3425 	 * make sure that all the data is visible before the
3426 	 * byte_count field is set.  Otherwise, if the segment begins
3427 	 * a new cacheline, the HCA prefetcher could grab the 64-byte
3428 	 * chunk and get a valid (!= 0xffffffff) byte count but
3429 	 * stale data, and end up sending the wrong data.
3430 	 */
3431 	wmb();
3432 
3433 	dseg->byte_count = cpu_to_be32(sg->length);
3434 }
3435 
3436 static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
3437 {
3438 	dseg->byte_count = cpu_to_be32(sg->length);
3439 	dseg->lkey       = cpu_to_be32(sg->lkey);
3440 	dseg->addr       = cpu_to_be64(sg->addr);
3441 }
3442 
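/*
 * Build the LSO segment for an IB_WR_LSO send: copy the packet headers
 * into the WQE, encode the MSS and header length, and set the blh flag
 * when the 16-byte-aligned header size exceeds one cache line.
 */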
3443 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
3444 			 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
3445 			 unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
3446 {
3447 	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
3448 
3449 	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
3450 		*blh = cpu_to_be32(1 << 6);
3451 
3452 	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
3453 		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
3454 		return -EINVAL;
3455 
3456 	memcpy(wqe->header, wr->header, wr->hlen);
3457 
3458 	*lso_hdr_sz  = cpu_to_be32(wr->mss << 16 | wr->hlen);
3459 	*lso_seg_len = halign;
3460 	return 0;
3461 }
3462 
3463 static __be32 send_ieth(const struct ib_send_wr *wr)
3464 {
3465 	switch (wr->opcode) {
3466 	case IB_WR_SEND_WITH_IMM:
3467 	case IB_WR_RDMA_WRITE_WITH_IMM:
3468 		return wr->ex.imm_data;
3469 
3470 	case IB_WR_SEND_WITH_INV:
3471 		return cpu_to_be32(wr->ex.invalidate_rkey);
3472 
3473 	default:
3474 		return 0;
3475 	}
3476 }
3477 
3478 static void add_zero_len_inline(void *wqe)
3479 {
3480 	struct mlx4_wqe_inline_seg *inl = wqe;
3481 	memset(wqe, 0, 16);
3482 	inl->byte_count = cpu_to_be32(1 << 31);
3483 }
3484 
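/*
 * Post a chain of send work requests.  For each WR: check for SQ
 * overflow, build the control segment, add the opcode- and QP-type-
 * specific segments (remote address, atomic, datagram, tunnel or MLX
 * headers), then write the data segments in reverse order.  The
 * doorbell is rung once, after the whole chain has been written.
 */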
3485 static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3486 			      const struct ib_send_wr **bad_wr, bool drain)
3487 {
3488 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
3489 	void *wqe;
3490 	struct mlx4_wqe_ctrl_seg *ctrl;
3491 	struct mlx4_wqe_data_seg *dseg;
3492 	unsigned long flags;
3493 	int nreq;
3494 	int err = 0;
3495 	unsigned ind;
3496 	int size;
3497 	unsigned seglen;
3498 	__be32 dummy;
3499 	__be32 *lso_wqe;
3500 	__be32 lso_hdr_sz;
3501 	__be32 blh;
3502 	int i;
3503 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
3504 
3505 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
3506 		struct mlx4_ib_sqp *sqp = qp->sqp;
3507 
3508 		if (sqp->roce_v2_gsi) {
3509 			struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
3510 			enum ib_gid_type gid_type;
3511 			union ib_gid gid;
3512 
3513 			if (!fill_gid_by_hw_index(mdev, qp->port,
3514 					   ah->av.ib.gid_index,
3515 					   &gid, &gid_type))
3516 				qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
3517 						to_mqp(sqp->roce_v2_gsi) : qp;
3518 			else
3519 				pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
3520 				       ah->av.ib.gid_index);
3521 		}
3522 	}
3523 
3524 	spin_lock_irqsave(&qp->sq.lock, flags);
3525 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
3526 	    !drain) {
3527 		err = -EIO;
3528 		*bad_wr = wr;
3529 		nreq = 0;
3530 		goto out;
3531 	}
3532 
3533 	ind = qp->sq_next_wqe;
3534 
3535 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
3536 		lso_wqe = &dummy;
3537 		blh = 0;
3538 
3539 		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
3540 			err = -ENOMEM;
3541 			*bad_wr = wr;
3542 			goto out;
3543 		}
3544 
3545 		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
3546 			err = -EINVAL;
3547 			*bad_wr = wr;
3548 			goto out;
3549 		}
3550 
3551 		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
3552 		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
3553 
3554 		ctrl->srcrb_flags =
3555 			(wr->send_flags & IB_SEND_SIGNALED ?
3556 			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
3557 			(wr->send_flags & IB_SEND_SOLICITED ?
3558 			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
3559 			((wr->send_flags & IB_SEND_IP_CSUM) ?
3560 			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
3561 				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
3562 			qp->sq_signal_bits;
3563 
3564 		ctrl->imm = send_ieth(wr);
3565 
3566 		wqe += sizeof *ctrl;
3567 		size = sizeof *ctrl / 16;
3568 
3569 		switch (qp->mlx4_ib_qp_type) {
3570 		case MLX4_IB_QPT_RC:
3571 		case MLX4_IB_QPT_UC:
3572 			switch (wr->opcode) {
3573 			case IB_WR_ATOMIC_CMP_AND_SWP:
3574 			case IB_WR_ATOMIC_FETCH_AND_ADD:
3575 			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
3576 				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3577 					      atomic_wr(wr)->rkey);
3578 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3579 
3580 				set_atomic_seg(wqe, atomic_wr(wr));
3581 				wqe  += sizeof (struct mlx4_wqe_atomic_seg);
3582 
3583 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
3584 					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
3585 
3586 				break;
3587 
3588 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
3589 				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3590 					      atomic_wr(wr)->rkey);
3591 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3592 
3593 				set_masked_atomic_seg(wqe, atomic_wr(wr));
3594 				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);
3595 
3596 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
3597 					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
3598 
3599 				break;
3600 
3601 			case IB_WR_RDMA_READ:
3602 			case IB_WR_RDMA_WRITE:
3603 			case IB_WR_RDMA_WRITE_WITH_IMM:
3604 				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
3605 					      rdma_wr(wr)->rkey);
3606 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
3607 				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
3608 				break;
3609 
3610 			case IB_WR_LOCAL_INV:
3611 				ctrl->srcrb_flags |=
3612 					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
3613 				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
3614 				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
3615 				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
3616 				break;
3617 
3618 			case IB_WR_REG_MR:
3619 				ctrl->srcrb_flags |=
3620 					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
3621 				set_reg_seg(wqe, reg_wr(wr));
3622 				wqe  += sizeof(struct mlx4_wqe_fmr_seg);
3623 				size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
3624 				break;
3625 
3626 			default:
3627 				/* No extra segments required for sends */
3628 				break;
3629 			}
3630 			break;
3631 
3632 		case MLX4_IB_QPT_TUN_SMI_OWNER:
3633 			err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
3634 						     &seglen);
3635 			if (unlikely(err)) {
3636 				*bad_wr = wr;
3637 				goto out;
3638 			}
3639 			wqe  += seglen;
3640 			size += seglen / 16;
3641 			break;
3642 		case MLX4_IB_QPT_TUN_SMI:
3643 		case MLX4_IB_QPT_TUN_GSI:
3644 			/* this is a UD qp used in MAD responses to slaves. */
3645 			set_datagram_seg(wqe, ud_wr(wr));
3646 			/* set the forced-loopback bit in the data seg av */
3647 			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
3648 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3649 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3650 			break;
3651 		case MLX4_IB_QPT_UD:
3652 			set_datagram_seg(wqe, ud_wr(wr));
3653 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3654 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3655 
3656 			if (wr->opcode == IB_WR_LSO) {
3657 				err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
3658 						&lso_hdr_sz, &blh);
3659 				if (unlikely(err)) {
3660 					*bad_wr = wr;
3661 					goto out;
3662 				}
3663 				lso_wqe = (__be32 *) wqe;
3664 				wqe  += seglen;
3665 				size += seglen / 16;
3666 			}
3667 			break;
3668 
3669 		case MLX4_IB_QPT_PROXY_SMI_OWNER:
3670 			err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
3671 						     &seglen);
3672 			if (unlikely(err)) {
3673 				*bad_wr = wr;
3674 				goto out;
3675 			}
3676 			wqe  += seglen;
3677 			size += seglen / 16;
3678 			/* to start tunnel header on a cache-line boundary */
3679 			add_zero_len_inline(wqe);
3680 			wqe += 16;
3681 			size++;
3682 			build_tunnel_header(ud_wr(wr), wqe, &seglen);
3683 			wqe  += seglen;
3684 			size += seglen / 16;
3685 			break;
3686 		case MLX4_IB_QPT_PROXY_SMI:
3687 		case MLX4_IB_QPT_PROXY_GSI:
3688 			/* If we are tunneling special qps, this is a UD qp.
3689 			 * In this case we first add a UD segment targeting
3690 			 * the tunnel qp, and then add a header with address
3691 			 * information */
3692 			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
3693 						ud_wr(wr),
3694 						qp->mlx4_ib_qp_type);
3695 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
3696 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
3697 			build_tunnel_header(ud_wr(wr), wqe, &seglen);
3698 			wqe  += seglen;
3699 			size += seglen / 16;
3700 			break;
3701 
3702 		case MLX4_IB_QPT_SMI:
3703 		case MLX4_IB_QPT_GSI:
3704 			err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen);
3705 			if (unlikely(err)) {
3706 				*bad_wr = wr;
3707 				goto out;
3708 			}
3709 			wqe  += seglen;
3710 			size += seglen / 16;
3711 			break;
3712 
3713 		default:
3714 			break;
3715 		}
3716 
3717 		/*
3718 		 * Write data segments in reverse order, so as to
3719 		 * overwrite cacheline stamp last within each
3720 		 * cacheline.  This avoids issues with WQE
3721 		 * prefetching.
3722 		 */
3723 
3724 		dseg = wqe;
3725 		dseg += wr->num_sge - 1;
3726 		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
3727 
3728 		/* Add one more inline data segment for ICRC for MLX sends */
3729 		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
3730 			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
3731 			     qp->mlx4_ib_qp_type &
3732 			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
3733 			set_mlx_icrc_seg(dseg + 1);
3734 			size += sizeof (struct mlx4_wqe_data_seg) / 16;
3735 		}
3736 
3737 		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
3738 			set_data_seg(dseg, wr->sg_list + i);
3739 
3740 		/*
3741 		 * Possibly overwrite stamping in cacheline with LSO
3742 		 * segment only after making sure all data segments
3743 		 * are written.
3744 		 */
3745 		wmb();
3746 		*lso_wqe = lso_hdr_sz;
3747 
3748 		ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ?
3749 					     MLX4_WQE_CTRL_FENCE : 0) | size;
3750 
3751 		/*
3752 		 * Make sure descriptor is fully written before
3753 		 * setting ownership bit (because HW can start
3754 		 * executing as soon as we do).
3755 		 */
3756 		wmb();
3757 
3758 		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
3759 			*bad_wr = wr;
3760 			err = -EINVAL;
3761 			goto out;
3762 		}
3763 
3764 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
3765 			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
3766 
3767 		/*
3768 		 * We can improve latency by not stamping the last
3769 		 * send queue WQE until after ringing the doorbell, so
3770 		 * only stamp here if there are still more WQEs to post.
3771 		 */
3772 		if (wr->next)
3773 			stamp_send_wqe(qp, ind + qp->sq_spare_wqes);
3774 		ind++;
3775 	}
3776 
3777 out:
3778 	if (likely(nreq)) {
3779 		qp->sq.head += nreq;
3780 
3781 		/*
3782 		 * Make sure that descriptors are written before
3783 		 * doorbell record.
3784 		 */
3785 		wmb();
3786 
3787 		writel_relaxed(qp->doorbell_qpn,
3788 			to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
3789 
3790 		stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
3791 
3792 		qp->sq_next_wqe = ind;
3793 	}
3794 
3795 	spin_unlock_irqrestore(&qp->sq.lock, flags);
3796 
3797 	return err;
3798 }
3799 
3800 int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3801 		      const struct ib_send_wr **bad_wr)
3802 {
3803 	return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
3804 }
3805 
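/*
 * Post a chain of receive work requests.  For proxy special QPs the
 * first scatter entry is reserved for the tunnel header buffer; unused
 * scatter entries are terminated with MLX4_INVALID_LKEY.  The doorbell
 * record is updated once, after the whole chain has been written.
 */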
3806 static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3807 			      const struct ib_recv_wr **bad_wr, bool drain)
3808 {
3809 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
3810 	struct mlx4_wqe_data_seg *scat;
3811 	unsigned long flags;
3812 	int err = 0;
3813 	int nreq;
3814 	int ind;
3815 	int max_gs;
3816 	int i;
3817 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
3818 
3819 	max_gs = qp->rq.max_gs;
3820 	spin_lock_irqsave(&qp->rq.lock, flags);
3821 
3822 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
3823 	    !drain) {
3824 		err = -EIO;
3825 		*bad_wr = wr;
3826 		nreq = 0;
3827 		goto out;
3828 	}
3829 
3830 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
3831 
3832 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
3833 		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
3834 			err = -ENOMEM;
3835 			*bad_wr = wr;
3836 			goto out;
3837 		}
3838 
3839 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
3840 			err = -EINVAL;
3841 			*bad_wr = wr;
3842 			goto out;
3843 		}
3844 
3845 		scat = get_recv_wqe(qp, ind);
3846 
3847 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
3848 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
3849 			ib_dma_sync_single_for_device(ibqp->device,
3850 						      qp->sqp_proxy_rcv[ind].map,
3851 						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
3852 						      DMA_FROM_DEVICE);
3853 			scat->byte_count =
3854 				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
3855 			/* use dma lkey from upper layer entry */
3856 			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
3857 			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
3858 			scat++;
3859 			max_gs--;
3860 		}
3861 
3862 		for (i = 0; i < wr->num_sge; ++i)
3863 			__set_data_seg(scat + i, wr->sg_list + i);
3864 
3865 		if (i < max_gs) {
3866 			scat[i].byte_count = 0;
3867 			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
3868 			scat[i].addr       = 0;
3869 		}
3870 
3871 		qp->rq.wrid[ind] = wr->wr_id;
3872 
3873 		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
3874 	}
3875 
3876 out:
3877 	if (likely(nreq)) {
3878 		qp->rq.head += nreq;
3879 
3880 		/*
3881 		 * Make sure that descriptors are written before
3882 		 * doorbell record.
3883 		 */
3884 		wmb();
3885 
3886 		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
3887 	}
3888 
3889 	spin_unlock_irqrestore(&qp->rq.lock, flags);
3890 
3891 	return err;
3892 }
3893 
3894 int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3895 		      const struct ib_recv_wr **bad_wr)
3896 {
3897 	return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
3898 }
3899 
3900 static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
3901 {
3902 	switch (mlx4_state) {
3903 	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
3904 	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
3905 	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
3906 	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
3907 	case MLX4_QP_STATE_SQ_DRAINING:
3908 	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
3909 	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
3910 	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
3911 	default:		     return -1;
3912 	}
3913 }
3914 
3915 static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
3916 {
3917 	switch (mlx4_mig_state) {
3918 	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
3919 	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
3920 	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
3921 	default: return -1;
3922 	}
3923 }
3924 
3925 static int to_ib_qp_access_flags(int mlx4_flags)
3926 {
3927 	int ib_flags = 0;
3928 
3929 	if (mlx4_flags & MLX4_QP_BIT_RRE)
3930 		ib_flags |= IB_ACCESS_REMOTE_READ;
3931 	if (mlx4_flags & MLX4_QP_BIT_RWE)
3932 		ib_flags |= IB_ACCESS_REMOTE_WRITE;
3933 	if (mlx4_flags & MLX4_QP_BIT_RAE)
3934 		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
3935 
3936 	return ib_flags;
3937 }
3938 
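/*
 * Convert a hardware QP path (mlx4_qp_path) back into an rdma_ah_attr,
 * recovering the port, SL, DLID, path bits, static rate and, when
 * present, the GRH fields.
 */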
3939 static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
3940 			    struct rdma_ah_attr *ah_attr,
3941 			    struct mlx4_qp_path *path)
3942 {
3943 	struct mlx4_dev *dev = ibdev->dev;
3944 	u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
3945 
3946 	memset(ah_attr, 0, sizeof(*ah_attr));
3947 	if (port_num == 0 || port_num > dev->caps.num_ports)
3948 		return;
3949 	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
3950 
3951 	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3952 		rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
3953 			       ((path->sched_queue & 4) << 1));
3954 	else
3955 		rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf);
3956 	rdma_ah_set_port_num(ah_attr, port_num);
3957 
3958 	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
3959 	rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f);
3960 	rdma_ah_set_static_rate(ah_attr,
3961 				path->static_rate ? path->static_rate - 5 : 0);
3962 	if (path->grh_mylmc & (1 << 7)) {
3963 		rdma_ah_set_grh(ah_attr, NULL,
3964 				be32_to_cpu(path->tclass_flowlabel) & 0xfffff,
3965 				path->mgid_index,
3966 				path->hop_limit,
3967 				(be32_to_cpu(path->tclass_flowlabel)
3968 				 >> 20) & 0xff);
3969 		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
3970 	}
3971 }
3972 
3973 int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3974 		     struct ib_qp_init_attr *qp_init_attr)
3975 {
3976 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
3977 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
3978 	struct mlx4_qp_context context;
3979 	int mlx4_state;
3980 	int err = 0;
3981 
3982 	if (ibqp->rwq_ind_tbl)
3983 		return -EOPNOTSUPP;
3984 
3985 	mutex_lock(&qp->mutex);
3986 
3987 	if (qp->state == IB_QPS_RESET) {
3988 		qp_attr->qp_state = IB_QPS_RESET;
3989 		goto done;
3990 	}
3991 
3992 	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
3993 	if (err) {
3994 		err = -EINVAL;
3995 		goto out;
3996 	}
3997 
3998 	mlx4_state = be32_to_cpu(context.flags) >> 28;
3999 
4000 	qp->state		     = to_ib_qp_state(mlx4_state);
4001 	qp_attr->qp_state	     = qp->state;
4002 	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
4003 	qp_attr->path_mig_state	     =
4004 		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
4005 	qp_attr->qkey		     = be32_to_cpu(context.qkey);
4006 	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
4007 	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
4008 	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
4009 	qp_attr->qp_access_flags     =
4010 		to_ib_qp_access_flags(be32_to_cpu(context.params2));
4011 
4012 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
4013 		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
4014 		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
4015 		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
4016 		qp_attr->alt_port_num	=
4017 			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
4018 	}
4019 
4020 	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
4021 	if (qp_attr->qp_state == IB_QPS_INIT)
4022 		qp_attr->port_num = qp->port;
4023 	else
4024 		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
4025 
4026 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
4027 	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
4028 
4029 	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
4030 
4031 	qp_attr->max_dest_rd_atomic =
4032 		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
4033 	qp_attr->min_rnr_timer	    =
4034 		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
4035 	qp_attr->timeout	    = context.pri_path.ackto >> 3;
4036 	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
4037 	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
4038 	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;
4039 
4040 done:
4041 	qp_attr->cur_qp_state	     = qp_attr->qp_state;
4042 	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
4043 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
4044 
4045 	if (!ibqp->uobject) {
4046 		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
4047 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
4048 	} else {
4049 		qp_attr->cap.max_send_wr  = 0;
4050 		qp_attr->cap.max_send_sge = 0;
4051 	}
4052 
4053 	/*
4054 	 * We don't support inline sends for kernel QPs (yet), and we
4055 	 * don't know what userspace's value should be.
4056 	 */
4057 	qp_attr->cap.max_inline_data = 0;
4058 
4059 	qp_init_attr->cap	     = qp_attr->cap;
4060 
4061 	qp_init_attr->create_flags = 0;
4062 	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
4063 		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
4064 
4065 	if (qp->flags & MLX4_IB_QP_LSO)
4066 		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
4067 
4068 	if (qp->flags & MLX4_IB_QP_NETIF)
4069 		qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
4070 
4071 	qp_init_attr->sq_sig_type =
4072 		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
4073 		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
4074 
4075 out:
4076 	mutex_unlock(&qp->mutex);
4077 	return err;
4078 }
4079 
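/*
 * A receive WQ is implemented on top of a RAW_PACKET QP: after
 * validating the user command, an RQ-only QP is created whose receive
 * CQ is the WQ's CQ (the send CQ is a dummy).
 */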
4080 struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
4081 				struct ib_wq_init_attr *init_attr,
4082 				struct ib_udata *udata)
4083 {
4084 	struct mlx4_dev *dev = to_mdev(pd->device)->dev;
4085 	struct ib_qp_init_attr ib_qp_init_attr = {};
4086 	struct mlx4_ib_qp *qp;
4087 	struct mlx4_ib_create_wq ucmd;
4088 	int err, required_cmd_sz;
4089 
4090 	if (!udata)
4091 		return ERR_PTR(-EINVAL);
4092 
4093 	required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
4094 			  sizeof(ucmd.comp_mask);
4095 	if (udata->inlen < required_cmd_sz) {
4096 		pr_debug("invalid inlen\n");
4097 		return ERR_PTR(-EINVAL);
4098 	}
4099 
4100 	if (udata->inlen > sizeof(ucmd) &&
4101 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
4102 				 udata->inlen - sizeof(ucmd))) {
4103 		pr_debug("inlen is not supported\n");
4104 		return ERR_PTR(-EOPNOTSUPP);
4105 	}
4106 
4107 	if (udata->outlen)
4108 		return ERR_PTR(-EOPNOTSUPP);
4109 
4110 	if (init_attr->wq_type != IB_WQT_RQ) {
4111 		pr_debug("unsupported wq type %d\n", init_attr->wq_type);
4112 		return ERR_PTR(-EOPNOTSUPP);
4113 	}
4114 
4115 	if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
4116 	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
4117 		pr_debug("unsupported create_flags %u\n",
4118 			 init_attr->create_flags);
4119 		return ERR_PTR(-EOPNOTSUPP);
4120 	}
4121 
4122 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
4123 	if (!qp)
4124 		return ERR_PTR(-ENOMEM);
4125 
4126 	mutex_init(&qp->mutex);
4127 	qp->pri.vid = 0xFFFF;
4128 	qp->alt.vid = 0xFFFF;
4129 
4130 	ib_qp_init_attr.qp_context = init_attr->wq_context;
4131 	ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
4132 	ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
4133 	ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge;
4134 	ib_qp_init_attr.recv_cq = init_attr->cq;
4135 	ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */
4136 
4137 	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
4138 		ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;
4139 
4140 	err = create_rq(pd, &ib_qp_init_attr, udata, qp);
4141 	if (err) {
4142 		kfree(qp);
4143 		return ERR_PTR(err);
4144 	}
4145 
4146 	qp->ibwq.event_handler = init_attr->event_handler;
4147 	qp->ibwq.wq_num = qp->mqp.qpn;
4148 	qp->ibwq.state = IB_WQS_RESET;
4149 
4150 	return &qp->ibwq;
4151 }
4152 
4153 static int ib_wq2qp_state(enum ib_wq_state state)
4154 {
4155 	switch (state) {
4156 	case IB_WQS_RESET:
4157 		return IB_QPS_RESET;
4158 	case IB_WQS_RDY:
4159 		return IB_QPS_RTR;
4160 	default:
4161 		return IB_QPS_ERR;
4162 	}
4163 }
4164 
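/*
 * Apply a WQ state change to the underlying HW QP.  Moving to RDY from
 * RESET requires an intermediate RST->INIT transition that carries the
 * port number; on failure the QP is rolled back to RESET where possible.
 */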
4165 static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
4166 			      struct ib_udata *udata)
4167 {
4168 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
4169 	enum ib_qp_state qp_cur_state;
4170 	enum ib_qp_state qp_new_state;
4171 	int attr_mask;
4172 	int err;
4173 
4174 	/* ib_qp.state represents the WQ HW state while ib_wq.state represents
4175 	 * the WQ logic state.
4176 	 */
4177 	qp_cur_state = qp->state;
4178 	qp_new_state = ib_wq2qp_state(new_state);
4179 
4180 	if (ib_wq2qp_state(new_state) == qp_cur_state)
4181 		return 0;
4182 
4183 	if (new_state == IB_WQS_RDY) {
4184 		struct ib_qp_attr attr = {};
4185 
4186 		attr.port_num = qp->port;
4187 		attr_mask = IB_QP_PORT;
4188 
4189 		err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
4190 					  attr_mask, IB_QPS_RESET, IB_QPS_INIT,
4191 					  udata);
4192 		if (err) {
4193 			pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
4194 				 ibwq->wq_num);
4195 			return err;
4196 		}
4197 
4198 		qp_cur_state = IB_QPS_INIT;
4199 	}
4200 
4201 	attr_mask = 0;
4202 	err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
4203 				  qp_cur_state,  qp_new_state, udata);
4204 
4205 	if (err && (qp_cur_state == IB_QPS_INIT)) {
4206 		qp_new_state = IB_QPS_RESET;
4207 		if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
4208 					attr_mask, IB_QPS_INIT, IB_QPS_RESET,
4209 					udata)) {
4210 			pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
4211 				ibwq->wq_num);
4212 			qp_new_state = IB_QPS_INIT;
4213 		}
4214 	}
4215 
4216 	qp->state = qp_new_state;
4217 
4218 	return err;
4219 }
4220 
4221 int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
4222 		      u32 wq_attr_mask, struct ib_udata *udata)
4223 {
4224 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
4225 	struct mlx4_ib_modify_wq ucmd = {};
4226 	size_t required_cmd_sz;
4227 	enum ib_wq_state cur_state, new_state;
4228 	int err = 0;
4229 
4230 	required_cmd_sz = offsetof(typeof(ucmd), reserved) +
4231 				   sizeof(ucmd.reserved);
4232 	if (udata->inlen < required_cmd_sz)
4233 		return -EINVAL;
4234 
4235 	if (udata->inlen > sizeof(ucmd) &&
4236 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
4237 				 udata->inlen - sizeof(ucmd)))
4238 		return -EOPNOTSUPP;
4239 
4240 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
4241 		return -EFAULT;
4242 
4243 	if (ucmd.comp_mask || ucmd.reserved)
4244 		return -EOPNOTSUPP;
4245 
4246 	if (wq_attr_mask & IB_WQ_FLAGS)
4247 		return -EOPNOTSUPP;
4248 
4249 	cur_state = wq_attr->curr_wq_state;
4250 	new_state = wq_attr->wq_state;
4251 
4252 	if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
4253 		return -EINVAL;
4254 
4255 	if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET))
4256 		return -EINVAL;
4257 
4258 	/* Need to protect against the parent RSS, which may also modify the WQ
4259 	 * state.
4260 	 */
4261 	mutex_lock(&qp->mutex);
4262 
4263 	/* Can update the HW state only if an RSS QP has already been associated
4264 	 * with this WQ, so we can apply its port to the WQ.
4265 	 */
4266 	if (qp->rss_usecnt)
4267 		err = _mlx4_ib_modify_wq(ibwq, new_state, udata);
4268 
4269 	if (!err)
4270 		ibwq->state = new_state;
4271 
4272 	mutex_unlock(&qp->mutex);
4273 
4274 	return err;
4275 }
4276 
4277 int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
4278 {
4279 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
4280 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
4281 
4282 	if (qp->counter_index)
4283 		mlx4_ib_free_qp_counter(dev, qp);
4284 
4285 	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
4286 
4287 	kfree(qp);
4288 	return 0;
4289 }
4290 
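/*
 * Creating an RWQ indirection table here only validates the request:
 * the table size must be supported and the WQ numbers must be
 * consecutive and aligned to the table size; the response length is
 * then reported back to userspace.
 */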
4291 int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
4292 				 struct ib_rwq_ind_table_init_attr *init_attr,
4293 				 struct ib_udata *udata)
4294 {
4295 	struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
4296 	unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
4297 	struct ib_device *device = rwq_ind_table->device;
4298 	unsigned int base_wqn;
4299 	size_t min_resp_len;
4300 	int i, err = 0;
4301 
4302 	if (udata->inlen > 0 &&
4303 	    !ib_is_udata_cleared(udata, 0,
4304 				 udata->inlen))
4305 		return -EOPNOTSUPP;
4306 
4307 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
4308 	if (udata->outlen && udata->outlen < min_resp_len)
4309 		return -EINVAL;
4310 
4311 	if (ind_tbl_size >
4312 	    device->attrs.rss_caps.max_rwq_indirection_table_size) {
4313 		pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
4314 			 ind_tbl_size,
4315 			 device->attrs.rss_caps.max_rwq_indirection_table_size);
4316 		return -EINVAL;
4317 	}
4318 
4319 	base_wqn = init_attr->ind_tbl[0]->wq_num;
4320 
4321 	if (base_wqn % ind_tbl_size) {
4322 		pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
4323 			 base_wqn);
4324 		return -EINVAL;
4325 	}
4326 
4327 	for (i = 1; i < ind_tbl_size; i++) {
4328 		if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
4329 			pr_debug("indirection table's WQNs aren't consecutive\n");
4330 			return -EINVAL;
4331 		}
4332 	}
4333 
4334 	if (udata->outlen) {
4335 		resp.response_length = offsetof(typeof(resp), response_length) +
4336 					sizeof(resp.response_length);
4337 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
4338 	}
4339 
4340 	return err;
4341 }
4342 
4343 struct mlx4_ib_drain_cqe {
4344 	struct ib_cqe cqe;
4345 	struct completion done;
4346 };
4347 
4348 static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
4349 {
4350 	struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
4351 						     struct mlx4_ib_drain_cqe,
4352 						     cqe);
4353 
4354 	complete(&cqe->done);
4355 }
4356 
4357 /* This function returns only once the drained WR has completed */
4358 static void handle_drain_completion(struct ib_cq *cq,
4359 				    struct mlx4_ib_drain_cqe *sdrain,
4360 				    struct mlx4_ib_dev *dev)
4361 {
4362 	struct mlx4_dev *mdev = dev->dev;
4363 
4364 	if (cq->poll_ctx == IB_POLL_DIRECT) {
4365 		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
4366 			ib_process_cq_direct(cq, -1);
4367 		return;
4368 	}
4369 
4370 	if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
4371 		struct mlx4_ib_cq *mcq = to_mcq(cq);
4372 		bool triggered = false;
4373 		unsigned long flags;
4374 
4375 		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
4376 		/* Make sure that the CQ handler won't run if it hasn't run yet */
4377 		if (!mcq->mcq.reset_notify_added)
4378 			mcq->mcq.reset_notify_added = 1;
4379 		else
4380 			triggered = true;
4381 		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
4382 
4383 		if (triggered) {
4384 			/* Wait for any scheduled/running task to end */
4385 			switch (cq->poll_ctx) {
4386 			case IB_POLL_SOFTIRQ:
4387 				irq_poll_disable(&cq->iop);
4388 				irq_poll_enable(&cq->iop);
4389 				break;
4390 			case IB_POLL_WORKQUEUE:
4391 				cancel_work_sync(&cq->work);
4392 				break;
4393 			default:
4394 				WARN_ON_ONCE(1);
4395 			}
4396 		}
4397 
4398 		/* Run the CQ handler - this makes sure that the drain WR will
4399 		 * be processed if it wasn't processed yet.
4400 		 */
4401 		mcq->mcq.comp(&mcq->mcq);
4402 	}
4403 
4404 	wait_for_completion(&sdrain->done);
4405 }
4406 
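/*
 * Drain the send queue: move the QP to the error state, then post a
 * marker RDMA_WRITE WR whose completion indicates that all previously
 * posted send WRs have been flushed.  handle_drain_completion() copes
 * with direct-poll CQs and with devices in internal-error state.
 */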
4407 void mlx4_ib_drain_sq(struct ib_qp *qp)
4408 {
4409 	struct ib_cq *cq = qp->send_cq;
4410 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
4411 	struct mlx4_ib_drain_cqe sdrain;
4412 	const struct ib_send_wr *bad_swr;
4413 	struct ib_rdma_wr swr = {
4414 		.wr = {
4415 			.next = NULL,
4416 			{ .wr_cqe	= &sdrain.cqe, },
4417 			.opcode	= IB_WR_RDMA_WRITE,
4418 		},
4419 	};
4420 	int ret;
4421 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
4422 	struct mlx4_dev *mdev = dev->dev;
4423 
4424 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
4425 	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
4426 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
4427 		return;
4428 	}
4429 
4430 	sdrain.cqe.done = mlx4_ib_drain_qp_done;
4431 	init_completion(&sdrain.done);
4432 
4433 	ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
4434 	if (ret) {
4435 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
4436 		return;
4437 	}
4438 
4439 	handle_drain_completion(cq, &sdrain, dev);
4440 }
4441 
4442 void mlx4_ib_drain_rq(struct ib_qp *qp)
4443 {
4444 	struct ib_cq *cq = qp->recv_cq;
4445 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
4446 	struct mlx4_ib_drain_cqe rdrain;
4447 	struct ib_recv_wr rwr = {};
4448 	const struct ib_recv_wr *bad_rwr;
4449 	int ret;
4450 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
4451 	struct mlx4_dev *mdev = dev->dev;
4452 
4453 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
4454 	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
4455 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
4456 		return;
4457 	}
4458 
4459 	rwr.wr_cqe = &rdrain.cqe;
4460 	rdrain.cqe.done = mlx4_ib_drain_qp_done;
4461 	init_completion(&rdrain.done);
4462 
4463 	ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);
4464 	if (ret) {
4465 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
4466 		return;
4467 	}
4468 
4469 	handle_drain_completion(cq, &rdrain, dev);
4470 }
4471