• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/gfp.h>
34 #include <linux/export.h>
35 #include <linux/mlx5/cmd.h>
36 #include <linux/mlx5/qp.h>
37 #include <linux/mlx5/driver.h>
38 #include <linux/mlx5/transobj.h>
39 
40 #include "mlx5_core.h"
41 
/* Look up the common-resource entry registered under @rsn in the device
 * QP table and take a reference on it.  Returns NULL (after logging a
 * warning) when no resource with that serial number exists; otherwise
 * the caller must release the reference via mlx5_core_put_rsc().
 */
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
						 u32 rsn)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_core_rsc_common *common = NULL;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		atomic_inc(&common->refcount);
	spin_unlock_irqrestore(&table->lock, flags);

	if (!common)
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
			       rsn);

	return common;
}
64 
mlx5_core_put_rsc(struct mlx5_core_rsc_common * common)65 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
66 {
67 	if (atomic_dec_and_test(&common->refcount))
68 		complete(&common->free);
69 }
70 
qp_allowed_event_types(void)71 static u64 qp_allowed_event_types(void)
72 {
73 	u64 mask;
74 
75 	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
76 	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
77 	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
78 	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
79 	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
80 	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
81 	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
82 	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
83 
84 	return mask;
85 }
86 
rq_allowed_event_types(void)87 static u64 rq_allowed_event_types(void)
88 {
89 	u64 mask;
90 
91 	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
92 	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
93 
94 	return mask;
95 }
96 
sq_allowed_event_types(void)97 static u64 sq_allowed_event_types(void)
98 {
99 	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
100 }
101 
is_event_type_allowed(int rsc_type,int event_type)102 static bool is_event_type_allowed(int rsc_type, int event_type)
103 {
104 	switch (rsc_type) {
105 	case MLX5_EVENT_QUEUE_TYPE_QP:
106 		return BIT(event_type) & qp_allowed_event_types();
107 	case MLX5_EVENT_QUEUE_TYPE_RQ:
108 		return BIT(event_type) & rq_allowed_event_types();
109 	case MLX5_EVENT_QUEUE_TYPE_SQ:
110 		return BIT(event_type) & sq_allowed_event_types();
111 	default:
112 		WARN(1, "Event arrived for unknown resource type");
113 		return false;
114 	}
115 }
116 
/* Dispatch an async hardware event for resource @rsn to the owner's
 * event callback.  Events that are not valid for the resource's type,
 * or that target an unknown resource, are logged and dropped.
 */
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
	struct mlx5_core_rsc_common *common;
	struct mlx5_core_qp *qp;

	common = mlx5_get_rsc(dev, rsn);	/* takes a reference */
	if (!common)
		return;

	/* The resource type is encoded in the high bits of the rsn. */
	if (!is_event_type_allowed(rsn >> MLX5_USER_INDEX_LEN, event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
			       event_type, rsn);
		goto out;
	}

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;
	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
		break;
	}
out:
	mlx5_core_put_rsc(common);
}
145 
create_qprqsq_common(struct mlx5_core_dev * dev,struct mlx5_core_qp * qp,int rsc_type)146 static int create_qprqsq_common(struct mlx5_core_dev *dev,
147 				struct mlx5_core_qp *qp,
148 				int rsc_type)
149 {
150 	struct mlx5_qp_table *table = &dev->priv.qp_table;
151 	int err;
152 
153 	qp->common.res = rsc_type;
154 	spin_lock_irq(&table->lock);
155 	err = radix_tree_insert(&table->tree,
156 				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
157 				qp);
158 	spin_unlock_irq(&table->lock);
159 	if (err)
160 		return err;
161 
162 	atomic_set(&qp->common.refcount, 1);
163 	init_completion(&qp->common.free);
164 	qp->pid = current->pid;
165 
166 	return 0;
167 }
168 
destroy_qprqsq_common(struct mlx5_core_dev * dev,struct mlx5_core_qp * qp)169 static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
170 				  struct mlx5_core_qp *qp)
171 {
172 	struct mlx5_qp_table *table = &dev->priv.qp_table;
173 	unsigned long flags;
174 
175 	spin_lock_irqsave(&table->lock, flags);
176 	radix_tree_delete(&table->tree,
177 			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
178 	spin_unlock_irqrestore(&table->lock, flags);
179 	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
180 	wait_for_completion(&qp->common.free);
181 }
182 
/* Create a QP in firmware from the caller-built command in @in, record
 * the returned qpn, and register the QP for event tracking and debugfs.
 * On tracking failure the firmware object is destroyed again.  A debugfs
 * registration failure is only logged; it does not fail the create.
 */
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in, int inlen)
{
	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	/* Undo the firmware create; best effort, original error is kept. */
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
223 
mlx5_core_destroy_qp(struct mlx5_core_dev * dev,struct mlx5_core_qp * qp)224 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
225 			 struct mlx5_core_qp *qp)
226 {
227 	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
228 	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
229 	int err;
230 
231 	mlx5_debug_qp_remove(dev, qp);
232 
233 	destroy_qprqsq_common(dev, qp);
234 
235 	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
236 	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
237 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
238 	if (err)
239 		return err;
240 
241 	atomic_dec(&dev->num_qps);
242 	return 0;
243 }
244 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
245 
/* Program the device delay-drop timeout.  @timeout_usec is expressed in
 * microseconds; the command field is written in units of 100 usec (hence
 * the division).  Returns the firmware command status.
 */
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
			     u32 timeout_usec)
{
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
259 
/* Scratch mailboxes for one firmware command invocation; filled in by
 * mbox_alloc()/modify_qp_mbox_alloc() and released with mbox_free().
 */
struct mbox_info {
	u32 *in;	/* command input buffer (heap-allocated) */
	u32 *out;	/* command output buffer (heap-allocated) */
	int inlen;	/* size of @in in bytes */
	int outlen;	/* size of @out in bytes */
};
266 
mbox_alloc(struct mbox_info * mbox,int inlen,int outlen)267 static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
268 {
269 	mbox->inlen  = inlen;
270 	mbox->outlen = outlen;
271 	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
272 	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
273 	if (!mbox->in || !mbox->out) {
274 		kfree(mbox->in);
275 		kfree(mbox->out);
276 		return -ENOMEM;
277 	}
278 
279 	return 0;
280 }
281 
mbox_free(struct mbox_info * mbox)282 static void mbox_free(struct mbox_info *mbox)
283 {
284 	kfree(mbox->in);
285 	kfree(mbox->out);
286 }
287 
/* Allocate and pre-fill the command mailboxes for a modify-QP transition.
 * The input layout differs per transition opcode, so the mailbox sizes and
 * the fields written depend on @opcode; transitions that carry a QP context
 * also copy @qpc and @opt_param_mask into the input mailbox.
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an
 * unknown transition opcode.  On success the caller owns the mailboxes and
 * must release them with mbox_free().
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

/* Multi-statement helpers are wrapped in do { } while (0) so each use
 * expands to exactly one statement (safe inside an unbraced if/else).
 */
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
	do { \
		MLX5_SET(typ##_in, in, opcode, _opcode); \
		MLX5_SET(typ##_in, in, qpn, _qpn); \
	} while (0)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
	do { \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
		       MLX5_ST_SZ_BYTES(qpc)); \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			      opcode, qpn);
		return -EINVAL;
	}
	return 0;
}
364 
/* Execute a modify-QP state transition identified by @opcode on @qp,
 * passing @qpc/@opt_param_mask for transitions that need a QP context.
 * Returns 0 on success or a negative error from mailbox allocation or
 * the firmware command.
 */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn, opt_param_mask, qpc,
				   &mbox);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);

	mbox_free(&mbox);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
382 
/* Initialize the per-device table that tracks QP/RQ/SQ resources for
 * async event dispatch, and register the QP debugfs entries.
 */
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	/* GFP_ATOMIC: tree nodes are inserted while holding table->lock */
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);
}
392 
/* Tear down QP table state set up by mlx5_init_qp_table(); currently
 * only the debugfs entries need explicit cleanup.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
397 
/* Query @qp's firmware state into the caller-supplied @out buffer of
 * @outlen bytes.  Returns the firmware command status.
 */
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
408 
/* Allocate an XRC domain; on success store its number in *@xrcdn.
 * Returns the firmware command status.
 */
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
422 
/* Release the XRC domain @xrcdn.  Returns the firmware command status. */
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};

	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
433 
/* Create an RQ from the command in @in and register it in the resource
 * table so async events reach @rq->event.  If registration fails, the
 * freshly created RQ is destroyed again.
 */
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	u32 rqn = 0;
	int err;

	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
	if (err)
		return err;

	rq->qpn = rqn;
	err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
	if (err) {
		mlx5_core_destroy_rq(dev, rq->qpn);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
457 
/* Unregister @rq from the resource table (waiting for in-flight event
 * handlers to finish) and then destroy the RQ in firmware.
 */
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq)
{
	destroy_qprqsq_common(dev, rq);
	mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
465 
/* Create an SQ from the command in @in and register it in the resource
 * table so async events reach @sq->event.  If registration fails, the
 * freshly created SQ is destroyed again.
 */
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	u32 sqn = 0;
	int err;

	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
	if (err)
		return err;

	sq->qpn = sqn;
	err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
	if (err) {
		mlx5_core_destroy_sq(dev, sq->qpn);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
489 
/* Unregister @sq from the resource table (waiting for in-flight event
 * handlers to finish) and then destroy the SQ in firmware.
 */
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_qprqsq_common(dev, sq);
	mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
497 
/* Allocate a queue counter set; on success store its id in *@counter_id.
 * Returns the firmware command status.
 */
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*counter_id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
512 
/* Release the queue counter set @counter_id.  Returns the firmware
 * command status.
 */
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};

	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
524 
/* Read the queue counter set @counter_id into @out (@out_size bytes),
 * optionally clearing the counters when @reset is non-zero.  Returns
 * the firmware command status.
 */
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
536