/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);

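/*
 * Execute QUERY_VPORT_STATE for @vport; @opmod selects which kind of
 * vport state to query. A non-zero @vport refers to another vport, so
 * the other_vport bit is set accordingly.
 */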
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

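/* Return the operational state field of the queried vport. */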
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

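/* Return the administrative state field of the queried vport. */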
u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

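/* Set the administrative state of the given vport. */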
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

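/*
 * Read the NIC vport context of @vport into @out; @out must hold a
 * query_nic_vport_context_out layout of @outlen bytes.
 */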
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

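/*
 * Write a NIC vport context. The caller fills @in (a
 * modify_nic_vport_context_in layout) including the field_select bits;
 * only the opcode is set here.
 */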
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

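/* Read the minimal WQE inline mode from the NIC vport context of @vport. */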
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

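/*
 * Resolve the effective minimal inline mode: taken directly from the
 * device capability, or read from vport 0's NIC vport context when the
 * capability says the mode is vport-controlled.
 */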
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);

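/*
 * Set the minimal WQE inline mode of @vport. Note that other_vport is
 * set unconditionally here, so the caller is presumably managing a
 * vport other than its own.
 */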
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}

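/*
 * Read the permanent MAC address of @vport. The permanent_address field
 * is 8 bytes wide with the 6-byte MAC in its upper bytes, hence the +2
 * offset before copying.
 */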
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

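/*
 * Program the permanent MAC address of @vport, using the same 2-byte
 * offset into permanent_address as the query above.
 */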
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

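/* Read the MTU from the NIC vport context of vport 0. */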
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

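/* Set the MTU in the NIC vport context of vport 0. */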
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

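/*
 * Read the current UC or MC address list of @vport. On entry *list_size
 * is the capacity of @addr_list; on return it is the number of entries
 * actually reported by the device.
 */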
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) exceeds max list size (%d), capping\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

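/*
 * Replace the allowed UC or MC address list of the local vport with
 * @addr_list. Fails with -ENOSPC if @list_size exceeds the device
 * maximum for that list type.
 */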
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

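/*
 * Read the allowed VLAN list of @vport. VLAN entries are carried in the
 * same allowed-list slots as MAC addresses (current_uc_mac_address) and
 * interpreted through the vlan_layout struct.
 */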
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) exceeds max list size (%d), capping\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

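/* Replace the allowed VLAN list of the local vport with @vlans. */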
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

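/* Read the system image GUID from the NIC vport context of vport 0. */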
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

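/*
 * Set the node GUID of another vport. Only valid for a non-zero @vport
 * and only when this device is the vport group manager.
 */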
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

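/*
 * Query GIDs of an HCA vport. A @gid_index of 0xffff requests the whole
 * GID table; in either case only the first returned entry is copied
 * back into @gid.
 */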
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

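/*
 * Query P_Keys of an HCA vport. A @pkey_index of 0xffff requests the
 * whole P_Key table; all returned entries are copied into @pkey.
 */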
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

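/*
 * Read the full HCA vport context into @rep. Querying another vport
 * requires the vport_group_manager capability.
 */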
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

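/* Bit positions used by mlx5_nic_vport_query_local_lb() below. */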
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};

int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);

int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int value;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	value = MLX5_GET(query_nic_vport_context_out, out,
			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	value |= MLX5_GET(query_nic_vport_context_out, out,
			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	*status = !value;

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);

enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

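/*
 * Enable RoCE on the NIC vport. Enablement is reference-counted under
 * mlx5_roce_en_lock: the device state only changes on the first enable
 * and the last disable.
 */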
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (!mdev->roce.roce_en)
		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);

	if (!err)
		mdev->roce.roce_en++;
	mutex_unlock(&mlx5_roce_en_lock);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (mdev->roce.roce_en) {
		mdev->roce.roce_en--;
		if (mdev->roce.roce_en == 0)
			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);

		if (err)
			mdev->roce.roce_en++;
	}
	mutex_unlock(&mlx5_roce_en_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

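/*
 * Query the vport counter set into @out. For another vport, @vf is the
 * 0-based VF number; the device vport number is @vf + 1 (vport 0 being
 * the PF).
 */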
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

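/*
 * Write the HCA vport context fields selected by @req->field_select.
 * Modifying another vport requires the vport_group_manager capability.
 */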
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)] = {0};
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1132