• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/export.h>
34 #include <linux/etherdevice.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/eswitch.h>
38 #include "mlx5_core.h"
39 
40 /* Mutex to hold while enabling or disabling RoCE */
41 static DEFINE_MUTEX(mlx5_roce_en_lock);
42 
/* Read the current operational state of a vport via QUERY_VPORT_STATE.
 * Returns the state field on success; on command failure returns 0.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	/* vport 0 is the local function; other_vport only for a foreign one */
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	if (mlx5_cmd_exec_inout(mdev, query_vport_state, in, out))
		return 0;	/* query failed: report state 0 */

	return MLX5_GET(query_vport_state_out, out, state);
}
62 
/* Set the administrative state of a vport via MODIFY_VPORT_STATE.
 * Returns 0 on success or a negative errno from the command interface.
 */
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 other_vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);
	MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);

	return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
77 
/* Fetch the NIC vport context of @vport into @out, which must hold a
 * query_nic_vport_context_out layout.  Returns 0 or a negative errno.
 */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	/* non-zero vport: query on behalf of another function */
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
91 
/* Read the minimum WQE inline mode stored in @vport's NIC vport context.
 * *min_inline is written only on success.  Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out);
	if (err)
		return err;

	*min_inline = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.min_wqe_inline_mode);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
105 
/* Resolve the effective minimum inline mode for the local (vport 0) function.
 *
 * When the device keeps the mode in the vport context, use that value; if
 * that query fails, fall through to the conservative L2 default.  There is
 * deliberately no 'default' case: for capability values not listed here
 * *min_inline_mode is left untouched.
 */
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
			break;
		/* query failed: assume L2 inline is required */
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
123 
/* Program @min_inline as the minimum WQE inline mode of @vport.
 * Always issued as an other-vport access.  Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				 nic_vport_context);

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(nic_vport_context, ctx, min_wqe_inline_mode, min_inline);

	return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
}
144 
/* Read the permanent MAC address of @vport into @addr.
 * @other selects other-vport access.  Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, bool other, u8 *addr)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	u8 *mac;
	int err;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
	if (err)
		return err;

	/* permanent_address is 8 bytes; the 6-byte MAC sits in bytes 2..7 */
	mac = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			   nic_vport_context.permanent_address);
	ether_addr_copy(addr, &mac[2]);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
168 
/* Convenience wrapper: permanent MAC of the local function (vport 0,
 * no other-vport access).
 */
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
174 
/* Set the permanent MAC address of @vport (other-vport access).
 * Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, const u8 *addr)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;
	u8 *perm_mac;
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);
	/* the 6-byte MAC occupies bytes 2..7 of the 8-byte field */
	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
209 
/* Read the MTU stored in the local NIC vport context.
 * *mtu is written only on success.  Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	u32 *out;
	int err;

	out = kvzalloc(MLX5_ST_SZ_BYTES(query_nic_vport_context_out),
		       GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
229 
/* Program @mtu into the local NIC vport context.
 * Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	void *in;
	int err;

	in = kvzalloc(MLX5_ST_SZ_BYTES(modify_nic_vport_context_in),
		      GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
251 
/* Read the allowed UC/MC MAC address list of @vport.
 *
 * On entry *list_size is the capacity of @addr_list in entries; on
 * successful return it holds the number of addresses actually reported
 * by firmware and copied out.  Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	/* Device limit for the requested list type */
	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* Size the buffer from the *output* layout plus room for the MAC
	 * entries.  The previous use of query_nic_vport_context_in (the
	 * 16-byte command header) undersized the buffer relative to the
	 * nic_vport_context fields read below.
	 */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	/* firmware reports the number of entries it returned.
	 * NOTE(review): assumed to be <= the requested size — a larger
	 * value would overrun addr_list; confirm against the PRM.
	 */
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* MAC is in bytes 2..7 of each 8-byte mac_address_layout */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					nic_vport_ctx,
					current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
313 
/* Program the allowed UC/MC MAC address list of the local vport.
 * Returns -ENOSPC when @list_size exceeds the device limit, -ENOMEM on
 * allocation failure, otherwise the command status.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *nic_vport_ctx;
	int max_list_size;
	void *in;
	int in_sz;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
	if (list_size > max_list_size)
		return -ENOSPC;

	/* base layout plus one 8-byte entry per address */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* MAC goes into bytes 2..7 of each mac_address_layout */
		u8 *dst = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				       current_uc_mac_address[i]) + 2;

		ether_addr_copy(dst, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
366 
/* Program the allowed VLAN list of the local vport.
 * Returns -ENOSPC when @list_size exceeds the device limit, -ENOMEM on
 * allocation failure, otherwise the command status.
 */
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	/* Initialize at declaration (consistent with the sibling
	 * mlx5_modify_nic_vport_mac_list) instead of a separate memset.
	 */
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* VLAN entries share the current_uc_mac_address slots;
		 * each slot is reinterpreted as a vlan_layout.
		 */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
417 
/* Read the system image GUID from the local NIC vport context.
 * *system_image_guid is written only on success.
 * Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	/* Propagate a query failure instead of silently returning the
	 * zeroed buffer contents as a GUID (previous behavior).
	 */
	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (err)
		goto out;

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
438 
/* Read the node GUID from the local NIC vport context.
 * *node_guid is written only on success.  Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	/* Propagate a query failure instead of silently returning the
	 * zeroed buffer contents as a GUID (previous behavior).
	 */
	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (err)
		goto out;

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
458 
/* Set the node GUID of @vport (other-vport access).
 * Requires the vport group manager capability; returns -EACCES otherwise,
 * -ENOMEM on allocation failure, or the command status.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u16 vport, u64 node_guid)
{
	void *ctx;
	void *in;
	int err;

	/* only the group manager may rewrite another function's node GUID */
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;

	in = kvzalloc(MLX5_ST_SZ_BYTES(modify_nic_vport_context_in),
		      GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	MLX5_SET64(nic_vport_context, ctx, node_guid, node_guid);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
	kvfree(in);
	return err;
}
491 
/* Read the Q_Key violation counter from the local NIC vport context.
 * *qkey_viol_cntr is written only on success.
 * Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	/* Propagate a query failure instead of silently reporting a zero
	 * counter from the untouched buffer (previous behavior).
	 */
	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (err)
		goto out;

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
512 
/* Query a GID from the HCA vport GID table.
 *
 * @other_vport: non-zero to query another function's table (requires the
 *               vport group manager capability, else -EPERM).
 * @gid_index:   table index, or 0xffff to request the whole table.
 *
 * Only the first returned GID is copied into @gid, even when the whole
 * table was requested (the output buffer is sized for @nout entries but
 * the extra entries are discarded).
 * Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16  vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	/* NOTE(review): '>' accepts gid_index == tbsz, which looks like an
	 * off-by-one (valid indices should be 0..tbsz-1) — confirm.
	 */
	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* room for the requested number of GID entries after the header */
	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* GID entries start immediately after the fixed output layout */
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
578 
/* Query one P_Key — or the whole table — from the HCA vport P_Key table.
 *
 * @other_vport: non-zero to query another function's table (requires the
 *               vport group manager capability, else -EPERM).
 * @pkey_index:  table index, or 0xffff to read the full table; in that
 *               case @pkey must have room for the whole software table.
 *
 * Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	/* NOTE(review): '>' accepts pkey_index == tbsz — looks like an
	 * off-by-one (valid indices should be 0..tbsz-1); confirm.
	 */
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* room for the requested number of pkey entries after the header */
	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* copy each returned entry into the caller's array */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
643 
/* Query the HCA vport context and unpack it into @rep.
 *
 * @other_vport: non-zero to query another function's context (requires
 *               the vport group manager capability, else -EPERM).
 *
 * Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	/* u32, not int: command mailboxes are arrays of big-endian dwords,
	 * matching every other command buffer in this file.
	 */
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
	if (err)
		goto ex;

	/* unpack every context field into the host representation */
	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	/* NOTE(review): phys_state and port_physical_state both read the
	 * same port_physical_state field — appears intentional (legacy
	 * duplicate), confirm before removing either.
	 */
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
721 
/* Read the system image GUID via the HCA vport context of the local
 * function (other_vport=0, port 1).  Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *ctx;
	int err;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, ctx);
	if (!err)
		*sys_image_guid = ctx->sys_image_guid;

	kfree(ctx);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
740 
/* Read the node GUID via the HCA vport context of the local function
 * (other_vport=0, port 1).  Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *ctx;
	int err;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, ctx);
	if (!err)
		*node_guid = ctx->node_guid;

	kfree(ctx);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
759 
/* Read the three promiscuous-mode flags from @vport's NIC vport context.
 * The outputs are written only on success.  Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out);
	if (!err) {
		*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.promisc_uc);
		*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.promisc_mc);
		*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
					nic_vport_context.promisc_all);
	}

	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
790 
mlx5_modify_nic_vport_promisc(struct mlx5_core_dev * mdev,int promisc_uc,int promisc_mc,int promisc_all)791 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
792 				  int promisc_uc,
793 				  int promisc_mc,
794 				  int promisc_all)
795 {
796 	void *in;
797 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
798 	int err;
799 
800 	in = kvzalloc(inlen, GFP_KERNEL);
801 	if (!in)
802 		return -ENOMEM;
803 
804 	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
805 	MLX5_SET(modify_nic_vport_context_in, in,
806 		 nic_vport_context.promisc_uc, promisc_uc);
807 	MLX5_SET(modify_nic_vport_context_in, in,
808 		 nic_vport_context.promisc_mc, promisc_mc);
809 	MLX5_SET(modify_nic_vport_context_in, in,
810 		 nic_vport_context.promisc_all, promisc_all);
811 	MLX5_SET(modify_nic_vport_context_in, in, opcode,
812 		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
813 
814 	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
815 
816 	kvfree(in);
817 
818 	return err;
819 }
820 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
821 
/* Bit positions used when folding the two local-loopback disable flags
 * from the NIC vport context into a single value.
 */
enum {
	UC_LOCAL_LB,	/* unicast  local loopback disabled bit */
	MC_LOCAL_LB	/* multicast local loopback disabled bit */
};
826 
/* Enable or disable local loopback for UC and MC traffic on the local
 * vport.  Devices that cannot disable either class silently return 0.
 * Returns 0 or a negative errno.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	/* context stores "disable" flags, hence the inversion of @enable */
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	/* select only the flags this device actually supports */
	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);
	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
866 
/* Report whether local loopback is fully enabled on the local vport.
 * *status is true only when neither UC nor MC loopback is disabled;
 * written only on success.  Returns 0 or a negative errno.
 */
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int lb_bits = 0;
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (err)
		goto done;

	lb_bits |= MLX5_GET(query_nic_vport_context_out, out,
			    nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;
	lb_bits |= MLX5_GET(query_nic_vport_context_out, out,
			    nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	/* loopback is "on" only when no disable bit is set */
	*status = !lb_bits;

done:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
895 
/* Values written to nic_vport_context.roce_en by
 * mlx5_nic_vport_update_roce_state().
 */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
900 
mlx5_nic_vport_update_roce_state(struct mlx5_core_dev * mdev,enum mlx5_vport_roce_state state)901 static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
902 					    enum mlx5_vport_roce_state state)
903 {
904 	void *in;
905 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
906 	int err;
907 
908 	in = kvzalloc(inlen, GFP_KERNEL);
909 	if (!in)
910 		return -ENOMEM;
911 
912 	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
913 	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
914 		 state);
915 	MLX5_SET(modify_nic_vport_context_in, in, opcode,
916 		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
917 
918 	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
919 
920 	kvfree(in);
921 
922 	return err;
923 }
924 
/* Reference-counted RoCE enable for the local vport.
 *
 * The first caller flips roce_en on in the NIC vport context; later
 * callers only bump the refcount.  The count is only incremented when
 * the firmware update (if any) succeeded.  Serialized by
 * mlx5_roce_en_lock.  Returns 0 or a negative errno.
 */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (!mdev->roce.roce_en)
		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);

	if (!err)
		mdev->roce.roce_en++;
	mutex_unlock(&mlx5_roce_en_lock);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
940 
mlx5_nic_vport_disable_roce(struct mlx5_core_dev * mdev)941 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
942 {
943 	int err = 0;
944 
945 	mutex_lock(&mlx5_roce_en_lock);
946 	if (mdev->roce.roce_en) {
947 		mdev->roce.roce_en--;
948 		if (mdev->roce.roce_en == 0)
949 			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
950 
951 		if (err)
952 			mdev->roce.roce_en++;
953 	}
954 	mutex_unlock(&mlx5_roce_en_lock);
955 	return err;
956 }
957 EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
958 
/* Query the per-vport traffic counters.
 *
 * @other_vport: when non-zero, query VF @vf instead of our own vport
 *		 (requires the vport_group_manager capability).
 * @vf:		 0-based VF index; the firmware vport number is vf + 1.
 * @port_num:	 physical port to query; only written on dual-port HCAs.
 * @out:	 caller-provided output buffer for the
 *		 QUERY_VPORT_COUNTER command.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EPERM when
 * querying another vport without group-manager rights, or the
 * firmware command status.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (!is_group_manager) {
			/* Only the group manager may query other vports. */
			err = -EPERM;
			goto free;
		}
		MLX5_SET(query_vport_counter_in, in, other_vport, 1);
		MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
994 
/* Fetch the receive/transmit "packets discarded while vport was down"
 * counters for @vport via the QUERY_VNIC_ENV command. Returns 0 on
 * success or a negative errno / firmware status.
 */
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
				u8 other_vport, u64 *rx_discard_vport_down,
				u64 *tx_discard_vport_down)
{
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport);

	err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
	if (err)
		return err;

	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.receive_discard_vport_down);
	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.transmit_discard_vport_down);
	return 0;
}
1019 
/* Modify the HCA (IB) vport context of our own vport or, when acting
 * as group manager, of VF @vf on physical port @port_num.
 *
 * req->field_select gates which optional fields are copied (vport
 * state policy, port GUID, node GUID); cap_mask1 and its
 * field-select are written unconditionally.
 * Returns 0 on success or a negative errno / firmware status.
 */
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	int is_group_manager;
	void *ctx;
	void *in;
	int err;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			/* Only the group manager may touch other vports. */
			err = -EPERM;
			goto ex;
		}
	}

	/* port_num is only meaningful on multi-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
		MLX5_SET(hca_vport_context, ctx, vport_state_policy,
			 req->policy);
	if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
		MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
		MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
		 req->cap_mask1_perm);
	err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1069 
/* Affiliate @port_mdev's NIC vport with @master_mdev (by the master's
 * vhca_id) to form a multi-port RoCE device. A RoCE reference is
 * taken on the port device first; it is dropped again if the
 * affiliation command fails, so on error the RoCE state is unchanged.
 */
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
				       struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = mlx5_nic_vport_enable_roce(port_mdev);
	if (err)
		goto free;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliated_vhca_id,
		 MLX5_CAP_GEN(master_mdev, vhca_id));
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria,
		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
	if (err)
		/* Undo the RoCE reference taken above. */
		mlx5_nic_vport_disable_roce(port_mdev);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
1104 
mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev * port_mdev)1105 int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
1106 {
1107 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1108 	void *in;
1109 	int err;
1110 
1111 	in = kvzalloc(inlen, GFP_KERNEL);
1112 	if (!in)
1113 		return -ENOMEM;
1114 
1115 	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1116 	MLX5_SET(modify_nic_vport_context_in, in,
1117 		 nic_vport_context.affiliated_vhca_id, 0);
1118 	MLX5_SET(modify_nic_vport_context_in, in,
1119 		 nic_vport_context.affiliation_criteria, 0);
1120 	MLX5_SET(modify_nic_vport_context_in, in, opcode,
1121 		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1122 
1123 	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
1124 	if (!err)
1125 		mlx5_nic_vport_disable_roce(port_mdev);
1126 
1127 	kvfree(in);
1128 	return err;
1129 }
1130 EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
1131 
mlx5_query_nic_system_image_guid(struct mlx5_core_dev * mdev)1132 u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
1133 {
1134 	int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
1135 	u64 tmp = 0;
1136 
1137 	if (mdev->sys_image_guid)
1138 		return mdev->sys_image_guid;
1139 
1140 	if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
1141 		mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
1142 	else
1143 		mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
1144 
1145 	mdev->sys_image_guid = tmp;
1146 
1147 	return tmp;
1148 }
1149 EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
1150 
1151 /**
1152  * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
1153  *
1154  * @dev:	Pointer to core device
1155  *
1156  * mlx5_eswitch_get_total_vports returns total number of vports for
1157  * the eswitch.
1158  */
mlx5_eswitch_get_total_vports(const struct mlx5_core_dev * dev)1159 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
1160 {
1161 	return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
1162 }
1163 EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
1164