/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include "mlx5_core.h"

/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);

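/* Ethernet representors require eswitch support to be compiled in and the
 * device to be an eswitch manager running in switchdev mode.
 */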
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	return true;
}

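/* Check the minimum set of HCA and Ethernet capabilities the mlx5 netdev
 * driver depends on. A missing mandatory capability fails the check with
 * a warning; the last two capabilities are optional and only warn.
 */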
bool mlx5_eth_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
		mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
		return false;
	}

	if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
		mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, csum_cap)) {
		mlx5_core_warn(dev, "Missing csum_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
		mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, vlan_cap)) {
		mlx5_core_warn(dev, "Missing vlan_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
		mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
		return false;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.max_ft_level) < 3) {
		mlx5_core_warn(dev, "max_ft_level < 3\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
		mlx5_core_warn(dev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(dev, cq_moderation))
		mlx5_core_warn(dev, "CQ moderation is not supported\n");

	return true;
}

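/* Read the devlink "enable_eth" driverinit parameter; treat a read
 * failure as disabled.
 */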
static bool is_eth_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
						 &val);
	return err ? false : val.vbool;
}

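/* vDPA net devices are created only for non-PF functions that expose the
 * virtio net queue general object and support QP event mode and an
 * ethernet frame offload type.
 */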
bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
		return false;

	if (mlx5_core_is_pf(dev))
		return false;

	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
	      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
		return false;

	if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
	      MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
		return false;

	if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
		return false;

	return true;
}

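/* Read the devlink "enable_vnet" driverinit parameter; treat a read
 * failure as disabled.
 */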
static bool is_vnet_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
						 &val);
	return err ? false : val.vbool;
}

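/* IB representors build on the Ethernet representor requirements and are
 * additionally excluded when IB auxiliary devices are disabled or the
 * device is in multi-port mode.
 */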
static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (!is_eth_rep_supported(dev))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	if (mlx5_core_mp_enabled(dev))
		return false;

	return true;
}

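/* The multi-port IB device is created only on the slave of a multi-port
 * Ethernet function pair; the master gets the regular rdma device.
 */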
static bool is_mp_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!mlx5_core_is_mp_slave(dev))
		return false;

	return true;
}

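/* A regular rdma device is created whenever IB support is available and
 * neither the representor nor the multi-port flavor applies.
 */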
bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (is_mp_supported(dev))
		return false;

	return true;
}

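/* Read the devlink "enable_rdma" driverinit parameter; treat a read
 * failure as disabled.
 */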
static bool is_ib_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
						 &val);
	return err ? false : val.vbool;
}

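/* Protocol values index mlx5_adev_devices below. Attach walks the array
 * in ascending order; detach and delete walk it in reverse.
 */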
enum {
	MLX5_INTERFACE_PROTOCOL_ETH,
	MLX5_INTERFACE_PROTOCOL_ETH_REP,

	MLX5_INTERFACE_PROTOCOL_IB,
	MLX5_INTERFACE_PROTOCOL_IB_REP,
	MLX5_INTERFACE_PROTOCOL_MPIB,

	MLX5_INTERFACE_PROTOCOL_VNET,
};

static const struct mlx5_adev_device {
	const char *suffix;
	bool (*is_supported)(struct mlx5_core_dev *dev);
	bool (*is_enabled)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
	[MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
					   .is_supported = &mlx5_vnet_supported,
					   .is_enabled = &is_vnet_enabled },
	[MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
					 .is_supported = &mlx5_rdma_supported,
					 .is_enabled = &is_ib_enabled },
	[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
					  .is_supported = &mlx5_eth_supported,
					  .is_enabled = &is_eth_enabled },
	[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
					      .is_supported = &is_eth_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
					      .is_supported = &is_ib_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
					   .is_supported = &is_mp_supported },
};

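/* The auxiliary bus names each device "mlx5_core.<suffix>.<adev_idx>"; a
 * sub-driver binds by module name plus suffix, e.g. the mlx5e id_table
 * matches MLX5_ADEV_NAME ".eth".
 */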
int mlx5_adev_idx_alloc(void)
{
	return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
	ida_free(&mlx5_adev_ida, idx);
}

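/* Allocate the per-device array of auxiliary device pointers, one slot
 * per mlx5_adev_devices entry.
 */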
int mlx5_adev_init(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
			     sizeof(struct mlx5_adev *), GFP_KERNEL);
	if (!priv->adev)
		return -ENOMEM;

	return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	kfree(priv->adev);
}

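/* Device core release callback: free the wrapper and clear its slot so a
 * later rescan can recreate the auxiliary device.
 */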
static void adev_release(struct device *dev)
{
	struct mlx5_adev *mlx5_adev =
		container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
	int idx = mlx5_adev->idx;

	kfree(mlx5_adev);
	priv->adev[idx] = NULL;
}

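/* Allocate, initialize and register one auxiliary device. On any failure
 * the partially constructed device is torn down and an ERR_PTR is
 * returned.
 */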
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
	const char *suffix = mlx5_adev_devices[idx].suffix;
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	adev = &madev->adev;
	adev->id = dev->priv.adev_idx;
	adev->name = suffix;
	adev->dev.parent = dev->device;
	adev->dev.release = adev_release;
	madev->mdev = dev;
	madev->idx = idx;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(ret);
	}
	return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}

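/* Create every supported and enabled auxiliary device that does not exist
 * yet, and resume the ones that already exist and have a bound driver.
 */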
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	int ret = 0, i;

	mutex_lock(&mlx5_intf_mutex);
	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		if (!priv->adev[i]) {
			bool is_supported = false;

			if (mlx5_adev_devices[i].is_enabled) {
				bool enabled;

				enabled = mlx5_adev_devices[i].is_enabled(dev);
				if (!enabled)
					continue;
			}

			if (mlx5_adev_devices[i].is_supported)
				is_supported = mlx5_adev_devices[i].is_supported(dev);

			if (!is_supported)
				continue;

			priv->adev[i] = add_adev(dev, i);
			if (IS_ERR(priv->adev[i])) {
				ret = PTR_ERR(priv->adev[i]);
				priv->adev[i] = NULL;
			}
		} else {
			adev = &priv->adev[i]->adev;
			/* Note that the driver bound here is the auxiliary
			 * driver, not the PCI driver to which mlx5_core_dev
			 * is connected.
			 *
			 * Module unload can race with devlink reload here,
			 * but no extra lock is needed because we hold the
			 * global mlx5_intf_mutex.
			 */
			if (!adev->dev.driver)
				continue;
			adrv = to_auxiliary_drv(adev->dev.driver);

			if (adrv->resume)
				ret = adrv->resume(adev);
		}
		if (ret) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);

			break;
		}
	}
	mutex_unlock(&mlx5_intf_mutex);
	return ret;
}

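/* Suspend or delete auxiliary devices in reverse attach order: a device
 * whose protocol is still enabled and whose driver implements suspend is
 * suspended, everything else is deleted.
 */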
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	pm_message_t pm = {};
	int i;

	mutex_lock(&mlx5_intf_mutex);
	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled) {
			bool enabled;

			enabled = mlx5_adev_devices[i].is_enabled(dev);
			if (!enabled)
				goto skip_suspend;
		}

		adev = &priv->adev[i]->adev;
		/* Auxiliary driver was unbound manually through sysfs */
		if (!adev->dev.driver)
			goto skip_suspend;

		adrv = to_auxiliary_drv(adev->dev.driver);

		if (adrv->suspend) {
			adrv->suspend(adev, pm);
			continue;
		}

skip_suspend:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
	mutex_unlock(&mlx5_intf_mutex);
}

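/* Clear the "disable all" flags and create the auxiliary devices through
 * a locked rescan; on failure the device is unregistered again.
 */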
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	int ret;

	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	ret = mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
	if (ret)
		mlx5_unregister_device(dev);

	return ret;
}

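/* Disable all auxiliary devices; the rescan then deletes every one of
 * them.
 */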
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
}

static int add_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		bool is_supported = false;

		if (priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (!is_supported)
			continue;

		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			/* Keep rescanning the remaining drivers and leave it
			 * to the caller to decide whether to release
			 * everything or to continue.
			 */
			ret = PTR_ERR(priv->adev[i]);
			priv->adev[i] = NULL;
		}
	}
	return ret;
}

static void delete_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	bool delete_all;
	int i;

	delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		bool is_supported = false;

		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled) {
			bool enabled;

			enabled = mlx5_adev_devices[i].is_enabled(dev);
			if (!enabled)
				goto del_adev;
		}

		if (mlx5_adev_devices[i].is_supported && !delete_all)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (is_supported)
			continue;

del_adev:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
}

/* Re-evaluate which auxiliary devices should exist after mlx5_core_dev is
 * reconfigured: delete the ones that are no longer supported or enabled,
 * then add any that are missing.
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	lockdep_assert_held(&mlx5_intf_mutex);
	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
		return 0;

	delete_drivers(dev);
	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
		return 0;

	return add_drivers(dev);
}

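/* Build a PCI identity from domain, bus and slot (function number
 * excluded), used to match sibling physical functions on the same device.
 */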
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
		     (dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

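/* A candidate is the "next physical device" when it is a PF other than
 * the current one that shares the same domain/bus/slot.
 */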
static int _next_phys_dev(struct mlx5_core_dev *mdev,
			  const struct mlx5_core_dev *curr)
{
	if (!mlx5_core_is_pf(mdev))
		return 0;

	if (mdev == curr)
		return 0;

	if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
		return 0;

	return 1;
}

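/* Return the other device's drvdata only if both devices are bound to the
 * same driver.
 */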
static void *pci_get_other_drvdata(struct device *this, struct device *other)
{
	if (this->driver != other->driver)
		return NULL;

	return pci_get_drvdata(to_pci_dev(other));
}

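/* bus_find_device() match callback: any sibling mlx5 PF. */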
static int next_phys_dev(struct device *dev, const void *data)
{
	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;

	mdev = pci_get_other_drvdata(this->device, dev);
	if (!mdev)
		return 0;

	return _next_phys_dev(mdev, data);
}

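/* bus_find_device() match callback: sibling mlx5 PFs that are lag capable
 * (vport group manager, lag master, MLX5_MAX_PORTS lag ports).
 */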
static int next_phys_dev_lag(struct device *dev, const void *data)
{
	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;

	mdev = pci_get_other_drvdata(this->device, dev);
	if (!mdev)
		return 0;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
	    !MLX5_CAP_GEN(mdev, lag_master) ||
	    MLX5_CAP_GEN(mdev, num_lag_ports) != MLX5_MAX_PORTS)
		return 0;

	return _next_phys_dev(mdev, data);
}

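/* Search the PCI bus for another mlx5 PF that satisfies @match. The
 * reference taken by bus_find_device() is dropped right away; callers are
 * expected to hold mlx5_intf_mutex to keep the device alive.
 */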
static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
					       int (*match)(struct device *dev, const void *data))
{
	struct device *next;

	if (!mlx5_core_is_pf(dev))
		return NULL;

	next = bus_find_device(&pci_bus_type, NULL, dev, match);
	if (!next)
		return NULL;

	put_device(next);
	return pci_get_drvdata(to_pci_dev(next));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	lockdep_assert_held(&mlx5_intf_mutex);
	return mlx5_get_next_dev(dev, &next_phys_dev);
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
	lockdep_assert_held(&mlx5_intf_mutex);
	return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}

void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}