• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mlx5/fs.h>
34 #include <net/switchdev.h>
35 #include <net/pkt_cls.h>
36 #include <net/act_api.h>
37 #include <net/devlink.h>
38 #include <net/ipv6_stubs.h>
39 
40 #include "eswitch.h"
41 #include "en.h"
42 #include "en_rep.h"
43 #include "en/params.h"
44 #include "en/txrx.h"
45 #include "en_tc.h"
46 #include "en/rep/tc.h"
47 #include "en/rep/neigh.h"
48 #include "en/rep/bridge.h"
49 #include "en/devlink.h"
50 #include "fs_core.h"
51 #include "lib/mlx5.h"
52 #include "lib/devcom.h"
53 #include "lib/vxlan.h"
54 #define CREATE_TRACE_POINTS
55 #include "diag/en_rep_tracepoint.h"
56 #include "en_accel/ipsec.h"
57 
58 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
59 	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
60 #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
61 
62 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
63 
/* ethtool .get_drvinfo: report driver name and FW version as
 * "maj.min.sub (board_id)".  If that string would overflow the fixed-size
 * fw_version buffer, fall back to the shorter form without the board id.
 */
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int count;

	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	/* snprintf() returns the would-be length; >= size means truncated */
	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
	if (count >= sizeof(drvinfo->fw_version))
		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
			 "%d.%d.%04d", fw_rev_maj(mdev),
			 fw_rev_min(mdev), fw_rev_sub(mdev));
}
81 
/* Software (CPU-side) counters exposed through ethtool for representors. */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

/* Field layout only used to name/offset the HW vport counters below;
 * values are copied into priv->stats.vf_vport (rtnl_link_stats64) with
 * matching field positions.
 */
struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

/* Hardware (e-switch vport) counters exposed through ethtool. */
static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
105 
/* Stats group op: number of SW counters in this group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}
110 
/* Stats group op: copy the SW counter names into the ethtool strings
 * buffer ('data' and 'idx' come from the macro-declared signature).
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       sw_rep_stats_desc[i].format);
	return idx;
}
120 
/* Stats group op: copy the SW counter values, in the same order as the
 * strings above, into the ethtool u64 array.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_rep_stats_desc, i);
	return idx;
}
130 
/* Stats group op: refresh priv->stats.sw by folding the per-channel SW
 * counters into an rtnl_link_stats64 snapshot first.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes   = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes   = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}
145 
/* Stats group op: number of HW vport counters in this group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}
150 
/* Stats group op: copy the HW vport counter names into the strings buffer. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
	return idx;
}
159 
/* Stats group op: copy the cached HW vport counter values (refreshed by
 * the update_stats op below) into the ethtool u64 array.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
						   vport_rep_stats_desc, i);
	return idx;
}
169 
/* Stats group op: query the e-switch for this rep's vport counters and
 * cache them in priv->stats.vf_vport.  On query failure the previous
 * cached values are intentionally left untouched.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}
193 
/* ethtool .get_strings: only the statistics string set is supported;
 * other sets are silently ignored.
 */
static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	if (stringset == ETH_SS_STATS)
		mlx5e_stats_fill_strings(netdev_priv(dev), data);
}
205 
/* ethtool .get_ethtool_stats: delegate to the shared mlx5e stats filler. */
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	mlx5e_ethtool_get_ethtool_stats(netdev_priv(dev), stats, data);
}
213 
mlx5e_rep_get_sset_count(struct net_device * dev,int sset)214 static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
215 {
216 	struct mlx5e_priv *priv = netdev_priv(dev);
217 
218 	switch (sset) {
219 	case ETH_SS_STATS:
220 		return mlx5e_stats_total_num(priv);
221 	default:
222 		return -EOPNOTSUPP;
223 	}
224 }
225 
/* ethtool .get_ringparam: thin wrapper over the shared mlx5e handler. */
static void mlx5e_rep_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *param)
{
	mlx5e_ethtool_get_ringparam(netdev_priv(dev), param);
}
233 
/* ethtool .set_ringparam: thin wrapper over the shared mlx5e handler. */
static int mlx5e_rep_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *param)
{
	return mlx5e_ethtool_set_ringparam(netdev_priv(dev), param);
}
241 
/* ethtool .get_channels: thin wrapper over the shared mlx5e handler. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	mlx5e_ethtool_get_channels(netdev_priv(dev), ch);
}
249 
/* ethtool .set_channels: thin wrapper over the shared mlx5e handler. */
static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	return mlx5e_ethtool_set_channels(netdev_priv(dev), ch);
}
257 
/* ethtool .get_coalesce: kernel_coal/extack are unused by the mlx5e
 * common handler, so only the legacy struct is forwarded.
 */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_get_coalesce(netdev_priv(netdev), coal);
}
267 
/* ethtool .set_coalesce: kernel_coal/extack are unused by the mlx5e
 * common handler, so only the legacy struct is forwarded.
 */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_set_coalesce(netdev_priv(netdev), coal);
}
277 
mlx5e_rep_get_rxfh_key_size(struct net_device * netdev)278 static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
279 {
280 	struct mlx5e_priv *priv = netdev_priv(netdev);
281 
282 	return mlx5e_ethtool_get_rxfh_key_size(priv);
283 }
284 
mlx5e_rep_get_rxfh_indir_size(struct net_device * netdev)285 static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
286 {
287 	struct mlx5e_priv *priv = netdev_priv(netdev);
288 
289 	return mlx5e_ethtool_get_rxfh_indir_size(priv);
290 }
291 
/* ethtool ops exposed on representor netdevs; a subset of the full mlx5e
 * set (no link settings, pauseparam, etc.).
 */
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
310 
/* Tear down all SQ->vport "send to vport" steering rules for @rep,
 * including the peer-eswitch copies, and free the tracking entries.
 * No-op unless the e-switch is in switchdev (offloads) mode.
 */
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* _safe iteration: entries are deleted while walking the list */
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		if (rep_sq->send_to_vport_rule_peer)
			mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
329 
/* Install a "send to vport" steering rule for each SQ in @sqns_array so
 * traffic injected on the rep's SQs reaches its vport.  If the devcom
 * e-switch pair is active, mirror each rule on the peer e-switch too.
 * On any failure, all rules added so far are rolled back via
 * mlx5e_sqs2vport_stop().  Returns 0 on success or a negative errno.
 */
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* Taking peer data pins the pairing; released before returning */
	if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
						     MLX5_DEVCOM_ESW_OFFLOADS);

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

		if (peer_esw) {
			/* Mirror the rule on the paired e-switch */
			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
									rep, sqns_array[i]);
			if (IS_ERR(flow_rule)) {
				err = PTR_ERR(flow_rule);
				/* this entry is not on the list yet: undo by hand */
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				kfree(rep_sq);
				goto out_err;
			}
			rep_sq->send_to_vport_rule_peer = flow_rule;
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return 0;

out_err:
	/* Roll back every rule already linked on vport_sqs_list */
	mlx5e_sqs2vport_stop(esw, rep);

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}
395 
/* Collect the SQ numbers of all active channels/TCs and install the
 * corresponding send-to-vport rules.  Returns 0 on success; logs and
 * returns a negative errno otherwise.
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	/* worst-case sizing: num_channels * dcb num_tc SQNs */
	sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
		      sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}
425 
/* Remove all send-to-vport rules added by mlx5e_add_sqs_fwd_rules(). */
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}
434 
/* .ndo_open for representors: open the channels under state_lock, then
 * bring the e-switch vport admin state up; carrier is asserted only if
 * the admin-state change succeeded.  A failed admin-state change is not
 * treated as an open failure.
 */
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
457 
/* .ndo_stop for representors: bring the vport admin state down first
 * (best effort, return value ignored), then close the channels.
 */
static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}
474 
/* Return true if @priv is the uplink representor: requires e-switch
 * manager capability, a rep private area, and the uplink vport number.
 */
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}
489 
mlx5e_rep_has_offload_stats(const struct net_device * dev,int attr_id)490 bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
491 {
492 	switch (attr_id) {
493 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
494 			return true;
495 	}
496 
497 	return false;
498 }
499 
/* Fold the per-channel SW counters into @stats; always succeeds. */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	mlx5e_fold_sw_stats64(netdev_priv(dev), stats);
	return 0;
}
509 
mlx5e_rep_get_offload_stats(int attr_id,const struct net_device * dev,void * sp)510 int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
511 				void *sp)
512 {
513 	switch (attr_id) {
514 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
515 		return mlx5e_get_sw_stats64(dev, sp);
516 	}
517 
518 	return -EINVAL;
519 }
520 
/* .ndo_get_stats64: return the last cached vport counters immediately
 * and kick an async refresh so a later query sees fresher values.
 */
static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
530 
/* .ndo_change_mtu: common mlx5e MTU change, no preactivate callback. */
static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
535 
/* .ndo_get_devlink_port: look up the devlink port registered for this
 * rep's vport on the e-switch.
 */
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *dev = priv->mdev;

	return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
}
544 
/* .ndo_change_carrier: mirror the requested carrier state into the
 * e-switch vport admin state; only toggle the netdev carrier if the
 * admin-state change succeeded.
 */
static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	err = mlx5_modify_vport_admin_state(priv->mdev,
					    MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					    rep->vport, 1,
					    new_carrier ?
					    MLX5_VPORT_ADMIN_STATE_UP :
					    MLX5_VPORT_ADMIN_STATE_DOWN);
	if (err)
		return err;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	return 0;
}
567 
/* netdev ops for VF/SF representor netdevs (the uplink rep uses the
 * base mlx5e_netdev_ops instead).
 */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats	 = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats	 = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
	.ndo_change_carrier      = mlx5e_rep_change_carrier,
};
580 
/* True if @netdev is the uplink representor.  The uplink rep runs with
 * the base mlx5e_netdev_ops (not mlx5e_netdev_ops_rep), hence this
 * particular ops comparison.
 */
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}
586 
/* True if @netdev is a (non-uplink) vport representor, identified by
 * its dedicated netdev ops table.
 */
bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
591 
/* Populate priv->channels.params with representor defaults: one channel,
 * a small SQ for non-uplink reps, RQ/CQ moderation from device caps, and
 * a single TC.
 */
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	/* prefer CQE-based CQ moderation when the device supports it */
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->mqprio.num_tc       = 1;
	params->tunneled_offload_en = false;
	if (rep->vport != MLX5_VPORT_UPLINK)
		params->vlan_strip_disable = true;

	/* Set an initial non-zero value, so that mlx5e_select_queue won't
	 * divide by zero if called before first activating channels.
	 */
	priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
635 
/* One-time netdev setup for representors: ops tables, random MAC,
 * offload feature flags, and netns pinning.
 */
static void mlx5e_build_rep_netdev(struct net_device *netdev,
				   struct mlx5_core_dev *mdev)
{
	SET_NETDEV_DEV(netdev, mdev->device);
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
	eth_hw_addr_random(netdev);
	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->watchdog_timeo    = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features    |= NETIF_F_HW_TC;
#endif
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;
	/* reps must stay in the namespace of the e-switch they belong to */
	netdev->features |= NETIF_F_NETNS_LOCAL;
}
660 
/* Profile .init for VF/SF reps: build default params and init timestamping. */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}
671 
/* Profile .init for the uplink rep: IPsec (best effort — failure only
 * logged), vxlan netdev info, then the common rep init.
 */
static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);

	mlx5e_vxlan_set_netdev_info(priv);
	return mlx5e_init_rep(mdev, netdev);
}
685 
/* Profile .cleanup: undo mlx5e_ipsec_init() (no-op for non-uplink reps,
 * where IPsec was never initialized).
 */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
}
690 
/* Create the rep's TTC (traffic type classifier) flow table in the
 * kernel namespace.  Non-uplink reps place theirs one level deeper so
 * the uplink rep's TTC keeps the lower level needed for chaining from
 * the root flow table.
 */
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	mlx5e_set_ttc_params(priv, &ttc_params, false);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* To give uplink rep TTC a lower level for chaining from root ft */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(priv->fs.ttc)) {
		err = PTR_ERR(priv->fs.ttc);
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
			   err);
		return err;
	}
	return 0;
}
717 
/* Set up rpriv->root_ft.  Non-uplink reps simply reuse their TTC table
 * as root.  The uplink rep gets an empty table in the offloads
 * namespace whose miss rule chains on to ethtool/TTC tables.
 */
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		/* NULL it so the destroy path can distinguish this case */
		rpriv->root_ft = NULL;
	}

	return err;
}
753 
/* Destroy the uplink rep's root flow table; non-uplink reps only
 * borrowed their TTC table as root, so nothing is destroyed for them.
 */
static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return;
	mlx5_destroy_flow_table(rpriv->root_ft);
}
763 
/* Install the e-switch RX rule steering this rep's vport traffic into
 * rpriv->root_ft.
 */
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	/* only .type/.ft of dest are set; presumably the callee reads no
	 * other fields for a flow-table destination — NOTE(review): confirm
	 */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}
781 
/* Delete the vport RX rule if present; safe to call repeatedly since
 * the pointer is NULLed afterwards.
 */
static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!rpriv->vport_rx_rule)
		return;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = NULL;
}
792 
/* Bond state change hook: drop the current vport RX rule and, unless
 * tearing down (@cleanup), re-create it.
 */
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
	rep_vport_rx_rule_destroy(priv);

	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}
799 
mlx5e_init_rep_rx(struct mlx5e_priv * priv)800 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
801 {
802 	struct mlx5_core_dev *mdev = priv->mdev;
803 	int err;
804 
805 	priv->rx_res = mlx5e_rx_res_alloc();
806 	if (!priv->rx_res)
807 		return -ENOMEM;
808 
809 	mlx5e_init_l2_addr(priv);
810 
811 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
812 	if (err) {
813 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
814 		return err;
815 	}
816 
817 	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
818 				priv->max_nch, priv->drop_rq.rqn,
819 				&priv->channels.params.packet_merge,
820 				priv->channels.params.num_channels);
821 	if (err)
822 		goto err_close_drop_rq;
823 
824 	err = mlx5e_create_rep_ttc_table(priv);
825 	if (err)
826 		goto err_destroy_rx_res;
827 
828 	err = mlx5e_create_rep_root_ft(priv);
829 	if (err)
830 		goto err_destroy_ttc_table;
831 
832 	err = mlx5e_create_rep_vport_rx_rule(priv);
833 	if (err)
834 		goto err_destroy_root_ft;
835 
836 	mlx5e_ethtool_init_steering(priv);
837 
838 	return 0;
839 
840 err_destroy_root_ft:
841 	mlx5e_destroy_rep_root_ft(priv);
842 err_destroy_ttc_table:
843 	mlx5_destroy_ttc_table(priv->fs.ttc);
844 err_destroy_rx_res:
845 	mlx5e_rx_res_destroy(priv->rx_res);
846 err_close_drop_rq:
847 	mlx5e_close_drop_rq(&priv->drop_rq);
848 	mlx5e_rx_res_free(priv->rx_res);
849 	priv->rx_res = NULL;
850 	return err;
851 }
852 
/* Profile .cleanup_rx: tear down everything mlx5e_init_rep_rx() set up,
 * in reverse order.
 */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5_destroy_ttc_table(priv->fs.ttc);
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}
864 
/* Uplink-rep .init_rx: additionally create queue counters before the
 * common rep RX init.
 */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_create_q_counters(priv);
	return mlx5e_init_rep_rx(priv);
}
870 
/* Uplink-rep .cleanup_rx: common cleanup, then drop the queue counters. */
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}
876 
/* Uplink-only TX-side init: rep TC infrastructure, tunnel entropy,
 * bond support, and the netdev event notifier.  Unwinds bond and TC
 * state if notifier registration fails.
 */
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}
909 
/* Profile .init_tx: create TISes for all reps; the uplink rep gets the
 * extra TC/bond/notifier setup on top.  TISes are destroyed if the
 * uplink-specific init fails.
 */
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto destroy_tises;
	}

	return 0;

destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}
933 
/* Reverse of mlx5e_init_uplink_rep_tx(): unregister the netdev notifier,
 * then tear down bond and TC offload state.
 */
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}
940 
mlx5e_cleanup_rep_tx(struct mlx5e_priv * priv)941 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
942 {
943 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
944 
945 	mlx5e_destroy_tises(priv);
946 
947 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
948 		mlx5e_cleanup_uplink_rep_tx(rpriv);
949 }
950 
/* Profile .enable callback for non-uplink representors: set the netdev MTU
 * bounds and start the representor neighbour handling (mlx5e_rep_neigh_init).
 */
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_rep_neigh_init(rpriv);
}
958 
/* Profile .disable callback for non-uplink representors: stop the
 * representor neighbour handling started in mlx5e_rep_enable().
 */
static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_rep_neigh_cleanup(rpriv);
}
965 
/* Profile .update_rx callback: nothing to refresh for representors,
 * this is a no-op placeholder.
 */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
970 
uplink_rep_async_event(struct notifier_block * nb,unsigned long event,void * data)971 static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
972 {
973 	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
974 
975 	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
976 		struct mlx5_eqe *eqe = data;
977 
978 		switch (eqe->sub_type) {
979 		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
980 		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
981 			queue_work(priv->wq, &priv->update_carrier_work);
982 			break;
983 		default:
984 			return NOTIFY_DONE;
985 		}
986 
987 		return NOTIFY_OK;
988 	}
989 
990 	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
991 		return mlx5e_rep_tc_event_port_affinity(priv);
992 
993 	return NOTIFY_DONE;
994 }
995 
/* Profile .enable callback for the uplink representor: derive MTU limits
 * from the HW port, enable TC offload, join the LAG, register for device
 * events, set up DCB/neighbour/bridge offload state, and finally reattach
 * and (if it was running) reopen the netdev under rtnl.
 */
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	/* SW MTU bounds come from the HW max MTU of port 1. */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5e_rep_tc_enable(priv);

	/* If the device supports it, put the uplink vport admin state
	 * in AUTO mode.
	 */
	if (MLX5_CAP_GEN(mdev, uplink_follow))
		mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
					      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
	mlx5_lag_add_netdev(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
	mlx5e_rep_neigh_init(rpriv);
	mlx5e_rep_bridge_init(priv);

	/* Advertise HW TC offload on the uplink netdev. */
	netdev->wanted_features |= NETIF_F_HW_TC;

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}
1030 
/* Profile .disable callback for the uplink representor: detach/close the
 * netdev under rtnl, then undo mlx5e_uplink_rep_enable() in reverse order
 * and reset the vxlan state to its defaults.
 */
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_rep_bridge_cleanup(priv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	mlx5_vxlan_reset_to_default(mdev->vxlan);
}
1050 
/* Stats group instantiations for representors; vport_rep is flagged
 * MLX5E_NDO_UPDATE_STATS so it is refreshed from the ndo stats path.
 */
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};
1059 
/* Profile .stats_grps_num callback: number of entries in
 * mlx5e_rep_stats_grps.
 */
static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}
1064 
/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	/* IPsec counters are only compiled in when the feature is enabled. */
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
};
1086 
/* Profile .stats_grps_num callback: number of entries in
 * mlx5e_ul_rep_stats_grps.
 */
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
1091 
/* Netdev profile for non-uplink (VF) vport representors. */
static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_rep_enable,
	.disable	        = mlx5e_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= 1,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
	.stats_grps		= mlx5e_rep_stats_grps,
	.stats_grps_num		= mlx5e_rep_stats_grps_num,
	.rx_ptp_support		= false,
};
1110 
/* Netdev profile for the uplink representor; the uplink netdev is switched
 * to this profile in mlx5e_vport_uplink_rep_load().
 */
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_ul_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_ul_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_ul_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable		        = mlx5e_uplink_rep_enable,
	.disable	        = mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats           = mlx5e_stats_update_ndo_stats,
	.update_carrier	        = mlx5e_update_carrier,
	.rx_handlers            = &mlx5e_rx_handlers_rep,
	.max_tc			= MLX5E_MAX_NUM_TC,
	/* XSK is needed so we can replace profile with NIC netdev */
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(XSK),
	.stats_grps		= mlx5e_ul_rep_stats_grps,
	.stats_grps_num		= mlx5e_ul_rep_stats_grps_num,
	.rx_ptp_support		= false,
};
1131 
1132 /* e-Switch vport representors */
1133 static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev * dev,struct mlx5_eswitch_rep * rep)1134 mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1135 {
1136 	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
1137 	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
1138 	struct devlink_port *dl_port;
1139 	int err;
1140 
1141 	rpriv->netdev = priv->netdev;
1142 
1143 	err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
1144 					  rpriv);
1145 	if (err)
1146 		return err;
1147 
1148 	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
1149 	if (dl_port)
1150 		devlink_port_type_eth_set(dl_port, rpriv->netdev);
1151 
1152 	return 0;
1153 }
1154 
1155 static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv * rpriv)1156 mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
1157 {
1158 	struct net_device *netdev = rpriv->netdev;
1159 	struct devlink_port *dl_port;
1160 	struct mlx5_core_dev *dev;
1161 	struct mlx5e_priv *priv;
1162 
1163 	priv = netdev_priv(netdev);
1164 	dev = priv->mdev;
1165 
1166 	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
1167 	if (dl_port)
1168 		devlink_port_type_clear(dl_port);
1169 	mlx5e_netdev_attach_nic_profile(priv);
1170 }
1171 
/* Load a VF (non-uplink) vport representor: create a dedicated netdev with
 * the representor profile, init/attach/register it, and bind it to its
 * devlink port when one exists. Errors unwind in reverse order of setup.
 */
static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	unsigned int txqs, rxqs;
	int nch, err;

	/* Size the netdev queue arrays from the profile's TC/RQ-group counts. */
	profile = &mlx5e_rep_profile;
	nch = mlx5e_get_max_num_channels(dev);
	txqs = nch * profile->max_tc;
	rxqs = nch * profile->rq_groups;
	netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, netdev);
	return 0;

	/* Unwind in reverse order of the setup steps above. */
err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}
1238 
1239 static int
mlx5e_vport_rep_load(struct mlx5_core_dev * dev,struct mlx5_eswitch_rep * rep)1240 mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1241 {
1242 	struct mlx5e_rep_priv *rpriv;
1243 	int err;
1244 
1245 	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
1246 	if (!rpriv)
1247 		return -ENOMEM;
1248 
1249 	/* rpriv->rep to be looked up when profile->init() is called */
1250 	rpriv->rep = rep;
1251 	rep->rep_data[REP_ETH].priv = rpriv;
1252 	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
1253 
1254 	if (rep->vport == MLX5_VPORT_UPLINK)
1255 		err = mlx5e_vport_uplink_rep_load(dev, rep);
1256 	else
1257 		err = mlx5e_vport_vf_rep_load(dev, rep);
1258 
1259 	if (err)
1260 		kfree(rpriv);
1261 
1262 	return err;
1263 }
1264 
/* Eswitch rep_ops .unload callback: tear down the representor netdev (or
 * restore the NIC profile for the uplink) and free the per-rep private data.
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	struct devlink_port *dl_port;
	/* Cache ppriv now: priv's storage is released below before kfree. */
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	/* Reverse order of mlx5e_vport_vf_rep_load(). */
	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kfree(ppriv); /* mlx5e_rep_priv */
}
1290 
mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep * rep)1291 static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
1292 {
1293 	struct mlx5e_rep_priv *rpriv;
1294 
1295 	rpriv = mlx5e_rep_to_rep_priv(rep);
1296 
1297 	return rpriv->netdev;
1298 }
1299 
mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep * rep)1300 static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
1301 {
1302 	struct mlx5e_rep_priv *rpriv;
1303 	struct mlx5e_rep_sq *rep_sq;
1304 
1305 	rpriv = mlx5e_rep_to_rep_priv(rep);
1306 	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
1307 		if (!rep_sq->send_to_vport_rule_peer)
1308 			continue;
1309 		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
1310 		rep_sq->send_to_vport_rule_peer = NULL;
1311 	}
1312 }
1313 
/* MLX5_SWITCHDEV_EVENT_PAIR handler: for every SQ of this representor that
 * does not have one yet, install a send-to-vport rule on the peer eswitch.
 * On failure, remove the rules installed so far and return the error of the
 * failing rule.
 */
static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep,
				      struct mlx5_eswitch *peer_esw)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		/* Skip SQs already paired with the peer. */
		if (rep_sq->send_to_vport_rule_peer)
			continue;
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
		if (IS_ERR(flow_rule))
			goto err_out;
		rep_sq->send_to_vport_rule_peer = flow_rule;
	}

	return 0;
err_out:
	mlx5e_vport_rep_event_unpair(rep);
	return PTR_ERR(flow_rule);
}
1337 
mlx5e_vport_rep_event(struct mlx5_eswitch * esw,struct mlx5_eswitch_rep * rep,enum mlx5_switchdev_event event,void * data)1338 static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
1339 				 struct mlx5_eswitch_rep *rep,
1340 				 enum mlx5_switchdev_event event,
1341 				 void *data)
1342 {
1343 	int err = 0;
1344 
1345 	if (event == MLX5_SWITCHDEV_EVENT_PAIR)
1346 		err = mlx5e_vport_rep_event_pair(esw, rep, data);
1347 	else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
1348 		mlx5e_vport_rep_event_unpair(rep);
1349 
1350 	return err;
1351 }
1352 
/* REP_ETH representor ops registered with the eswitch core in
 * mlx5e_rep_probe().
 */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
	.event = mlx5e_vport_rep_event,
};
1359 
mlx5e_rep_probe(struct auxiliary_device * adev,const struct auxiliary_device_id * id)1360 static int mlx5e_rep_probe(struct auxiliary_device *adev,
1361 			   const struct auxiliary_device_id *id)
1362 {
1363 	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
1364 	struct mlx5_core_dev *mdev = edev->mdev;
1365 	struct mlx5_eswitch *esw;
1366 
1367 	esw = mdev->priv.eswitch;
1368 	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
1369 	return 0;
1370 }
1371 
mlx5e_rep_remove(struct auxiliary_device * adev)1372 static void mlx5e_rep_remove(struct auxiliary_device *adev)
1373 {
1374 	struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
1375 	struct mlx5_core_dev *mdev = vdev->mdev;
1376 	struct mlx5_eswitch *esw;
1377 
1378 	esw = mdev->priv.eswitch;
1379 	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
1380 }
1381 
/* Match the ".eth-rep" auxiliary device — presumably created by the mlx5
 * core driver when eswitch offloads are enabled; verify against the adev
 * creation path in the core driver.
 */
static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);

/* Auxiliary driver binding probe/remove above to the id table. */
static struct auxiliary_driver mlx5e_rep_driver = {
	.name = "eth-rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};
1395 
/* Module-level init: register the representor auxiliary driver.
 * Returns 0 on success or a negative errno from the auxiliary bus.
 */
int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}
1400 
/* Module-level cleanup: unregister the representor auxiliary driver. */
void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}
1405