• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/slab.h>
43 #include <linux/device.h>
44 #include <linux/skbuff.h>
45 #include <linux/if_vlan.h>
46 #include <net/switchdev.h>
47 #include <generated/utsrelease.h>
48 
49 #include "core.h"
50 #include "reg.h"
51 #include "port.h"
52 #include "trap.h"
53 #include "txheader.h"
54 
/* Driver identification strings, reported via ethtool get_drvinfo. */
static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";

struct mlxsw_sx_port;
59 
/* Per-ASIC private data, shared by all ports of one SwitchX-2 device. */
struct mlxsw_sx {
	struct mlxsw_sx_port **ports;	/* indexed by local port number */
	struct mlxsw_core *core;	/* handle for register access and Tx */
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];	/* base MAC, used as the switch parent ID */
};
66 
/* Per-CPU software counters for one port.
 * The 64-bit counters are protected by @syncp for consistent reads on
 * 32-bit hosts; @tx_dropped is a plain u32 updated without @syncp.
 */
struct mlxsw_sx_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};
75 
/* Per-port private data, stored in the netdev's private area. */
struct mlxsw_sx_port {
	struct net_device *dev;		/* backpointer to our netdev */
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;	/* owning device */
	u8 local_port;			/* device-local port number */
};
82 
/* Tx header field definitions. The header is prepended to every packet
 * handed to the device; see mlxsw_sx_txhdr_construct() for usage.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* tx_hdr_swid
 * Switch partition ID.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_ctclass3
 * See field 'etclass'.
 */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* tx_hdr_rdq
 * RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* tx_hdr_cpu_sig
 * Signature control for packets going to CPU. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* tx_hdr_sig
 * Stacking protocol signature. Must be set to 0xE0E0.
 */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

/* tx_hdr_stclass
 * Stacking TClass.
 */
MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* tx_hdr_emad
 * EMAD bit. Must be set for EMADs.
 */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
159 
mlxsw_sx_txhdr_construct(struct sk_buff * skb,const struct mlxsw_tx_info * tx_info)160 static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
161 				     const struct mlxsw_tx_info *tx_info)
162 {
163 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
164 	bool is_emad = tx_info->is_emad;
165 
166 	memset(txhdr, 0, MLXSW_TXHDR_LEN);
167 
168 	/* We currently set default values for the egress tclass (QoS). */
169 	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
170 	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
171 	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
172 	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
173 						  MLXSW_TXHDR_ETCLASS_5);
174 	mlxsw_tx_hdr_swid_set(txhdr, 0);
175 	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
176 	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
177 	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
178 					      MLXSW_TXHDR_RDQ_OTHER);
179 	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
180 	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
181 	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
182 	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
183 					       MLXSW_TXHDR_NOT_EMAD);
184 	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
185 }
186 
/* Set the port's administrative status (up/down) via the PAOS register. */
static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
					  bool is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 status;

	if (is_up)
		status = MLXSW_PORT_ADMIN_STATUS_UP;
	else
		status = MLXSW_PORT_ADMIN_STATUS_DOWN;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, status);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
}
198 
/* Read the port's operational (link) status via the PAOS register.
 * @p_is_up: out parameter, set to true when the operational status is up.
 * Returns 0 on success or a negative errno from the register query.
 */
static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
					 bool *p_is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	/* The comparison already yields a bool; no '? true : false' needed. */
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
	return 0;
}
215 
/* Program the port MTU, accounting for the Tx header and Ethernet header
 * the hardware sees on top of the L3 MTU. Returns -EINVAL if the requested
 * value exceeds what the port supports.
 */
static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;

	/* First query the maximum MTU the port supports. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	if (mtu > mlxsw_reg_pmtu_max_mtu_get(pmtu_pl))
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
}
236 
/* Assign the port to switch partition @swid via the PSPA register. */
static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
}
245 
246 static int
mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port * mlxsw_sx_port)247 mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
248 {
249 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
250 	char sspr_pl[MLXSW_REG_SSPR_LEN];
251 
252 	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
253 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
254 }
255 
/* Check whether the port has a usable module behind it.
 * @p_usable: out parameter, set to true when the PMLP width is non-zero
 * (i.e. at least one lane is mapped to the port).
 * Returns 0 on success or a negative errno from the register query.
 */
static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
				      bool *p_usable)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	/* Comparison yields a bool directly; no '? true : false' needed. */
	*p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) != 0;
	return 0;
}
270 
mlxsw_sx_port_open(struct net_device * dev)271 static int mlxsw_sx_port_open(struct net_device *dev)
272 {
273 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
274 	int err;
275 
276 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
277 	if (err)
278 		return err;
279 	netif_start_queue(dev);
280 	return 0;
281 }
282 
mlxsw_sx_port_stop(struct net_device * dev)283 static int mlxsw_sx_port_stop(struct net_device *dev)
284 {
285 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
286 
287 	netif_stop_queue(dev);
288 	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
289 }
290 
/* ndo_start_xmit: prepend the Tx header and hand the skb to the core.
 * On success the core owns the skb. On any local failure the packet is
 * dropped (counted in tx_dropped) and NETDEV_TX_OK is returned, so the
 * stack never retries a packet we have already consumed.
 */
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Back-pressure: let the stack requeue if the core cannot take it. */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
		return NETDEV_TX_BUSY;

	/* Ensure enough headroom for the Tx header; reallocate if needed. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* Cache the length now; on success the skb belongs to the core. */
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
337 
mlxsw_sx_port_change_mtu(struct net_device * dev,int mtu)338 static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
339 {
340 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
341 	int err;
342 
343 	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
344 	if (err)
345 		return err;
346 	dev->mtu = mtu;
347 	return 0;
348 }
349 
/* ndo_get_stats64: sum the per-CPU software counters into @stats.
 * The 64-bit counters are read under the u64_stats seqcount retry loop so
 * a concurrent writer on a 32-bit host cannot produce a torn value.
 */
static struct rtnl_link_stats64 *
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		/* Retry until a consistent snapshot of this CPU is read. */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
381 
/* Netdev callbacks implemented by this driver. */
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open		= mlxsw_sx_port_open,
	.ndo_stop		= mlxsw_sx_port_stop,
	.ndo_start_xmit		= mlxsw_sx_port_xmit,
	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
};
389 
/* ethtool get_drvinfo: report driver name/version, firmware revision and
 * the bus device name.
 */
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	/* Firmware version formatted as major.minor.subminor. */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
407 
/* Descriptor of one hardware counter exposed through ethtool -S:
 * the string shown to the user and the getter that extracts the value
 * from a PPCNT register payload.
 */
struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

/* Counters read from the PPCNT register; the getter names correspond to
 * the IEEE 802.3 counter group fields of that register.
 */
static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
493 
/* ethtool get_strings: copy the counter names for the stats string set.
 * Other string sets are not supported and leave @data untouched.
 */
static void mlxsw_sx_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       mlxsw_sx_port_hw_stats[i].str, ETH_GSTRING_LEN);
}
510 
/* ethtool get_ethtool_stats: read the PPCNT register once and extract all
 * counters from it. On a query failure every counter is reported as zero.
 */
static void mlxsw_sx_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;
	int i;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
		if (err)
			data[i] = 0;
		else
			data[i] = mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl);
	}
}
525 
mlxsw_sx_port_get_sset_count(struct net_device * dev,int sset)526 static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
527 {
528 	switch (sset) {
529 	case ETH_SS_STATS:
530 		return MLXSW_SX_PORT_HW_STATS_LEN;
531 	default:
532 		return -EOPNOTSUPP;
533 	}
534 }
535 
/* Mapping between PTYS register protocol bits and the legacy ethtool
 * supported/advertised mode bits plus the link speed in Mb/s.
 */
struct mlxsw_sx_port_link_mode {
	u32 mask;	/* PTYS eth proto bit(s) */
	u32 supported;	/* SUPPORTED_* ethtool bit, 0 if none exists */
	u32 advertised;	/* ADVERTISED_* ethtool bit, 0 if none exists */
	u32 speed;	/* speed in Mb/s */
};

/* Entries with .supported/.advertised left out (zero-initialized) have no
 * matching legacy ethtool mode bit and only contribute the speed mapping.
 */
static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
641 
/* Derive the legacy ethtool SUPPORTED_* port-type bit from the PTYS
 * capability mask: fibre media first, then backplane, else nothing.
 */
static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
{
	const u32 fibre_mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			       MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			       MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			       MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			       MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			       MLXSW_REG_PTYS_ETH_SPEED_SGMII;
	const u32 backplane_mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
				   MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
				   MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				   MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX;

	if (ptys_eth_proto & fibre_mask)
		return SUPPORTED_FIBRE;
	if (ptys_eth_proto & backplane_mask)
		return SUPPORTED_Backplane;
	return 0;
}
660 
/* Translate PTYS protocol bits into ethtool SUPPORTED_* link mode bits. */
static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
{
	const struct mlxsw_sx_port_link_mode *link_mode;
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		link_mode = &mlxsw_sx_port_link_mode[i];
		if (link_mode->mask & ptys_eth_proto)
			modes |= link_mode->supported;
	}
	return modes;
}
672 
/* Translate PTYS protocol bits into ethtool ADVERTISED_* link mode bits. */
static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
{
	const struct mlxsw_sx_port_link_mode *link_mode;
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		link_mode = &mlxsw_sx_port_link_mode[i];
		if (link_mode->mask & ptys_eth_proto)
			modes |= link_mode->advertised;
	}
	return modes;
}
684 
/* Fill ethtool speed/duplex from the operational PTYS bits.
 * Without carrier both are reported as unknown.
 */
static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (carrier_ok) {
		/* First matching table entry determines the speed. */
		for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
			if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
				speed = mlxsw_sx_port_link_mode[i].speed;
				duplex = DUPLEX_FULL;
				break;
			}
		}
	}

	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}
706 
mlxsw_sx_port_connector_port(u32 ptys_eth_proto)707 static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
708 {
709 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
710 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
711 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
712 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
713 		return PORT_FIBRE;
714 
715 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
716 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
717 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
718 		return PORT_DA;
719 
720 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
721 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
722 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
723 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
724 		return PORT_NONE;
725 
726 	return PORT_OTHER;
727 }
728 
/* ethtool get_settings: query the PTYS register and translate the
 * capability, admin and operational protocol masks into the legacy
 * ethtool_cmd fields.
 */
static int mlxsw_sx_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With no operational protocol (link down), fall back to the
	 * capability mask for connector type and link partner modes.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
763 
/* Translate ethtool ADVERTISED_* bits into PTYS protocol bits. */
static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
{
	const struct mlxsw_sx_port_link_mode *link_mode;
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		link_mode = &mlxsw_sx_port_link_mode[i];
		if (advertising & link_mode->advertised)
			ptys_proto |= link_mode->mask;
	}
	return ptys_proto;
}
775 
/* Collect the PTYS protocol bits of every link mode with the given speed. */
static u32 mlxsw_sx_to_ptys_speed(u32 speed)
{
	const struct mlxsw_sx_port_link_mode *link_mode;
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		link_mode = &mlxsw_sx_port_link_mode[i];
		if (link_mode->speed == speed)
			ptys_proto |= link_mode->mask;
	}
	return ptys_proto;
}
787 
/* ethtool set_settings: program a new admin protocol mask via PTYS.
 * With autoneg enabled the advertised modes are used; otherwise the
 * requested forced speed selects the protocol bits. If the port is
 * operationally up, it is toggled down and up so the new setting takes
 * effect.
 */
static int mlxsw_sx_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Only modes the port is actually capable of may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	/* Nothing to do if the admin mask is already as requested. */
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	/* Bounce the port so the new protocol setting is applied. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
852 
/* ethtool callbacks implemented by this driver. */
static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sx_port_get_strings,
	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
	.get_sset_count		= mlxsw_sx_port_get_sset_count,
	.get_settings		= mlxsw_sx_port_get_settings,
	.set_settings		= mlxsw_sx_port_set_settings,
};
862 
/* switchdev attr_get: report the switch parent ID (the device's base MAC)
 * so the stack can tell which ports belong to the same ASIC.
 */
static int mlxsw_sx_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
880 
/* switchdev callbacks implemented by this driver. */
static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
};
884 
mlxsw_sx_hw_id_get(struct mlxsw_sx * mlxsw_sx)885 static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
886 {
887 	char spad_pl[MLXSW_REG_SPAD_LEN];
888 	int err;
889 
890 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
891 	if (err)
892 		return err;
893 	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
894 	return 0;
895 }
896 
/* Derive the port's MAC address: query the device base MAC via the PPAD
 * register and offset its last byte by the local port number.
 */
static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct net_device *dev = mlxsw_sx_port->dev;
	char ppad_pl[MLXSW_REG_PPAD_LEN];
	int err;

	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
	if (err)
		return err;
	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
	/* The last byte value in base mac address is guaranteed
	 * to be such it does not overflow when adding local_port
	 * value.
	 */
	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
	return 0;
}
916 
/* Set the spanning tree state for @vid on the port via the SPMS register. */
static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char *spms_pl;
	int err;

	/* The SPMS payload is heap-allocated rather than placed on the
	 * stack like the other register payloads in this file.
	 */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;

	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);

	kfree(spms_pl);
	return err;
}
933 
mlxsw_sx_port_speed_set(struct mlxsw_sx_port * mlxsw_sx_port,u32 speed)934 static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
935 				   u32 speed)
936 {
937 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
938 	char ptys_pl[MLXSW_REG_PTYS_LEN];
939 
940 	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
941 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
942 }
943 
944 static int
mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port * mlxsw_sx_port,enum mlxsw_reg_spmlr_learn_mode mode)945 mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
946 				    enum mlxsw_reg_spmlr_learn_mode mode)
947 {
948 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
949 	char spmlr_pl[MLXSW_REG_SPMLR_LEN];
950 
951 	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
952 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
953 }
954 
/* Allocate, configure and register a netdev for @local_port.
 *
 * On success the port is stored in mlxsw_sx->ports[local_port]. A port whose
 * module is reported not usable is skipped: the function bails out through
 * port_not_usable with err still 0 (set by the successful module check), so
 * it returns success without registering a netdev.
 *
 * The error labels unwind in exact reverse order of the setup steps; steps
 * between err_port_speed_set and err_port_swid_set share the single SWID
 * rollback.
 */
static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	bool usable;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;

	/* Per-CPU RX/TX counters updated from the datapath. */
	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;

	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	/* Carrier is driven by PUDE link events; start with it off. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
			mlxsw_sx_port->local_port);
		goto err_port_module_check;
	}

	if (!usable) {
		dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
			mlxsw_sx_port->local_port);
		/* Not an error: err is 0 here, so this returns success. */
		goto port_not_usable;
	}

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
				      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	/* Port starts administratively down; opened via ndo_open later. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
err_port_module_check:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}
1088 
/* Unregister and free the netdev of @local_port; no-op if the port was
 * never created (e.g. its module was not usable).
 */
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	if (!mlxsw_sx_port)
		return;
	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
	/* Clear the slot before freeing so later lookups (trap handlers,
	 * repeated remove calls) cannot see a dangling pointer.
	 */
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	free_percpu(mlxsw_sx_port->pcpu_stats);
	free_netdev(mlxsw_sx_port->dev);
}
1100 
mlxsw_sx_ports_remove(struct mlxsw_sx * mlxsw_sx)1101 static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
1102 {
1103 	int i;
1104 
1105 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1106 		mlxsw_sx_port_remove(mlxsw_sx, i);
1107 	kfree(mlxsw_sx->ports);
1108 }
1109 
mlxsw_sx_ports_create(struct mlxsw_sx * mlxsw_sx)1110 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
1111 {
1112 	size_t alloc_size;
1113 	int i;
1114 	int err;
1115 
1116 	alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
1117 	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
1118 	if (!mlxsw_sx->ports)
1119 		return -ENOMEM;
1120 
1121 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1122 		err = mlxsw_sx_port_create(mlxsw_sx, i);
1123 		if (err)
1124 			goto err_port_create;
1125 	}
1126 	return 0;
1127 
1128 err_port_create:
1129 	for (i--; i >= 1; i--)
1130 		mlxsw_sx_port_remove(mlxsw_sx, i);
1131 	kfree(mlxsw_sx->ports);
1132 	return err;
1133 }
1134 
mlxsw_sx_pude_event_func(const struct mlxsw_reg_info * reg,char * pude_pl,void * priv)1135 static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
1136 				     char *pude_pl, void *priv)
1137 {
1138 	struct mlxsw_sx *mlxsw_sx = priv;
1139 	struct mlxsw_sx_port *mlxsw_sx_port;
1140 	enum mlxsw_reg_pude_oper_status status;
1141 	u8 local_port;
1142 
1143 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1144 	mlxsw_sx_port = mlxsw_sx->ports[local_port];
1145 	if (!mlxsw_sx_port) {
1146 		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1147 			 local_port);
1148 		return;
1149 	}
1150 
1151 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
1152 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
1153 		netdev_info(mlxsw_sx_port->dev, "link up\n");
1154 		netif_carrier_on(mlxsw_sx_port->dev);
1155 	} else {
1156 		netdev_info(mlxsw_sx_port->dev, "link down\n");
1157 		netif_carrier_off(mlxsw_sx_port->dev);
1158 	}
1159 }
1160 
/* Listener for Port Up/Down Event (PUDE) traps. */
static struct mlxsw_event_listener mlxsw_sx_pude_event = {
	.func = mlxsw_sx_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
1165 
mlxsw_sx_event_register(struct mlxsw_sx * mlxsw_sx,enum mlxsw_event_trap_id trap_id)1166 static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
1167 				   enum mlxsw_event_trap_id trap_id)
1168 {
1169 	struct mlxsw_event_listener *el;
1170 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
1171 	int err;
1172 
1173 	switch (trap_id) {
1174 	case MLXSW_TRAP_ID_PUDE:
1175 		el = &mlxsw_sx_pude_event;
1176 		break;
1177 	}
1178 	err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
1179 	if (err)
1180 		return err;
1181 
1182 	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1183 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
1184 	if (err)
1185 		goto err_event_trap_set;
1186 
1187 	return 0;
1188 
1189 err_event_trap_set:
1190 	mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
1191 	return err;
1192 }
1193 
mlxsw_sx_event_unregister(struct mlxsw_sx * mlxsw_sx,enum mlxsw_event_trap_id trap_id)1194 static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
1195 				      enum mlxsw_event_trap_id trap_id)
1196 {
1197 	struct mlxsw_event_listener *el;
1198 
1199 	switch (trap_id) {
1200 	case MLXSW_TRAP_ID_PUDE:
1201 		el = &mlxsw_sx_pude_event;
1202 		break;
1203 	}
1204 	mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
1205 }
1206 
/* RX handler for all trapped packets: account the packet in per-CPU stats
 * and hand it to the network stack on the receiving port's netdev.
 */
static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sx_port_pcpu_stats *stats;
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	if (unlikely(!mlxsw_sx_port)) {
		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sx_port->dev;

	stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
1231 
/* Packet types trapped to the CPU. All entries share one RX handler and
 * match any local port.
 */
static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
1305 
/* Create the RX/CTRL trap groups and trap all listed packet types to the
 * CPU.
 *
 * Rollback on failure: when the HPKT write for entry i fails, listener i is
 * already registered and is unregistered first (err_rx_trap_set); entries
 * 0..i-1 then have their traps restored to FORWARD and their listeners
 * unregistered.
 */
static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
						      &mlxsw_sx_rx_listener[i],
						      mlxsw_sx);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sx_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i registered but its trap not set; unregister it alone. */
	mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
					  &mlxsw_sx_rx_listener[i],
					  mlxsw_sx);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sx_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
						  &mlxsw_sx_rx_listener[i],
						  mlxsw_sx);
	}
	return err;
}
1354 
mlxsw_sx_traps_fini(struct mlxsw_sx * mlxsw_sx)1355 static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
1356 {
1357 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
1358 	int i;
1359 
1360 	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
1361 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1362 				    mlxsw_sx_rx_listener[i].trap_id);
1363 		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
1364 
1365 		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
1366 						  &mlxsw_sx_rx_listener[i],
1367 						  mlxsw_sx);
1368 	}
1369 }
1370 
mlxsw_sx_flood_init(struct mlxsw_sx * mlxsw_sx)1371 static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1372 {
1373 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
1374 	char sgcr_pl[MLXSW_REG_SGCR_LEN];
1375 	char *sftr_pl;
1376 	int err;
1377 
1378 	/* Configure a flooding table, which includes only CPU port. */
1379 	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
1380 	if (!sftr_pl)
1381 		return -ENOMEM;
1382 	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
1383 			    MLXSW_PORT_CPU_PORT, true);
1384 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
1385 	kfree(sftr_pl);
1386 	if (err)
1387 		return err;
1388 
1389 	/* Flood different packet types using the flooding table. */
1390 	mlxsw_reg_sfgc_pack(sfgc_pl,
1391 			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
1392 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1393 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1394 			    0);
1395 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1396 	if (err)
1397 		return err;
1398 
1399 	mlxsw_reg_sfgc_pack(sfgc_pl,
1400 			    MLXSW_REG_SFGC_TYPE_BROADCAST,
1401 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1402 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1403 			    0);
1404 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1405 	if (err)
1406 		return err;
1407 
1408 	mlxsw_reg_sfgc_pack(sfgc_pl,
1409 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
1410 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1411 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1412 			    0);
1413 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1414 	if (err)
1415 		return err;
1416 
1417 	mlxsw_reg_sfgc_pack(sfgc_pl,
1418 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
1419 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1420 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1421 			    0);
1422 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1423 	if (err)
1424 		return err;
1425 
1426 	mlxsw_reg_sfgc_pack(sfgc_pl,
1427 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
1428 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1429 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1430 			    0);
1431 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1432 	if (err)
1433 		return err;
1434 
1435 	mlxsw_reg_sgcr_pack(sgcr_pl, true);
1436 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1437 }
1438 
/* Driver init callback invoked by the mlxsw core after the bus is up.
 *
 * Init order: HW ID -> ports -> PUDE event listener -> RX traps -> flood
 * tables. The error labels unwind the completed steps in reverse order.
 */
static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_rx_listener_register:
	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}
1488 
/* Driver teardown: undo the mlxsw_sx_init() steps in reverse order. */
static void mlxsw_sx_fini(void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
	mlxsw_sx_ports_remove(mlxsw_sx);
}
1497 
/* Resource/configuration profile handed to the mlxsw core at device init.
 * A max_* value is only consumed when its corresponding used_* flag is set.
 * Values are SwitchX-2 specific; their hardware meaning is defined by the
 * config profile command in the core/bus layer.
 */
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= 64,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= 16,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 48000,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.max_flood_tables		= 2,
	.max_vid_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			/* Single Ethernet switch partition (SWID 0). */
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
1531 
/* Driver registration record: binds the SwitchX-2 device kind to this
 * driver's init/fini callbacks, TX header construction and config profile.
 */
static struct mlxsw_driver mlxsw_sx_driver = {
	.kind			= MLXSW_DEVICE_KIND_SWITCHX2,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sx),
	.init			= mlxsw_sx_init,
	.fini			= mlxsw_sx_fini,
	.txhdr_construct	= mlxsw_sx_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sx_config_profile,
};
1542 
/* Module entry point: register this driver with the mlxsw core. */
static int __init mlxsw_sx_module_init(void)
{
	return mlxsw_core_driver_register(&mlxsw_sx_driver);
}
1547 
/* Module exit point: unregister this driver from the mlxsw core. */
static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}
1552 
module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
/* Auto-load this module when the bus reports a SwitchX-2 device kind. */
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
1560