1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/slab.h>
11 #include <linux/device.h>
12 #include <linux/skbuff.h>
13 #include <linux/if_vlan.h>
14 #include <net/switchdev.h>
15 
16 #include "pci.h"
17 #include "core.h"
18 #include "reg.h"
19 #include "port.h"
20 #include "trap.h"
21 #include "txheader.h"
22 #include "ib.h"
23 
24 static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
25 static const char mlxsw_sx_driver_version[] = "1.0";
26 
27 struct mlxsw_sx_port;
28 
29 struct mlxsw_sx {
30 	struct mlxsw_sx_port **ports;
31 	struct mlxsw_core *core;
32 	const struct mlxsw_bus_info *bus_info;
33 	u8 hw_id[ETH_ALEN];
34 };
35 
36 struct mlxsw_sx_port_pcpu_stats {
37 	u64			rx_packets;
38 	u64			rx_bytes;
39 	u64			tx_packets;
40 	u64			tx_bytes;
41 	struct u64_stats_sync	syncp;
42 	u32			tx_dropped;
43 };
44 
45 struct mlxsw_sx_port {
46 	struct net_device *dev;
47 	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
48 	struct mlxsw_sx *mlxsw_sx;
49 	u8 local_port;
50 	struct {
51 		u8 module;
52 	} mapping;
53 };
54 
55 /* tx_hdr_version
56  * Tx header version.
57  * Must be set to 0.
58  */
59 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
60 
61 /* tx_hdr_ctl
62  * Packet control type.
63  * 0 - Ethernet control (e.g. EMADs, LACP)
64  * 1 - Ethernet data
65  */
66 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
67 
68 /* tx_hdr_proto
69  * Packet protocol type. Must be set to 1 (Ethernet).
70  */
71 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
72 
73 /* tx_hdr_etclass
74  * Egress TClass to be used on the egress device on the egress port.
75  * The MSB is specified in the 'ctclass3' field.
76  * Range is 0-15, where 15 is the highest priority.
77  */
78 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
79 
80 /* tx_hdr_swid
81  * Switch partition ID.
82  */
83 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
84 
85 /* tx_hdr_port_mid
86  * Destination local port for unicast packets.
87  * Destination multicast ID for multicast packets.
88  *
89  * Control packets are directed to a specific egress port, while data
90  * packets are transmitted through the CPU port (0) into the switch partition,
91  * where forwarding rules are applied.
92  */
93 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
94 
95 /* tx_hdr_ctclass3
96  * See field 'etclass'.
97  */
98 MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
99 
100 /* tx_hdr_rdq
101  * RDQ for control packets sent to remote CPU.
102  * Must be set to 0x1F for EMADs, otherwise 0.
103  */
104 MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
105 
106 /* tx_hdr_cpu_sig
107  * Signature control for packets going to CPU. Must be set to 0.
108  */
109 MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
110 
111 /* tx_hdr_sig
112  * Stacking protocol signature. Must be set to 0xE0E0.
113  */
114 MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
115 
116 /* tx_hdr_stclass
117  * Stacking TClass.
118  */
119 MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
120 
121 /* tx_hdr_emad
122  * EMAD bit. Must be set for EMADs.
123  */
124 MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
125 
126 /* tx_hdr_type
127  * 0 - Data packets
128  * 6 - Control packets
129  */
130 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
131 
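/* Note: the MLXSW_ITEM32() definitions above expand into per-field helpers
 * named mlxsw_tx_hdr_<field>_set() (e.g. mlxsw_tx_hdr_version_set()), as can
 * be seen from their use below. mlxsw_sx_txhdr_construct() pushes
 * MLXSW_TXHDR_LEN bytes of headroom onto the skb, zeroes them and uses those
 * generated setters to fill in each Tx header field.
 */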
132 static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
133 				     const struct mlxsw_tx_info *tx_info)
134 {
135 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
136 	bool is_emad = tx_info->is_emad;
137 
138 	memset(txhdr, 0, MLXSW_TXHDR_LEN);
139 
140 	/* We currently set default values for the egress tclass (QoS). */
141 	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
142 	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
143 	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
144 	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
145 						  MLXSW_TXHDR_ETCLASS_5);
146 	mlxsw_tx_hdr_swid_set(txhdr, 0);
147 	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
148 	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
149 	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
150 					      MLXSW_TXHDR_RDQ_OTHER);
151 	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
152 	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
153 	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
154 	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
155 					       MLXSW_TXHDR_NOT_EMAD);
156 	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
157 }
158 
159 static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
160 					  bool is_up)
161 {
162 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
163 	char paos_pl[MLXSW_REG_PAOS_LEN];
164 
165 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
166 			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
167 			    MLXSW_PORT_ADMIN_STATUS_DOWN);
168 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
169 }
170 
171 static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
172 					 bool *p_is_up)
173 {
174 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
175 	char paos_pl[MLXSW_REG_PAOS_LEN];
176 	u8 oper_status;
177 	int err;
178 
179 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
180 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
181 	if (err)
182 		return err;
183 	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
184 	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
185 	return 0;
186 }
187 
188 static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
189 				   u16 mtu)
190 {
191 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
192 	char pmtu_pl[MLXSW_REG_PMTU_LEN];
193 	int max_mtu;
194 	int err;
195 
196 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
197 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
198 	if (err)
199 		return err;
200 	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
201 
202 	if (mtu > max_mtu)
203 		return -EINVAL;
204 
205 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
206 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
207 }
208 
209 static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
210 				     u16 mtu)
211 {
212 	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
213 	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
214 }
215 
216 static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
217 				    u16 mtu)
218 {
219 	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
220 }
221 
222 static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
223 				     u8 ib_port)
224 {
225 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
226 	char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
227 	int err;
228 
229 	mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
230 	mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
231 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
232 	return err;
233 }
234 
235 static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
236 {
237 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
238 	char pspa_pl[MLXSW_REG_PSPA_LEN];
239 
240 	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
241 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
242 }
243 
244 static int
245 mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
246 {
247 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
248 	char sspr_pl[MLXSW_REG_SSPR_LEN];
249 
250 	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
251 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
252 }
253 
254 static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
255 					 u8 local_port, u8 *p_module,
256 					 u8 *p_width)
257 {
258 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
259 	int err;
260 
261 	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
262 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
263 	if (err)
264 		return err;
265 	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
266 	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
267 	return 0;
268 }
269 
270 static int mlxsw_sx_port_open(struct net_device *dev)
271 {
272 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
273 	int err;
274 
275 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
276 	if (err)
277 		return err;
278 	netif_start_queue(dev);
279 	return 0;
280 }
281 
282 static int mlxsw_sx_port_stop(struct net_device *dev)
283 {
284 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
285 
286 	netif_stop_queue(dev);
287 	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
288 }
289 
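/* Transmit path: check that the core Tx queue can accept the packet, make
 * sure there is MLXSW_TXHDR_LEN of headroom (reallocating the skb if needed),
 * prepend the Tx header and hand the skb to mlxsw_core_skb_transmit(). On
 * success the per-CPU tx counters are updated (excluding the Tx header
 * bytes, which the hardware strips); on failure the packet is dropped and
 * tx_dropped is incremented.
 */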
290 static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
291 				      struct net_device *dev)
292 {
293 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
294 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
295 	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
296 	const struct mlxsw_tx_info tx_info = {
297 		.local_port = mlxsw_sx_port->local_port,
298 		.is_emad = false,
299 	};
300 	u64 len;
301 	int err;
302 
303 	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
304 		return NETDEV_TX_BUSY;
305 
306 	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
307 		struct sk_buff *skb_orig = skb;
308 
309 		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
310 		if (!skb) {
311 			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
312 			dev_kfree_skb_any(skb_orig);
313 			return NETDEV_TX_OK;
314 		}
315 		dev_consume_skb_any(skb_orig);
316 	}
317 	mlxsw_sx_txhdr_construct(skb, &tx_info);
318 	/* TX header is consumed by HW on the way so we shouldn't count its
319 	 * bytes as being sent.
320 	 */
321 	len = skb->len - MLXSW_TXHDR_LEN;
322 	/* Due to a race we might fail here because of a full queue. In that
323 	 * unlikely case we simply drop the packet.
324 	 */
325 	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
326 
327 	if (!err) {
328 		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
329 		u64_stats_update_begin(&pcpu_stats->syncp);
330 		pcpu_stats->tx_packets++;
331 		pcpu_stats->tx_bytes += len;
332 		u64_stats_update_end(&pcpu_stats->syncp);
333 	} else {
334 		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
335 		dev_kfree_skb_any(skb);
336 	}
337 	return NETDEV_TX_OK;
338 }
339 
340 static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
341 {
342 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
343 	int err;
344 
345 	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
346 	if (err)
347 		return err;
348 	dev->mtu = mtu;
349 	return 0;
350 }
351 
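/* Aggregate the per-CPU counters into the rtnl_link_stats64 result. The
 * 64-bit packet/byte counters are read inside a u64_stats_fetch_begin_irq()/
 * u64_stats_fetch_retry_irq() loop so a consistent snapshot is obtained even
 * on 32-bit systems; tx_dropped is a plain u32 and is summed outside that
 * protection, as the inline comment below notes.
 */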
352 static void
353 mlxsw_sx_port_get_stats64(struct net_device *dev,
354 			  struct rtnl_link_stats64 *stats)
355 {
356 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
357 	struct mlxsw_sx_port_pcpu_stats *p;
358 	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
359 	u32 tx_dropped = 0;
360 	unsigned int start;
361 	int i;
362 
363 	for_each_possible_cpu(i) {
364 		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
365 		do {
366 			start = u64_stats_fetch_begin_irq(&p->syncp);
367 			rx_packets	= p->rx_packets;
368 			rx_bytes	= p->rx_bytes;
369 			tx_packets	= p->tx_packets;
370 			tx_bytes	= p->tx_bytes;
371 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
372 
373 		stats->rx_packets	+= rx_packets;
374 		stats->rx_bytes		+= rx_bytes;
375 		stats->tx_packets	+= tx_packets;
376 		stats->tx_bytes		+= tx_bytes;
377 		/* tx_dropped is u32, updated without syncp protection. */
378 		tx_dropped	+= p->tx_dropped;
379 	}
380 	stats->tx_dropped	= tx_dropped;
381 }
382 
383 static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
384 					    size_t len)
385 {
386 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
387 
388 	return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core,
389 						  mlxsw_sx_port->local_port,
390 						  name, len);
391 }
392 
393 static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
394 	.ndo_open		= mlxsw_sx_port_open,
395 	.ndo_stop		= mlxsw_sx_port_stop,
396 	.ndo_start_xmit		= mlxsw_sx_port_xmit,
397 	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
398 	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
399 	.ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
400 };
401 
402 static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
403 				      struct ethtool_drvinfo *drvinfo)
404 {
405 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
406 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
407 
408 	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
409 	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
410 		sizeof(drvinfo->version));
411 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
412 		 "%d.%d.%d",
413 		 mlxsw_sx->bus_info->fw_rev.major,
414 		 mlxsw_sx->bus_info->fw_rev.minor,
415 		 mlxsw_sx->bus_info->fw_rev.subminor);
416 	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
417 		sizeof(drvinfo->bus_info));
418 }
419 
420 struct mlxsw_sx_port_hw_stats {
421 	char str[ETH_GSTRING_LEN];
422 	u64 (*getter)(const char *payload);
423 };
424 
425 static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
426 	{
427 		.str = "a_frames_transmitted_ok",
428 		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
429 	},
430 	{
431 		.str = "a_frames_received_ok",
432 		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
433 	},
434 	{
435 		.str = "a_frame_check_sequence_errors",
436 		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
437 	},
438 	{
439 		.str = "a_alignment_errors",
440 		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
441 	},
442 	{
443 		.str = "a_octets_transmitted_ok",
444 		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
445 	},
446 	{
447 		.str = "a_octets_received_ok",
448 		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
449 	},
450 	{
451 		.str = "a_multicast_frames_xmitted_ok",
452 		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
453 	},
454 	{
455 		.str = "a_broadcast_frames_xmitted_ok",
456 		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
457 	},
458 	{
459 		.str = "a_multicast_frames_received_ok",
460 		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
461 	},
462 	{
463 		.str = "a_broadcast_frames_received_ok",
464 		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
465 	},
466 	{
467 		.str = "a_in_range_length_errors",
468 		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
469 	},
470 	{
471 		.str = "a_out_of_range_length_field",
472 		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
473 	},
474 	{
475 		.str = "a_frame_too_long_errors",
476 		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
477 	},
478 	{
479 		.str = "a_symbol_error_during_carrier",
480 		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
481 	},
482 	{
483 		.str = "a_mac_control_frames_transmitted",
484 		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
485 	},
486 	{
487 		.str = "a_mac_control_frames_received",
488 		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
489 	},
490 	{
491 		.str = "a_unsupported_opcodes_received",
492 		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
493 	},
494 	{
495 		.str = "a_pause_mac_ctrl_frames_received",
496 		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
497 	},
498 	{
499 		.str = "a_pause_mac_ctrl_frames_xmitted",
500 		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
501 	},
502 };
503 
504 #define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
505 
506 static void mlxsw_sx_port_get_strings(struct net_device *dev,
507 				      u32 stringset, u8 *data)
508 {
509 	u8 *p = data;
510 	int i;
511 
512 	switch (stringset) {
513 	case ETH_SS_STATS:
514 		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
515 			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
516 			       ETH_GSTRING_LEN);
517 			p += ETH_GSTRING_LEN;
518 		}
519 		break;
520 	}
521 }
522 
523 static void mlxsw_sx_port_get_stats(struct net_device *dev,
524 				    struct ethtool_stats *stats, u64 *data)
525 {
526 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
527 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
528 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
529 	int i;
530 	int err;
531 
532 	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
533 			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
534 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
535 	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
536 		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
537 }
538 
539 static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
540 {
541 	switch (sset) {
542 	case ETH_SS_STATS:
543 		return MLXSW_SX_PORT_HW_STATS_LEN;
544 	default:
545 		return -EOPNOTSUPP;
546 	}
547 }
548 
549 struct mlxsw_sx_port_link_mode {
550 	u32 mask;
551 	u32 supported;
552 	u32 advertised;
553 	u32 speed;
554 };
555 
556 static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
557 	{
558 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
559 		.supported	= SUPPORTED_100baseT_Full,
560 		.advertised	= ADVERTISED_100baseT_Full,
561 		.speed		= 100,
562 	},
563 	{
564 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
565 		.speed		= 100,
566 	},
567 	{
568 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
569 				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
570 		.supported	= SUPPORTED_1000baseKX_Full,
571 		.advertised	= ADVERTISED_1000baseKX_Full,
572 		.speed		= 1000,
573 	},
574 	{
575 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
576 		.supported	= SUPPORTED_10000baseT_Full,
577 		.advertised	= ADVERTISED_10000baseT_Full,
578 		.speed		= 10000,
579 	},
580 	{
581 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
582 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
583 		.supported	= SUPPORTED_10000baseKX4_Full,
584 		.advertised	= ADVERTISED_10000baseKX4_Full,
585 		.speed		= 10000,
586 	},
587 	{
588 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
589 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
590 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
591 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
592 		.supported	= SUPPORTED_10000baseKR_Full,
593 		.advertised	= ADVERTISED_10000baseKR_Full,
594 		.speed		= 10000,
595 	},
596 	{
597 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
598 		.supported	= SUPPORTED_20000baseKR2_Full,
599 		.advertised	= ADVERTISED_20000baseKR2_Full,
600 		.speed		= 20000,
601 	},
602 	{
603 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
604 		.supported	= SUPPORTED_40000baseCR4_Full,
605 		.advertised	= ADVERTISED_40000baseCR4_Full,
606 		.speed		= 40000,
607 	},
608 	{
609 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
610 		.supported	= SUPPORTED_40000baseKR4_Full,
611 		.advertised	= ADVERTISED_40000baseKR4_Full,
612 		.speed		= 40000,
613 	},
614 	{
615 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
616 		.supported	= SUPPORTED_40000baseSR4_Full,
617 		.advertised	= ADVERTISED_40000baseSR4_Full,
618 		.speed		= 40000,
619 	},
620 	{
621 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
622 		.supported	= SUPPORTED_40000baseLR4_Full,
623 		.advertised	= ADVERTISED_40000baseLR4_Full,
624 		.speed		= 40000,
625 	},
626 	{
627 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
628 				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
629 				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
630 		.speed		= 25000,
631 	},
632 	{
633 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
634 				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
635 				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
636 		.speed		= 50000,
637 	},
638 	{
639 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
640 		.supported	= SUPPORTED_56000baseKR4_Full,
641 		.advertised	= ADVERTISED_56000baseKR4_Full,
642 		.speed		= 56000,
643 	},
644 	{
645 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
646 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
647 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
648 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
649 		.speed		= 100000,
650 	},
651 };
652 
653 #define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
654 #define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
655 
656 static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
657 {
658 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
659 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
660 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
661 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
662 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
663 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
664 		return SUPPORTED_FIBRE;
665 
666 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
667 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
668 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
669 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
670 			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
671 		return SUPPORTED_Backplane;
672 	return 0;
673 }
674 
675 static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
676 {
677 	u32 modes = 0;
678 	int i;
679 
680 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
681 		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
682 			modes |= mlxsw_sx_port_link_mode[i].supported;
683 	}
684 	return modes;
685 }
686 
687 static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
688 {
689 	u32 modes = 0;
690 	int i;
691 
692 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
693 		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
694 			modes |= mlxsw_sx_port_link_mode[i].advertised;
695 	}
696 	return modes;
697 }
698 
699 static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
700 					    struct ethtool_link_ksettings *cmd)
701 {
702 	u32 speed = SPEED_UNKNOWN;
703 	u8 duplex = DUPLEX_UNKNOWN;
704 	int i;
705 
706 	if (!carrier_ok)
707 		goto out;
708 
709 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
710 		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
711 			speed = mlxsw_sx_port_link_mode[i].speed;
712 			duplex = DUPLEX_FULL;
713 			break;
714 		}
715 	}
716 out:
717 	cmd->base.speed = speed;
718 	cmd->base.duplex = duplex;
719 }
720 
721 static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
722 {
723 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
724 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
725 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
726 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
727 		return PORT_FIBRE;
728 
729 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
730 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
731 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
732 		return PORT_DA;
733 
734 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
735 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
736 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
737 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
738 		return PORT_NONE;
739 
740 	return PORT_OTHER;
741 }
742 
743 static int
744 mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
745 				 struct ethtool_link_ksettings *cmd)
746 {
747 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
748 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
749 	char ptys_pl[MLXSW_REG_PTYS_LEN];
750 	u32 eth_proto_cap;
751 	u32 eth_proto_admin;
752 	u32 eth_proto_oper;
753 	u32 supported, advertising, lp_advertising;
754 	int err;
755 
756 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
757 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
758 	if (err) {
759 		netdev_err(dev, "Failed to get proto");
760 		return err;
761 	}
762 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
763 				  &eth_proto_admin, &eth_proto_oper);
764 
765 	supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
766 			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
767 			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
768 	advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
769 	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
770 					eth_proto_oper, cmd);
771 
772 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
773 	cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
774 	lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
775 
776 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
777 						supported);
778 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
779 						advertising);
780 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
781 						lp_advertising);
782 
783 	return 0;
784 }
785 
786 static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
787 {
788 	u32 ptys_proto = 0;
789 	int i;
790 
791 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
792 		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
793 			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
794 	}
795 	return ptys_proto;
796 }
797 
798 static u32 mlxsw_sx_to_ptys_speed(u32 speed)
799 {
800 	u32 ptys_proto = 0;
801 	int i;
802 
803 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
804 		if (speed == mlxsw_sx_port_link_mode[i].speed)
805 			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
806 	}
807 	return ptys_proto;
808 }
809 
810 static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
811 {
812 	u32 ptys_proto = 0;
813 	int i;
814 
815 	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
816 		if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
817 			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
818 	}
819 	return ptys_proto;
820 }
821 
822 static int
823 mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
824 				 const struct ethtool_link_ksettings *cmd)
825 {
826 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
827 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
828 	char ptys_pl[MLXSW_REG_PTYS_LEN];
829 	u32 speed;
830 	u32 eth_proto_new;
831 	u32 eth_proto_cap;
832 	u32 eth_proto_admin;
833 	u32 advertising;
834 	bool is_up;
835 	int err;
836 
837 	speed = cmd->base.speed;
838 
839 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
840 						cmd->link_modes.advertising);
841 
842 	eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
843 		mlxsw_sx_to_ptys_advert_link(advertising) :
844 		mlxsw_sx_to_ptys_speed(speed);
845 
846 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
847 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
848 	if (err) {
849 		netdev_err(dev, "Failed to get proto");
850 		return err;
851 	}
852 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
853 				  NULL);
854 
855 	eth_proto_new = eth_proto_new & eth_proto_cap;
856 	if (!eth_proto_new) {
857 		netdev_err(dev, "Not supported proto admin requested");
858 		return -EINVAL;
859 	}
860 	if (eth_proto_new == eth_proto_admin)
861 		return 0;
862 
863 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
864 				eth_proto_new, true);
865 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
866 	if (err) {
867 		netdev_err(dev, "Failed to set proto admin");
868 		return err;
869 	}
870 
871 	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
872 	if (err) {
873 		netdev_err(dev, "Failed to get oper status");
874 		return err;
875 	}
876 	if (!is_up)
877 		return 0;
878 
879 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
880 	if (err) {
881 		netdev_err(dev, "Failed to set admin status");
882 		return err;
883 	}
884 
885 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
886 	if (err) {
887 		netdev_err(dev, "Failed to set admin status");
888 		return err;
889 	}
890 
891 	return 0;
892 }
893 
894 static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
895 	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
896 	.get_link		= ethtool_op_get_link,
897 	.get_strings		= mlxsw_sx_port_get_strings,
898 	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
899 	.get_sset_count		= mlxsw_sx_port_get_sset_count,
900 	.get_link_ksettings	= mlxsw_sx_port_get_link_ksettings,
901 	.set_link_ksettings	= mlxsw_sx_port_set_link_ksettings,
902 };
903 
904 static int mlxsw_sx_port_attr_get(struct net_device *dev,
905 				  struct switchdev_attr *attr)
906 {
907 	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
908 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
909 
910 	switch (attr->id) {
911 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
912 		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
913 		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
914 		break;
915 	default:
916 		return -EOPNOTSUPP;
917 	}
918 
919 	return 0;
920 }
921 
922 static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
923 	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
924 };
925 
926 static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
927 {
928 	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
929 	int err;
930 
931 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
932 	if (err)
933 		return err;
934 	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
935 	return 0;
936 }
937 
938 static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
939 {
940 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
941 	struct net_device *dev = mlxsw_sx_port->dev;
942 	char ppad_pl[MLXSW_REG_PPAD_LEN];
943 	int err;
944 
945 	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
946 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
947 	if (err)
948 		return err;
949 	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
950 	/* The last byte value in the base MAC address is guaranteed
951 	 * to be such that it does not overflow when adding the local_port
952 	 * value.
953 	 */
954 	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
955 	return 0;
956 }
957 
958 static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
959 				       u16 vid, enum mlxsw_reg_spms_state state)
960 {
961 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
962 	char *spms_pl;
963 	int err;
964 
965 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
966 	if (!spms_pl)
967 		return -ENOMEM;
968 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
969 	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
970 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
971 	kfree(spms_pl);
972 	return err;
973 }
974 
975 static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
976 				      u16 speed, u16 width)
977 {
978 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
979 	char ptys_pl[MLXSW_REG_PTYS_LEN];
980 
981 	mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
982 			       width);
983 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
984 }
985 
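/* The administratively enabled Ethernet speeds are derived from the port's
 * lane width: upper_speed = MLXSW_SX_PORT_BASE_SPEED (10000 Mb/s) * width,
 * and every link mode in mlxsw_sx_port_link_mode[] whose speed does not
 * exceed upper_speed is set in the PTYS admin mask. For example, a 4-lane
 * port gets upper_speed = 40000 and therefore all modes up to and including
 * the 40G ones.
 */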
986 static int
987 mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
988 {
989 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
990 	u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
991 	char ptys_pl[MLXSW_REG_PTYS_LEN];
992 	u32 eth_proto_admin;
993 
994 	eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
995 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
996 				eth_proto_admin, true);
997 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
998 }
999 
1000 static int
1001 mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
1002 				    enum mlxsw_reg_spmlr_learn_mode mode)
1003 {
1004 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1005 	char spmlr_pl[MLXSW_REG_SPMLR_LEN];
1006 
1007 	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
1008 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
1009 }
1010 
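/* Ethernet port creation sequence: allocate the netdev and per-CPU stats,
 * derive the port MAC address from the switch base MAC, set up the system
 * port mapping, move the port to the Ethernet swid (0), configure speed by
 * width and the default MTU, leave the port administratively down, put the
 * default VID into STP forwarding state with MAC learning disabled, then
 * register the netdev and announce the port to mlxsw_core.
 */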
1011 static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1012 				      u8 module, u8 width)
1013 {
1014 	struct mlxsw_sx_port *mlxsw_sx_port;
1015 	struct net_device *dev;
1016 	int err;
1017 
1018 	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
1019 	if (!dev)
1020 		return -ENOMEM;
1021 	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
1022 	mlxsw_sx_port = netdev_priv(dev);
1023 	mlxsw_sx_port->dev = dev;
1024 	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
1025 	mlxsw_sx_port->local_port = local_port;
1026 	mlxsw_sx_port->mapping.module = module;
1027 
1028 	mlxsw_sx_port->pcpu_stats =
1029 		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
1030 	if (!mlxsw_sx_port->pcpu_stats) {
1031 		err = -ENOMEM;
1032 		goto err_alloc_stats;
1033 	}
1034 
1035 	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
1036 	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
1037 	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
1038 
1039 	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
1040 	if (err) {
1041 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
1042 			mlxsw_sx_port->local_port);
1043 		goto err_dev_addr_get;
1044 	}
1045 
1046 	netif_carrier_off(dev);
1047 
1048 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1049 			 NETIF_F_VLAN_CHALLENGED;
1050 
1051 	dev->min_mtu = 0;
1052 	dev->max_mtu = ETH_MAX_MTU;
1053 
1054 	/* Each packet needs to have a Tx header (metadata) on top of all other
1055 	 * headers.
1056 	 */
1057 	dev->needed_headroom = MLXSW_TXHDR_LEN;
1058 
1059 	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
1060 	if (err) {
1061 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1062 			mlxsw_sx_port->local_port);
1063 		goto err_port_system_port_mapping_set;
1064 	}
1065 
1066 	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
1067 	if (err) {
1068 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
1069 			mlxsw_sx_port->local_port);
1070 		goto err_port_swid_set;
1071 	}
1072 
1073 	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
1074 	if (err) {
1075 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
1076 			mlxsw_sx_port->local_port);
1077 		goto err_port_speed_set;
1078 	}
1079 
1080 	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
1081 	if (err) {
1082 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
1083 			mlxsw_sx_port->local_port);
1084 		goto err_port_mtu_set;
1085 	}
1086 
1087 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
1088 	if (err)
1089 		goto err_port_admin_status_set;
1090 
1091 	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
1092 					  MLXSW_PORT_DEFAULT_VID,
1093 					  MLXSW_REG_SPMS_STATE_FORWARDING);
1094 	if (err) {
1095 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
1096 			mlxsw_sx_port->local_port);
1097 		goto err_port_stp_state_set;
1098 	}
1099 
1100 	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
1101 						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
1102 	if (err) {
1103 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
1104 			mlxsw_sx_port->local_port);
1105 		goto err_port_mac_learning_mode_set;
1106 	}
1107 
1108 	err = register_netdev(dev);
1109 	if (err) {
1110 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
1111 			mlxsw_sx_port->local_port);
1112 		goto err_register_netdev;
1113 	}
1114 
1115 	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
1116 				mlxsw_sx_port, dev, module + 1, false, 0);
1117 	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
1118 	return 0;
1119 
1120 err_register_netdev:
1121 err_port_mac_learning_mode_set:
1122 err_port_stp_state_set:
1123 err_port_admin_status_set:
1124 err_port_mtu_set:
1125 err_port_speed_set:
1126 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1127 err_port_swid_set:
1128 err_port_system_port_mapping_set:
1129 err_dev_addr_get:
1130 	free_percpu(mlxsw_sx_port->pcpu_stats);
1131 err_alloc_stats:
1132 	free_netdev(dev);
1133 	return err;
1134 }
1135 
1136 static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1137 				    u8 module, u8 width)
1138 {
1139 	int err;
1140 
1141 	err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
1142 	if (err) {
1143 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
1144 			local_port);
1145 		return err;
1146 	}
1147 	err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
1148 	if (err)
1149 		goto err_port_create;
1150 
1151 	return 0;
1152 
1153 err_port_create:
1154 	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1155 	return err;
1156 }
1157 
1158 static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1159 {
1160 	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1161 
1162 	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
1163 	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
1164 	mlxsw_sx->ports[local_port] = NULL;
1165 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1166 	free_percpu(mlxsw_sx_port->pcpu_stats);
1167 	free_netdev(mlxsw_sx_port->dev);
1168 }
1169 
1170 static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1171 {
1172 	return mlxsw_sx->ports[local_port] != NULL;
1173 }
1174 
1175 static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1176 				     u8 module, u8 width)
1177 {
1178 	struct mlxsw_sx_port *mlxsw_sx_port;
1179 	int err;
1180 
1181 	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
1182 	if (!mlxsw_sx_port)
1183 		return -ENOMEM;
1184 	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
1185 	mlxsw_sx_port->local_port = local_port;
1186 	mlxsw_sx_port->mapping.module = module;
1187 
1188 	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
1189 	if (err) {
1190 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1191 			mlxsw_sx_port->local_port);
1192 		goto err_port_system_port_mapping_set;
1193 	}
1194 
1195 	/* Adding port to Infiniband swid (1) */
1196 	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
1197 	if (err) {
1198 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
1199 			mlxsw_sx_port->local_port);
1200 		goto err_port_swid_set;
1201 	}
1202 
1203 	/* Expose the IB port number as its front panel name */
1204 	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
1205 	if (err) {
1206 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
1207 			mlxsw_sx_port->local_port);
1208 		goto err_port_ib_set;
1209 	}
1210 
1211 	/* Support all speeds from SDR to FDR (bitmask) and bus widths
1212 	 * of 1x, 2x and 4x (3-bit bitmask).
1213 	 */
1214 	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
1215 					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
1216 					 BIT(3) - 1);
1217 	if (err) {
1218 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
1219 			mlxsw_sx_port->local_port);
1220 		goto err_port_speed_set;
1221 	}
1222 
1223 	/* Change to the maximum MTU the device supports; the SMA will take
1224 	 * care of the active MTU.
1225 	 */
1226 	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
1227 	if (err) {
1228 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
1229 			mlxsw_sx_port->local_port);
1230 		goto err_port_mtu_set;
1231 	}
1232 
1233 	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
1234 	if (err) {
1235 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
1236 			mlxsw_sx_port->local_port);
1237 		goto err_port_admin_set;
1238 	}
1239 
1240 	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
1241 			       mlxsw_sx_port);
1242 	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
1243 	return 0;
1244 
1245 err_port_admin_set:
1246 err_port_mtu_set:
1247 err_port_speed_set:
1248 err_port_ib_set:
1249 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1250 err_port_swid_set:
1251 err_port_system_port_mapping_set:
1252 	kfree(mlxsw_sx_port);
1253 	return err;
1254 }
1255 
1256 static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1257 {
1258 	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1259 
1260 	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
1261 	mlxsw_sx->ports[local_port] = NULL;
1262 	mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
1263 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1264 	kfree(mlxsw_sx_port);
1265 }
1266 
1267 static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1268 {
1269 	enum devlink_port_type port_type =
1270 		mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1271 
1272 	if (port_type == DEVLINK_PORT_TYPE_ETH)
1273 		__mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
1274 	else if (port_type == DEVLINK_PORT_TYPE_IB)
1275 		__mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
1276 }
1277 
1278 static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1279 {
1280 	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
1281 	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1282 }
1283 
1284 static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
1285 {
1286 	int i;
1287 
1288 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
1289 		if (mlxsw_sx_port_created(mlxsw_sx, i))
1290 			mlxsw_sx_port_remove(mlxsw_sx, i);
1291 	kfree(mlxsw_sx->ports);
1292 	mlxsw_sx->ports = NULL;
1293 }
1294 
1295 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
1296 {
1297 	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
1298 	size_t alloc_size;
1299 	u8 module, width;
1300 	int i;
1301 	int err;
1302 
1303 	alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
1304 	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
1305 	if (!mlxsw_sx->ports)
1306 		return -ENOMEM;
1307 
1308 	for (i = 1; i < max_ports; i++) {
1309 		err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
1310 						    &width);
1311 		if (err)
1312 			goto err_port_module_info_get;
1313 		if (!width)
1314 			continue;
1315 		err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
1316 		if (err)
1317 			goto err_port_create;
1318 	}
1319 	return 0;
1320 
1321 err_port_create:
1322 err_port_module_info_get:
1323 	for (i--; i >= 1; i--)
1324 		if (mlxsw_sx_port_created(mlxsw_sx, i))
1325 			mlxsw_sx_port_remove(mlxsw_sx, i);
1326 	kfree(mlxsw_sx->ports);
1327 	mlxsw_sx->ports = NULL;
1328 	return err;
1329 }
1330 
1331 static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1332 					 enum mlxsw_reg_pude_oper_status status)
1333 {
1334 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
1335 		netdev_info(mlxsw_sx_port->dev, "link up\n");
1336 		netif_carrier_on(mlxsw_sx_port->dev);
1337 	} else {
1338 		netdev_info(mlxsw_sx_port->dev, "link down\n");
1339 		netif_carrier_off(mlxsw_sx_port->dev);
1340 	}
1341 }
1342 
1343 static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1344 					enum mlxsw_reg_pude_oper_status status)
1345 {
1346 	if (status == MLXSW_PORT_OPER_STATUS_UP)
1347 		pr_info("ib link for port %d - up\n",
1348 			mlxsw_sx_port->mapping.module + 1);
1349 	else
1350 		pr_info("ib link for port %d - down\n",
1351 			mlxsw_sx_port->mapping.module + 1);
1352 }
1353 
1354 static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
1355 				     char *pude_pl, void *priv)
1356 {
1357 	struct mlxsw_sx *mlxsw_sx = priv;
1358 	struct mlxsw_sx_port *mlxsw_sx_port;
1359 	enum mlxsw_reg_pude_oper_status status;
1360 	enum devlink_port_type port_type;
1361 	u8 local_port;
1362 
1363 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1364 	mlxsw_sx_port = mlxsw_sx->ports[local_port];
1365 	if (!mlxsw_sx_port) {
1366 		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1367 			 local_port);
1368 		return;
1369 	}
1370 
1371 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
1372 	port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1373 	if (port_type == DEVLINK_PORT_TYPE_ETH)
1374 		mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
1375 	else if (port_type == DEVLINK_PORT_TYPE_IB)
1376 		mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
1377 }
1378 
1379 static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
1380 				      void *priv)
1381 {
1382 	struct mlxsw_sx *mlxsw_sx = priv;
1383 	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1384 	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
1385 
1386 	if (unlikely(!mlxsw_sx_port)) {
1387 		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
1388 				     local_port);
1389 		return;
1390 	}
1391 
1392 	skb->dev = mlxsw_sx_port->dev;
1393 
1394 	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
1395 	u64_stats_update_begin(&pcpu_stats->syncp);
1396 	pcpu_stats->rx_packets++;
1397 	pcpu_stats->rx_bytes += skb->len;
1398 	u64_stats_update_end(&pcpu_stats->syncp);
1399 
1400 	skb->protocol = eth_type_trans(skb, skb->dev);
1401 	netif_receive_skb(skb);
1402 }
1403 
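/* devlink port type change: the existing port instance is torn down and then
 * recreated as either an Ethernet or an InfiniBand port on the corresponding
 * swid; DEVLINK_PORT_TYPE_AUTO is not supported.
 */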
1404 static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1405 				  enum devlink_port_type new_type)
1406 {
1407 	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1408 	u8 module, width;
1409 	int err;
1410 
1411 	if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
1412 		dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
1413 			local_port);
1414 		return -EINVAL;
1415 	}
1416 
1417 	if (new_type == DEVLINK_PORT_TYPE_AUTO)
1418 		return -EOPNOTSUPP;
1419 
1420 	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
1421 	err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
1422 					    &width);
1423 	if (err)
1424 		goto err_port_module_info_get;
1425 
1426 	if (new_type == DEVLINK_PORT_TYPE_ETH)
1427 		err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
1428 						 width);
1429 	else if (new_type == DEVLINK_PORT_TYPE_IB)
1430 		err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
1431 						width);
1432 
1433 err_port_module_info_get:
1434 	return err;
1435 }
1436 
1437 #define MLXSW_SX_RXL(_trap_id) \
1438 	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU,	\
1439 		  false, SX2_RX, FORWARD)
1440 
1441 static const struct mlxsw_listener mlxsw_sx_listener[] = {
1442 	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
1443 	MLXSW_SX_RXL(FDB_MC),
1444 	MLXSW_SX_RXL(STP),
1445 	MLXSW_SX_RXL(LACP),
1446 	MLXSW_SX_RXL(EAPOL),
1447 	MLXSW_SX_RXL(LLDP),
1448 	MLXSW_SX_RXL(MMRP),
1449 	MLXSW_SX_RXL(MVRP),
1450 	MLXSW_SX_RXL(RPVST),
1451 	MLXSW_SX_RXL(DHCP),
1452 	MLXSW_SX_RXL(IGMP_QUERY),
1453 	MLXSW_SX_RXL(IGMP_V1_REPORT),
1454 	MLXSW_SX_RXL(IGMP_V2_REPORT),
1455 	MLXSW_SX_RXL(IGMP_V2_LEAVE),
1456 	MLXSW_SX_RXL(IGMP_V3_REPORT),
1457 };
1458 
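/* Trap setup: configure the SX2_RX and SX2_CTRL host trap groups (HTGT) and
 * register the listeners above, so that trapped control packets (STP, LACP,
 * LLDP, IGMP, etc.) are delivered to mlxsw_sx_rx_listener_func() and PUDE
 * link events to mlxsw_sx_pude_event_func().
 */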
1459 static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
1460 {
1461 	char htgt_pl[MLXSW_REG_HTGT_LEN];
1462 	int i;
1463 	int err;
1464 
1465 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
1466 			    MLXSW_REG_HTGT_INVALID_POLICER,
1467 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1468 			    MLXSW_REG_HTGT_DEFAULT_TC);
1469 	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1470 					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);
1471 
1472 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1473 	if (err)
1474 		return err;
1475 
1476 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
1477 			    MLXSW_REG_HTGT_INVALID_POLICER,
1478 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1479 			    MLXSW_REG_HTGT_DEFAULT_TC);
1480 	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1481 					MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);
1482 
1483 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1484 	if (err)
1485 		return err;
1486 
1487 	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1488 		err = mlxsw_core_trap_register(mlxsw_sx->core,
1489 					       &mlxsw_sx_listener[i],
1490 					       mlxsw_sx);
1491 		if (err)
1492 			goto err_listener_register;
1493 
1494 	}
1495 	return 0;
1496 
1497 err_listener_register:
1498 	for (i--; i >= 0; i--) {
1499 		mlxsw_core_trap_unregister(mlxsw_sx->core,
1500 					   &mlxsw_sx_listener[i],
1501 					   mlxsw_sx);
1502 	}
1503 	return err;
1504 }
1505 
1506 static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
1507 {
1508 	int i;
1509 
1510 	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1511 		mlxsw_core_trap_unregister(mlxsw_sx->core,
1512 					   &mlxsw_sx_listener[i],
1513 					   mlxsw_sx);
1514 	}
1515 }
1516 
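/* Flooding setup: build a single flooding table that contains only the CPU
 * port and point unknown-unicast, broadcast and unregistered-multicast
 * traffic types at it, so flooded traffic is forwarded to the host CPU.
 */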
1517 static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1518 {
1519 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
1520 	char sgcr_pl[MLXSW_REG_SGCR_LEN];
1521 	char *sftr_pl;
1522 	int err;
1523 
1524 	/* Configure a flooding table, which includes only CPU port. */
1525 	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
1526 	if (!sftr_pl)
1527 		return -ENOMEM;
1528 	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
1529 			    MLXSW_PORT_CPU_PORT, true);
1530 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
1531 	kfree(sftr_pl);
1532 	if (err)
1533 		return err;
1534 
1535 	/* Flood different packet types using the flooding table. */
1536 	mlxsw_reg_sfgc_pack(sfgc_pl,
1537 			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
1538 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1539 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1540 			    0);
1541 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1542 	if (err)
1543 		return err;
1544 
1545 	mlxsw_reg_sfgc_pack(sfgc_pl,
1546 			    MLXSW_REG_SFGC_TYPE_BROADCAST,
1547 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1548 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1549 			    0);
1550 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1551 	if (err)
1552 		return err;
1553 
1554 	mlxsw_reg_sfgc_pack(sfgc_pl,
1555 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
1556 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1557 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1558 			    0);
1559 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1560 	if (err)
1561 		return err;
1562 
1563 	mlxsw_reg_sfgc_pack(sfgc_pl,
1564 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
1565 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1566 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1567 			    0);
1568 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1569 	if (err)
1570 		return err;
1571 
1572 	mlxsw_reg_sfgc_pack(sfgc_pl,
1573 			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
1574 			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1575 			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1576 			    0);
1577 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1578 	if (err)
1579 		return err;
1580 
1581 	mlxsw_reg_sgcr_pack(sgcr_pl, true);
1582 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1583 }
1584 
1585 static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
1586 {
1587 	char htgt_pl[MLXSW_REG_HTGT_LEN];
1588 
1589 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
1590 			    MLXSW_REG_HTGT_INVALID_POLICER,
1591 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1592 			    MLXSW_REG_HTGT_DEFAULT_TC);
1593 	mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
1594 	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1595 					MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
1596 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
1597 }
1598 
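/* Driver init order: read the switch base MAC (SPAD), create the Ethernet
 * ports, register the trap groups and listeners, then initialize the flood
 * tables. mlxsw_sx_fini() unregisters the traps and removes the ports.
 */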
1599 static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
1600 			 const struct mlxsw_bus_info *mlxsw_bus_info)
1601 {
1602 	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1603 	int err;
1604 
1605 	mlxsw_sx->core = mlxsw_core;
1606 	mlxsw_sx->bus_info = mlxsw_bus_info;
1607 
1608 	err = mlxsw_sx_hw_id_get(mlxsw_sx);
1609 	if (err) {
1610 		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
1611 		return err;
1612 	}
1613 
1614 	err = mlxsw_sx_ports_create(mlxsw_sx);
1615 	if (err) {
1616 		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
1617 		return err;
1618 	}
1619 
1620 	err = mlxsw_sx_traps_init(mlxsw_sx);
1621 	if (err) {
1622 		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
1623 		goto err_listener_register;
1624 	}
1625 
1626 	err = mlxsw_sx_flood_init(mlxsw_sx);
1627 	if (err) {
1628 		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
1629 		goto err_flood_init;
1630 	}
1631 
1632 	return 0;
1633 
1634 err_flood_init:
1635 	mlxsw_sx_traps_fini(mlxsw_sx);
1636 err_listener_register:
1637 	mlxsw_sx_ports_remove(mlxsw_sx);
1638 	return err;
1639 }
1640 
1641 static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
1642 {
1643 	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1644 
1645 	mlxsw_sx_traps_fini(mlxsw_sx);
1646 	mlxsw_sx_ports_remove(mlxsw_sx);
1647 }
1648 
1649 static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
1650 	.used_max_vepa_channels		= 1,
1651 	.max_vepa_channels		= 0,
1652 	.used_max_mid			= 1,
1653 	.max_mid			= 7000,
1654 	.used_max_pgt			= 1,
1655 	.max_pgt			= 0,
1656 	.used_max_system_port		= 1,
1657 	.max_system_port		= 48000,
1658 	.used_max_vlan_groups		= 1,
1659 	.max_vlan_groups		= 127,
1660 	.used_max_regions		= 1,
1661 	.max_regions			= 400,
1662 	.used_flood_tables		= 1,
1663 	.max_flood_tables		= 2,
1664 	.max_vid_flood_tables		= 1,
1665 	.used_flood_mode		= 1,
1666 	.flood_mode			= 3,
1667 	.used_max_ib_mc			= 1,
1668 	.max_ib_mc			= 6,
1669 	.used_max_pkey			= 1,
1670 	.max_pkey			= 0,
1671 	.swid_config			= {
1672 		{
1673 			.used_type	= 1,
1674 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
1675 		},
1676 		{
1677 			.used_type	= 1,
1678 			.type		= MLXSW_PORT_SWID_TYPE_IB,
1679 		}
1680 	},
1681 };
1682 
1683 static struct mlxsw_driver mlxsw_sx_driver = {
1684 	.kind			= mlxsw_sx_driver_name,
1685 	.priv_size		= sizeof(struct mlxsw_sx),
1686 	.init			= mlxsw_sx_init,
1687 	.fini			= mlxsw_sx_fini,
1688 	.basic_trap_groups_set	= mlxsw_sx_basic_trap_groups_set,
1689 	.txhdr_construct	= mlxsw_sx_txhdr_construct,
1690 	.txhdr_len		= MLXSW_TXHDR_LEN,
1691 	.profile		= &mlxsw_sx_config_profile,
1692 	.port_type_set		= mlxsw_sx_port_type_set,
1693 };
1694 
1695 static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
1696 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
1697 	{0, },
1698 };
1699 
1700 static struct pci_driver mlxsw_sx_pci_driver = {
1701 	.name = mlxsw_sx_driver_name,
1702 	.id_table = mlxsw_sx_pci_id_table,
1703 };
1704 
1705 static int __init mlxsw_sx_module_init(void)
1706 {
1707 	int err;
1708 
1709 	err = mlxsw_core_driver_register(&mlxsw_sx_driver);
1710 	if (err)
1711 		return err;
1712 
1713 	err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
1714 	if (err)
1715 		goto err_pci_driver_register;
1716 
1717 	return 0;
1718 
1719 err_pci_driver_register:
1720 	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1721 	return err;
1722 }
1723 
1724 static void __exit mlxsw_sx_module_exit(void)
1725 {
1726 	mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
1727 	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1728 }
1729 
1730 module_init(mlxsw_sx_module_init);
1731 module_exit(mlxsw_sx_module_exit);
1732 
1733 MODULE_LICENSE("Dual BSD/GPL");
1734 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1735 MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
1736 MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);
1737