1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <net/switchdev.h>
48
49 #include "pci.h"
50 #include "core.h"
51 #include "reg.h"
52 #include "port.h"
53 #include "trap.h"
54 #include "txheader.h"
55 #include "ib.h"
56
57 static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
58 static const char mlxsw_sx_driver_version[] = "1.0";
59
60 struct mlxsw_sx_port;
61
62 struct mlxsw_sx {
63 struct mlxsw_sx_port **ports;
64 struct mlxsw_core *core;
65 const struct mlxsw_bus_info *bus_info;
66 u8 hw_id[ETH_ALEN];
67 };
68
69 struct mlxsw_sx_port_pcpu_stats {
70 u64 rx_packets;
71 u64 rx_bytes;
72 u64 tx_packets;
73 u64 tx_bytes;
74 struct u64_stats_sync syncp;
75 u32 tx_dropped;
76 };
77
78 struct mlxsw_sx_port {
79 struct net_device *dev;
80 struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
81 struct mlxsw_sx *mlxsw_sx;
82 u8 local_port;
83 struct {
84 u8 module;
85 } mapping;
86 };
87
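/* The MLXSW_ITEM32() definitions below describe the fields of the Tx
 * header that is prepended to every packet handed to the device. Each
 * definition expands to bitfield accessors for the named field, e.g.
 *
 *	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
 *
 * writes bits [31:28] of the 32-bit word at byte offset 0x00.
 */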
88 /* tx_hdr_version
89 * Tx header version.
90 * Must be set to 0.
91 */
92 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
93
94 /* tx_hdr_ctl
95 * Packet control type.
96 * 0 - Ethernet control (e.g. EMADs, LACP)
97 * 1 - Ethernet data
98 */
99 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
100
101 /* tx_hdr_proto
102 * Packet protocol type. Must be set to 1 (Ethernet).
103 */
104 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
105
106 /* tx_hdr_etclass
107 * Egress TClass to be used on the egress device on the egress port.
108 * The MSB is specified in the 'ctclass3' field.
109 * Range is 0-15, where 15 is the highest priority.
110 */
111 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
112
113 /* tx_hdr_swid
114 * Switch partition ID.
115 */
116 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
117
118 /* tx_hdr_port_mid
119 * Destination local port for unicast packets.
120 * Destination multicast ID for multicast packets.
121 *
122 * Control packets are directed to a specific egress port, while data
123 * packets are transmitted through the CPU port (0) into the switch partition,
124 * where forwarding rules are applied.
125 */
126 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
127
128 /* tx_hdr_ctclass3
129 * See field 'etclass'.
130 */
131 MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
132
133 /* tx_hdr_rdq
134 * RDQ for control packets sent to remote CPU.
135 * Must be set to 0x1F for EMADs, otherwise 0.
136 */
137 MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
138
139 /* tx_hdr_cpu_sig
140 * Signature control for packets going to CPU. Must be set to 0.
141 */
142 MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
143
144 /* tx_hdr_sig
145  * Stacking protocol signature. Must be set to 0xE0E0.
146 */
147 MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
148
149 /* tx_hdr_stclass
150 * Stacking TClass.
151 */
152 MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
153
154 /* tx_hdr_emad
155 * EMAD bit. Must be set for EMADs.
156 */
157 MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
158
159 /* tx_hdr_type
160 * 0 - Data packets
161 * 6 - Control packets
162 */
163 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
164
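/* Build the Tx header consumed by the device: it marks the packet as
 * Ethernet control or data, selects the switch partition (SWID), the
 * destination local port or multicast ID, and flags EMADs.
 */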
165 static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
166 const struct mlxsw_tx_info *tx_info)
167 {
168 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
169 bool is_emad = tx_info->is_emad;
170
171 memset(txhdr, 0, MLXSW_TXHDR_LEN);
172
173 /* We currently set default values for the egress tclass (QoS). */
174 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
175 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
176 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
177 mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
178 MLXSW_TXHDR_ETCLASS_5);
179 mlxsw_tx_hdr_swid_set(txhdr, 0);
180 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
181 mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
182 mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
183 MLXSW_TXHDR_RDQ_OTHER);
184 mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
185 mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
186 mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
187 mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
188 MLXSW_TXHDR_NOT_EMAD);
189 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
190 }
191
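/* Set the administrative (admin) state of the port, up or down, using
 * the PAOS register.
 */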
192 static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
193 bool is_up)
194 {
195 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
196 char paos_pl[MLXSW_REG_PAOS_LEN];
197
198 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
199 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
200 MLXSW_PORT_ADMIN_STATUS_DOWN);
201 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
202 }
203
204 static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
205 bool *p_is_up)
206 {
207 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
208 char paos_pl[MLXSW_REG_PAOS_LEN];
209 u8 oper_status;
210 int err;
211
212 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
213 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
214 if (err)
215 return err;
216 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
217 	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
218 return 0;
219 }
220
221 static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
222 u16 mtu)
223 {
224 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
225 char pmtu_pl[MLXSW_REG_PMTU_LEN];
226 int max_mtu;
227 int err;
228
229 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
230 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
231 if (err)
232 return err;
233 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
234
235 if (mtu > max_mtu)
236 return -EINVAL;
237
238 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
239 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
240 }
241
242 static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
243 u16 mtu)
244 {
245 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
246 return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
247 }
248
249 static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
250 u16 mtu)
251 {
252 return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
253 }
254
255 static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
256 u8 ib_port)
257 {
258 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
259 char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
260 int err;
261
262 mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
263 mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
264 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
265 return err;
266 }
267
268 static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
269 {
270 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
271 char pspa_pl[MLXSW_REG_PSPA_LEN];
272
273 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
274 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
275 }
276
277 static int
278 mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
279 {
280 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
281 char sspr_pl[MLXSW_REG_SSPR_LEN];
282
283 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
284 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
285 }
286
287 static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
288 u8 local_port, u8 *p_module,
289 u8 *p_width)
290 {
291 char pmlp_pl[MLXSW_REG_PMLP_LEN];
292 int err;
293
294 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
295 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
296 if (err)
297 return err;
298 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
299 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
300 return 0;
301 }
302
303 static int mlxsw_sx_port_open(struct net_device *dev)
304 {
305 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
306 int err;
307
308 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
309 if (err)
310 return err;
311 netif_start_queue(dev);
312 return 0;
313 }
314
315 static int mlxsw_sx_port_stop(struct net_device *dev)
316 {
317 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
318
319 netif_stop_queue(dev);
320 return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
321 }
322
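/* Transmit path: make sure the skb has room for the Tx header, build
 * the header and hand the skb to mlxsw_core. Per-CPU Tx counters are
 * updated on success; on failure the skb is dropped and accounted in
 * tx_dropped.
 */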
323 static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
324 struct net_device *dev)
325 {
326 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
327 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
328 struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
329 const struct mlxsw_tx_info tx_info = {
330 .local_port = mlxsw_sx_port->local_port,
331 .is_emad = false,
332 };
333 u64 len;
334 int err;
335
336 if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
337 return NETDEV_TX_BUSY;
338
339 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
340 struct sk_buff *skb_orig = skb;
341
342 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
343 if (!skb) {
344 this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
345 dev_kfree_skb_any(skb_orig);
346 return NETDEV_TX_OK;
347 }
348 dev_consume_skb_any(skb_orig);
349 }
350 mlxsw_sx_txhdr_construct(skb, &tx_info);
351 /* TX header is consumed by HW on the way so we shouldn't count its
352 * bytes as being sent.
353 */
354 len = skb->len - MLXSW_TXHDR_LEN;
355 /* Due to a race we might fail here because of a full queue. In that
356 * unlikely case we simply drop the packet.
357 */
358 err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
359
360 if (!err) {
361 pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
362 u64_stats_update_begin(&pcpu_stats->syncp);
363 pcpu_stats->tx_packets++;
364 pcpu_stats->tx_bytes += len;
365 u64_stats_update_end(&pcpu_stats->syncp);
366 } else {
367 this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
368 dev_kfree_skb_any(skb);
369 }
370 return NETDEV_TX_OK;
371 }
372
373 static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
374 {
375 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
376 int err;
377
378 err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
379 if (err)
380 return err;
381 dev->mtu = mtu;
382 return 0;
383 }
384
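/* Aggregate the per-CPU counters into rtnl_link_stats64, using each
 * CPU's u64_stats syncp to read a consistent snapshot.
 */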
385 static void
386 mlxsw_sx_port_get_stats64(struct net_device *dev,
387 struct rtnl_link_stats64 *stats)
388 {
389 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
390 struct mlxsw_sx_port_pcpu_stats *p;
391 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
392 u32 tx_dropped = 0;
393 unsigned int start;
394 int i;
395
396 for_each_possible_cpu(i) {
397 p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
398 do {
399 start = u64_stats_fetch_begin_irq(&p->syncp);
400 rx_packets = p->rx_packets;
401 rx_bytes = p->rx_bytes;
402 tx_packets = p->tx_packets;
403 tx_bytes = p->tx_bytes;
404 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
405
406 stats->rx_packets += rx_packets;
407 stats->rx_bytes += rx_bytes;
408 stats->tx_packets += tx_packets;
409 stats->tx_bytes += tx_bytes;
410 /* tx_dropped is u32, updated without syncp protection. */
411 tx_dropped += p->tx_dropped;
412 }
413 stats->tx_dropped = tx_dropped;
414 }
415
416 static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
417 size_t len)
418 {
419 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
420 int err;
421
422 err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1);
423 if (err >= len)
424 return -EINVAL;
425
426 return 0;
427 }
428
429 static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
430 .ndo_open = mlxsw_sx_port_open,
431 .ndo_stop = mlxsw_sx_port_stop,
432 .ndo_start_xmit = mlxsw_sx_port_xmit,
433 .ndo_change_mtu = mlxsw_sx_port_change_mtu,
434 .ndo_get_stats64 = mlxsw_sx_port_get_stats64,
435 .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
436 };
437
438 static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
439 struct ethtool_drvinfo *drvinfo)
440 {
441 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
442 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
443
444 strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
445 strlcpy(drvinfo->version, mlxsw_sx_driver_version,
446 sizeof(drvinfo->version));
447 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
448 "%d.%d.%d",
449 mlxsw_sx->bus_info->fw_rev.major,
450 mlxsw_sx->bus_info->fw_rev.minor,
451 mlxsw_sx->bus_info->fw_rev.subminor);
452 strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
453 sizeof(drvinfo->bus_info));
454 }
455
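/* Mapping between ethtool statistic strings and the getters of the
 * corresponding PPCNT (IEEE 802.3 counter group) register fields.
 */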
456 struct mlxsw_sx_port_hw_stats {
457 char str[ETH_GSTRING_LEN];
458 u64 (*getter)(const char *payload);
459 };
460
461 static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
462 {
463 .str = "a_frames_transmitted_ok",
464 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
465 },
466 {
467 .str = "a_frames_received_ok",
468 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
469 },
470 {
471 .str = "a_frame_check_sequence_errors",
472 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
473 },
474 {
475 .str = "a_alignment_errors",
476 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
477 },
478 {
479 .str = "a_octets_transmitted_ok",
480 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
481 },
482 {
483 .str = "a_octets_received_ok",
484 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
485 },
486 {
487 .str = "a_multicast_frames_xmitted_ok",
488 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
489 },
490 {
491 .str = "a_broadcast_frames_xmitted_ok",
492 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
493 },
494 {
495 .str = "a_multicast_frames_received_ok",
496 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
497 },
498 {
499 .str = "a_broadcast_frames_received_ok",
500 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
501 },
502 {
503 .str = "a_in_range_length_errors",
504 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
505 },
506 {
507 .str = "a_out_of_range_length_field",
508 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
509 },
510 {
511 .str = "a_frame_too_long_errors",
512 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
513 },
514 {
515 .str = "a_symbol_error_during_carrier",
516 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
517 },
518 {
519 .str = "a_mac_control_frames_transmitted",
520 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
521 },
522 {
523 .str = "a_mac_control_frames_received",
524 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
525 },
526 {
527 .str = "a_unsupported_opcodes_received",
528 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
529 },
530 {
531 .str = "a_pause_mac_ctrl_frames_received",
532 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
533 },
534 {
535 .str = "a_pause_mac_ctrl_frames_xmitted",
536 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
537 },
538 };
539
540 #define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
541
542 static void mlxsw_sx_port_get_strings(struct net_device *dev,
543 u32 stringset, u8 *data)
544 {
545 u8 *p = data;
546 int i;
547
548 switch (stringset) {
549 case ETH_SS_STATS:
550 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
551 memcpy(p, mlxsw_sx_port_hw_stats[i].str,
552 ETH_GSTRING_LEN);
553 p += ETH_GSTRING_LEN;
554 }
555 break;
556 }
557 }
558
559 static void mlxsw_sx_port_get_stats(struct net_device *dev,
560 struct ethtool_stats *stats, u64 *data)
561 {
562 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
563 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
564 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
565 int i;
566 int err;
567
568 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
569 MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
570 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
571 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
572 data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
573 }
574
575 static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
576 {
577 switch (sset) {
578 case ETH_SS_STATS:
579 return MLXSW_SX_PORT_HW_STATS_LEN;
580 default:
581 return -EOPNOTSUPP;
582 }
583 }
584
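/* Translation table between PTYS Ethernet protocol bits and legacy
 * ethtool SUPPORTED_* / ADVERTISED_* bits and speeds. Entries without
 * legacy bits are only reported through their speed.
 */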
585 struct mlxsw_sx_port_link_mode {
586 u32 mask;
587 u32 supported;
588 u32 advertised;
589 u32 speed;
590 };
591
592 static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
593 {
594 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
595 .supported = SUPPORTED_100baseT_Full,
596 .advertised = ADVERTISED_100baseT_Full,
597 .speed = 100,
598 },
599 {
600 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
601 .speed = 100,
602 },
603 {
604 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
605 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
606 .supported = SUPPORTED_1000baseKX_Full,
607 .advertised = ADVERTISED_1000baseKX_Full,
608 .speed = 1000,
609 },
610 {
611 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
612 .supported = SUPPORTED_10000baseT_Full,
613 .advertised = ADVERTISED_10000baseT_Full,
614 .speed = 10000,
615 },
616 {
617 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
618 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
619 .supported = SUPPORTED_10000baseKX4_Full,
620 .advertised = ADVERTISED_10000baseKX4_Full,
621 .speed = 10000,
622 },
623 {
624 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
625 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
626 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
627 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
628 .supported = SUPPORTED_10000baseKR_Full,
629 .advertised = ADVERTISED_10000baseKR_Full,
630 .speed = 10000,
631 },
632 {
633 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
634 .supported = SUPPORTED_20000baseKR2_Full,
635 .advertised = ADVERTISED_20000baseKR2_Full,
636 .speed = 20000,
637 },
638 {
639 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
640 .supported = SUPPORTED_40000baseCR4_Full,
641 .advertised = ADVERTISED_40000baseCR4_Full,
642 .speed = 40000,
643 },
644 {
645 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
646 .supported = SUPPORTED_40000baseKR4_Full,
647 .advertised = ADVERTISED_40000baseKR4_Full,
648 .speed = 40000,
649 },
650 {
651 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
652 .supported = SUPPORTED_40000baseSR4_Full,
653 .advertised = ADVERTISED_40000baseSR4_Full,
654 .speed = 40000,
655 },
656 {
657 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
658 .supported = SUPPORTED_40000baseLR4_Full,
659 .advertised = ADVERTISED_40000baseLR4_Full,
660 .speed = 40000,
661 },
662 {
663 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
664 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
665 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
666 .speed = 25000,
667 },
668 {
669 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
670 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
671 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
672 .speed = 50000,
673 },
674 {
675 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
676 .supported = SUPPORTED_56000baseKR4_Full,
677 .advertised = ADVERTISED_56000baseKR4_Full,
678 .speed = 56000,
679 },
680 {
681 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
682 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
683 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
684 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
685 .speed = 100000,
686 },
687 };
688
689 #define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
690 #define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
691
692 static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
693 {
694 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
695 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
696 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
697 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
698 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
699 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
700 return SUPPORTED_FIBRE;
701
702 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
703 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
704 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
705 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
706 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
707 return SUPPORTED_Backplane;
708 return 0;
709 }
710
711 static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
712 {
713 u32 modes = 0;
714 int i;
715
716 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
717 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
718 modes |= mlxsw_sx_port_link_mode[i].supported;
719 }
720 return modes;
721 }
722
723 static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
724 {
725 u32 modes = 0;
726 int i;
727
728 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
729 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
730 modes |= mlxsw_sx_port_link_mode[i].advertised;
731 }
732 return modes;
733 }
734
735 static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
736 struct ethtool_link_ksettings *cmd)
737 {
738 u32 speed = SPEED_UNKNOWN;
739 u8 duplex = DUPLEX_UNKNOWN;
740 int i;
741
742 if (!carrier_ok)
743 goto out;
744
745 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
746 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
747 speed = mlxsw_sx_port_link_mode[i].speed;
748 duplex = DUPLEX_FULL;
749 break;
750 }
751 }
752 out:
753 cmd->base.speed = speed;
754 cmd->base.duplex = duplex;
755 }
756
757 static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
758 {
759 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
760 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
761 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
762 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
763 return PORT_FIBRE;
764
765 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
766 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
767 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
768 return PORT_DA;
769
770 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
771 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
772 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
773 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
774 return PORT_NONE;
775
776 return PORT_OTHER;
777 }
778
779 static int
780 mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
781 struct ethtool_link_ksettings *cmd)
782 {
783 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
784 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
785 char ptys_pl[MLXSW_REG_PTYS_LEN];
786 u32 eth_proto_cap;
787 u32 eth_proto_admin;
788 u32 eth_proto_oper;
789 u32 supported, advertising, lp_advertising;
790 int err;
791
792 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
793 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
794 if (err) {
795 netdev_err(dev, "Failed to get proto");
796 return err;
797 }
798 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
799 				  &eth_proto_admin, &eth_proto_oper);
800
801 supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
802 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
803 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
804 advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
805 mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
806 eth_proto_oper, cmd);
807
808 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
809 cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
810 lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
811
812 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
813 supported);
814 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
815 advertising);
816 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
817 lp_advertising);
818
819 return 0;
820 }
821
822 static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
823 {
824 u32 ptys_proto = 0;
825 int i;
826
827 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
828 if (advertising & mlxsw_sx_port_link_mode[i].advertised)
829 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
830 }
831 return ptys_proto;
832 }
833
834 static u32 mlxsw_sx_to_ptys_speed(u32 speed)
835 {
836 u32 ptys_proto = 0;
837 int i;
838
839 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
840 if (speed == mlxsw_sx_port_link_mode[i].speed)
841 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
842 }
843 return ptys_proto;
844 }
845
846 static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
847 {
848 u32 ptys_proto = 0;
849 int i;
850
851 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
852 if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
853 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
854 }
855 return ptys_proto;
856 }
857
858 static int
859 mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
860 const struct ethtool_link_ksettings *cmd)
861 {
862 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
863 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
864 char ptys_pl[MLXSW_REG_PTYS_LEN];
865 u32 speed;
866 u32 eth_proto_new;
867 u32 eth_proto_cap;
868 u32 eth_proto_admin;
869 u32 advertising;
870 bool is_up;
871 int err;
872
873 speed = cmd->base.speed;
874
875 ethtool_convert_link_mode_to_legacy_u32(&advertising,
876 cmd->link_modes.advertising);
877
878 eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
879 mlxsw_sx_to_ptys_advert_link(advertising) :
880 mlxsw_sx_to_ptys_speed(speed);
881
882 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
883 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
884 if (err) {
885 netdev_err(dev, "Failed to get proto");
886 return err;
887 }
888 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
889 NULL);
890
891 eth_proto_new = eth_proto_new & eth_proto_cap;
892 if (!eth_proto_new) {
893 netdev_err(dev, "Not supported proto admin requested");
894 return -EINVAL;
895 }
896 if (eth_proto_new == eth_proto_admin)
897 return 0;
898
899 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
900 eth_proto_new);
901 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
902 if (err) {
903 netdev_err(dev, "Failed to set proto admin");
904 return err;
905 }
906
907 err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
908 if (err) {
909 netdev_err(dev, "Failed to get oper status");
910 return err;
911 }
912 if (!is_up)
913 return 0;
914
915 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
916 if (err) {
917 netdev_err(dev, "Failed to set admin status");
918 return err;
919 }
920
921 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
922 if (err) {
923 netdev_err(dev, "Failed to set admin status");
924 return err;
925 }
926
927 return 0;
928 }
929
930 static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
931 .get_drvinfo = mlxsw_sx_port_get_drvinfo,
932 .get_link = ethtool_op_get_link,
933 .get_strings = mlxsw_sx_port_get_strings,
934 .get_ethtool_stats = mlxsw_sx_port_get_stats,
935 .get_sset_count = mlxsw_sx_port_get_sset_count,
936 .get_link_ksettings = mlxsw_sx_port_get_link_ksettings,
937 .set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
938 };
939
940 static int mlxsw_sx_port_attr_get(struct net_device *dev,
941 struct switchdev_attr *attr)
942 {
943 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
944 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
945
946 switch (attr->id) {
947 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
948 attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
949 memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
950 break;
951 default:
952 return -EOPNOTSUPP;
953 }
954
955 return 0;
956 }
957
958 static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
959 .switchdev_port_attr_get = mlxsw_sx_port_attr_get,
960 };
961
962 static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
963 {
964 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
965 int err;
966
967 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
968 if (err)
969 return err;
970 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
971 return 0;
972 }
973
974 static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
975 {
976 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
977 struct net_device *dev = mlxsw_sx_port->dev;
978 char ppad_pl[MLXSW_REG_PPAD_LEN];
979 int err;
980
981 mlxsw_reg_ppad_pack(ppad_pl, false, 0);
982 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
983 if (err)
984 return err;
985 mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
986 	/* The last byte of the base MAC address is guaranteed to be such
987 	 * that it does not overflow when the local_port value is added
988 	 * to it.
989 	 */
990 dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
991 return 0;
992 }
993
994 static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
995 u16 vid, enum mlxsw_reg_spms_state state)
996 {
997 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
998 char *spms_pl;
999 int err;
1000
1001 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
1002 if (!spms_pl)
1003 return -ENOMEM;
1004 mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
1005 mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
1006 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
1007 kfree(spms_pl);
1008 return err;
1009 }
1010
1011 static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
1012 u16 speed, u16 width)
1013 {
1014 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1015 char ptys_pl[MLXSW_REG_PTYS_LEN];
1016
1017 mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
1018 width);
1019 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
1020 }
1021
1022 static int
1023 mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
1024 {
1025 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1026 u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
1027 char ptys_pl[MLXSW_REG_PTYS_LEN];
1028 u32 eth_proto_admin;
1029
1030 eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
1031 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
1032 eth_proto_admin);
1033 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
1034 }
1035
1036 static int
1037 mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
1038 enum mlxsw_reg_spmlr_learn_mode mode)
1039 {
1040 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1041 char spmlr_pl[MLXSW_REG_SPMLR_LEN];
1042
1043 mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
1044 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
1045 }
1046
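/* Create an Ethernet port: allocate the netdev and per-CPU stats, set
 * the system port mapping, Ethernet SWID (0), speed, MTU, admin state,
 * STP state and MAC learning mode, then register the netdev.
 */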
1047 static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1048 u8 module, u8 width)
1049 {
1050 struct mlxsw_sx_port *mlxsw_sx_port;
1051 struct net_device *dev;
1052 int err;
1053
1054 dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
1055 if (!dev)
1056 return -ENOMEM;
1057 SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
1058 mlxsw_sx_port = netdev_priv(dev);
1059 mlxsw_sx_port->dev = dev;
1060 mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
1061 mlxsw_sx_port->local_port = local_port;
1062 mlxsw_sx_port->mapping.module = module;
1063
1064 mlxsw_sx_port->pcpu_stats =
1065 netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
1066 if (!mlxsw_sx_port->pcpu_stats) {
1067 err = -ENOMEM;
1068 goto err_alloc_stats;
1069 }
1070
1071 dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
1072 dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
1073 dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
1074
1075 err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
1076 if (err) {
1077 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
1078 mlxsw_sx_port->local_port);
1079 goto err_dev_addr_get;
1080 }
1081
1082 netif_carrier_off(dev);
1083
1084 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1085 NETIF_F_VLAN_CHALLENGED;
1086
1087 dev->min_mtu = 0;
1088 dev->max_mtu = ETH_MAX_MTU;
1089
1090 	/* Each packet needs to have a Tx header (metadata) on top of all other
1091 * headers.
1092 */
1093 dev->needed_headroom = MLXSW_TXHDR_LEN;
1094
1095 err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
1096 if (err) {
1097 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1098 mlxsw_sx_port->local_port);
1099 goto err_port_system_port_mapping_set;
1100 }
1101
1102 err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
1103 if (err) {
1104 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
1105 mlxsw_sx_port->local_port);
1106 goto err_port_swid_set;
1107 }
1108
1109 err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
1110 if (err) {
1111 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
1112 mlxsw_sx_port->local_port);
1113 goto err_port_speed_set;
1114 }
1115
1116 err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
1117 if (err) {
1118 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
1119 mlxsw_sx_port->local_port);
1120 goto err_port_mtu_set;
1121 }
1122
1123 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
1124 if (err)
1125 goto err_port_admin_status_set;
1126
1127 err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
1128 MLXSW_PORT_DEFAULT_VID,
1129 MLXSW_REG_SPMS_STATE_FORWARDING);
1130 if (err) {
1131 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
1132 mlxsw_sx_port->local_port);
1133 goto err_port_stp_state_set;
1134 }
1135
1136 err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
1137 MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
1138 if (err) {
1139 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
1140 mlxsw_sx_port->local_port);
1141 goto err_port_mac_learning_mode_set;
1142 }
1143
1144 err = register_netdev(dev);
1145 if (err) {
1146 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
1147 mlxsw_sx_port->local_port);
1148 goto err_register_netdev;
1149 }
1150
1151 mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
1152 mlxsw_sx_port, dev, false, 0);
1153 mlxsw_sx->ports[local_port] = mlxsw_sx_port;
1154 return 0;
1155
1156 err_register_netdev:
1157 err_port_mac_learning_mode_set:
1158 err_port_stp_state_set:
1159 err_port_admin_status_set:
1160 err_port_mtu_set:
1161 err_port_speed_set:
1162 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1163 err_port_swid_set:
1164 err_port_system_port_mapping_set:
1165 err_dev_addr_get:
1166 free_percpu(mlxsw_sx_port->pcpu_stats);
1167 err_alloc_stats:
1168 free_netdev(dev);
1169 return err;
1170 }
1171
1172 static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1173 u8 module, u8 width)
1174 {
1175 int err;
1176
1177 err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
1178 if (err) {
1179 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
1180 local_port);
1181 return err;
1182 }
1183 err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
1184 if (err)
1185 goto err_port_create;
1186
1187 return 0;
1188
1189 err_port_create:
1190 mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1191 return err;
1192 }
1193
1194 static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1195 {
1196 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1197
1198 mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
1199 unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
1200 mlxsw_sx->ports[local_port] = NULL;
1201 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1202 free_percpu(mlxsw_sx_port->pcpu_stats);
1203 free_netdev(mlxsw_sx_port->dev);
1204 }
1205
1206 static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1207 {
1208 return mlxsw_sx->ports[local_port] != NULL;
1209 }
1210
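/* Create an InfiniBand port: no netdev is needed. The port is moved to
 * the IB SWID (1), assigned its front panel IB port number, speed, MTU
 * and admin state, and registered with mlxsw_core.
 */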
1211 static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1212 u8 module, u8 width)
1213 {
1214 struct mlxsw_sx_port *mlxsw_sx_port;
1215 int err;
1216
1217 mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
1218 if (!mlxsw_sx_port)
1219 return -ENOMEM;
1220 mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
1221 mlxsw_sx_port->local_port = local_port;
1222 mlxsw_sx_port->mapping.module = module;
1223
1224 err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
1225 if (err) {
1226 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1227 mlxsw_sx_port->local_port);
1228 goto err_port_system_port_mapping_set;
1229 }
1230
1231 	/* Add the port to the InfiniBand SWID (1) */
1232 err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
1233 if (err) {
1234 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
1235 mlxsw_sx_port->local_port);
1236 goto err_port_swid_set;
1237 }
1238
1239 	/* Expose the IB port number as its front panel name */
1240 err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
1241 if (err) {
1242 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
1243 mlxsw_sx_port->local_port);
1244 goto err_port_ib_set;
1245 }
1246
1247 	/* Support all speeds from SDR to FDR (bitmask) and bus widths of
1248 	 * 1x, 2x and 4x (3-bit bitmask).
1249 	 */
1250 err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
1251 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
1252 BIT(3) - 1);
1253 if (err) {
1254 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
1255 mlxsw_sx_port->local_port);
1256 goto err_port_speed_set;
1257 }
1258
1259 	/* Change to the maximum MTU the device supports; the SMA will take
1260 	 * care of the active MTU.
1261 	 */
1262 err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
1263 if (err) {
1264 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
1265 mlxsw_sx_port->local_port);
1266 goto err_port_mtu_set;
1267 }
1268
1269 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
1270 if (err) {
1271 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
1272 mlxsw_sx_port->local_port);
1273 goto err_port_admin_set;
1274 }
1275
1276 mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
1277 mlxsw_sx_port);
1278 mlxsw_sx->ports[local_port] = mlxsw_sx_port;
1279 return 0;
1280
1281 err_port_admin_set:
1282 err_port_mtu_set:
1283 err_port_speed_set:
1284 err_port_ib_set:
1285 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1286 err_port_swid_set:
1287 err_port_system_port_mapping_set:
1288 kfree(mlxsw_sx_port);
1289 return err;
1290 }
1291
1292 static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1293 {
1294 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1295
1296 mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
1297 mlxsw_sx->ports[local_port] = NULL;
1298 mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
1299 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1300 kfree(mlxsw_sx_port);
1301 }
1302
1303 static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1304 {
1305 enum devlink_port_type port_type =
1306 mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1307
1308 if (port_type == DEVLINK_PORT_TYPE_ETH)
1309 __mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
1310 else if (port_type == DEVLINK_PORT_TYPE_IB)
1311 __mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
1312 }
1313
1314 static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1315 {
1316 __mlxsw_sx_port_remove(mlxsw_sx, local_port);
1317 mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1318 }
1319
1320 static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
1321 {
1322 int i;
1323
1324 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
1325 if (mlxsw_sx_port_created(mlxsw_sx, i))
1326 mlxsw_sx_port_remove(mlxsw_sx, i);
1327 kfree(mlxsw_sx->ports);
1328 }
1329
1330 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
1331 {
1332 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
1333 size_t alloc_size;
1334 u8 module, width;
1335 int i;
1336 int err;
1337
1338 alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
1339 mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
1340 if (!mlxsw_sx->ports)
1341 return -ENOMEM;
1342
1343 for (i = 1; i < max_ports; i++) {
1344 err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
1345 &width);
1346 if (err)
1347 goto err_port_module_info_get;
1348 if (!width)
1349 continue;
1350 err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
1351 if (err)
1352 goto err_port_create;
1353 }
1354 return 0;
1355
1356 err_port_create:
1357 err_port_module_info_get:
1358 for (i--; i >= 1; i--)
1359 if (mlxsw_sx_port_created(mlxsw_sx, i))
1360 mlxsw_sx_port_remove(mlxsw_sx, i);
1361 kfree(mlxsw_sx->ports);
1362 return err;
1363 }
1364
1365 static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1366 enum mlxsw_reg_pude_oper_status status)
1367 {
1368 if (status == MLXSW_PORT_OPER_STATUS_UP) {
1369 netdev_info(mlxsw_sx_port->dev, "link up\n");
1370 netif_carrier_on(mlxsw_sx_port->dev);
1371 } else {
1372 netdev_info(mlxsw_sx_port->dev, "link down\n");
1373 netif_carrier_off(mlxsw_sx_port->dev);
1374 }
1375 }
1376
1377 static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1378 enum mlxsw_reg_pude_oper_status status)
1379 {
1380 if (status == MLXSW_PORT_OPER_STATUS_UP)
1381 pr_info("ib link for port %d - up\n",
1382 mlxsw_sx_port->mapping.module + 1);
1383 else
1384 pr_info("ib link for port %d - down\n",
1385 mlxsw_sx_port->mapping.module + 1);
1386 }
1387
1388 static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
1389 char *pude_pl, void *priv)
1390 {
1391 struct mlxsw_sx *mlxsw_sx = priv;
1392 struct mlxsw_sx_port *mlxsw_sx_port;
1393 enum mlxsw_reg_pude_oper_status status;
1394 enum devlink_port_type port_type;
1395 u8 local_port;
1396
1397 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1398 mlxsw_sx_port = mlxsw_sx->ports[local_port];
1399 if (!mlxsw_sx_port) {
1400 dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1401 local_port);
1402 return;
1403 }
1404
1405 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1406 port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1407 if (port_type == DEVLINK_PORT_TYPE_ETH)
1408 mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
1409 else if (port_type == DEVLINK_PORT_TYPE_IB)
1410 mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
1411 }
1412
1413 static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
1414 void *priv)
1415 {
1416 struct mlxsw_sx *mlxsw_sx = priv;
1417 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1418 struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
1419
1420 if (unlikely(!mlxsw_sx_port)) {
1421 dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
1422 local_port);
1423 return;
1424 }
1425
1426 skb->dev = mlxsw_sx_port->dev;
1427
1428 pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
1429 u64_stats_update_begin(&pcpu_stats->syncp);
1430 pcpu_stats->rx_packets++;
1431 pcpu_stats->rx_bytes += skb->len;
1432 u64_stats_update_end(&pcpu_stats->syncp);
1433
1434 skb->protocol = eth_type_trans(skb, skb->dev);
1435 netif_receive_skb(skb);
1436 }
1437
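/* devlink port type change: the existing port is removed and re-created
 * as either an Ethernet or an InfiniBand port.
 */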
1438 static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1439 enum devlink_port_type new_type)
1440 {
1441 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1442 u8 module, width;
1443 int err;
1444
1445 if (new_type == DEVLINK_PORT_TYPE_AUTO)
1446 return -EOPNOTSUPP;
1447
1448 __mlxsw_sx_port_remove(mlxsw_sx, local_port);
1449 err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
1450 &width);
1451 if (err)
1452 goto err_port_module_info_get;
1453
1454 if (new_type == DEVLINK_PORT_TYPE_ETH)
1455 err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
1456 width);
1457 else if (new_type == DEVLINK_PORT_TYPE_IB)
1458 err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
1459 width);
1460
1461 err_port_module_info_get:
1462 return err;
1463 }
1464
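/* Traps delivering control packets (STP, LACP, LLDP, IGMP, etc.) and
 * port up/down (PUDE) events to the CPU.
 */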
1465 #define MLXSW_SX_RXL(_trap_id) \
1466 MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \
1467 false, SX2_RX, FORWARD)
1468
1469 static const struct mlxsw_listener mlxsw_sx_listener[] = {
1470 MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
1471 MLXSW_SX_RXL(FDB_MC),
1472 MLXSW_SX_RXL(STP),
1473 MLXSW_SX_RXL(LACP),
1474 MLXSW_SX_RXL(EAPOL),
1475 MLXSW_SX_RXL(LLDP),
1476 MLXSW_SX_RXL(MMRP),
1477 MLXSW_SX_RXL(MVRP),
1478 MLXSW_SX_RXL(RPVST),
1479 MLXSW_SX_RXL(DHCP),
1480 MLXSW_SX_RXL(IGMP_QUERY),
1481 MLXSW_SX_RXL(IGMP_V1_REPORT),
1482 MLXSW_SX_RXL(IGMP_V2_REPORT),
1483 MLXSW_SX_RXL(IGMP_V2_LEAVE),
1484 MLXSW_SX_RXL(IGMP_V3_REPORT),
1485 };
1486
1487 static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
1488 {
1489 char htgt_pl[MLXSW_REG_HTGT_LEN];
1490 int i;
1491 int err;
1492
1493 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
1494 MLXSW_REG_HTGT_INVALID_POLICER,
1495 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1496 MLXSW_REG_HTGT_DEFAULT_TC);
1497 mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1498 MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);
1499
1500 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1501 if (err)
1502 return err;
1503
1504 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
1505 MLXSW_REG_HTGT_INVALID_POLICER,
1506 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1507 MLXSW_REG_HTGT_DEFAULT_TC);
1508 mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1509 MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);
1510
1511 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1512 if (err)
1513 return err;
1514
1515 for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1516 err = mlxsw_core_trap_register(mlxsw_sx->core,
1517 &mlxsw_sx_listener[i],
1518 mlxsw_sx);
1519 if (err)
1520 goto err_listener_register;
1521
1522 }
1523 return 0;
1524
1525 err_listener_register:
1526 for (i--; i >= 0; i--) {
1527 mlxsw_core_trap_unregister(mlxsw_sx->core,
1528 &mlxsw_sx_listener[i],
1529 mlxsw_sx);
1530 }
1531 return err;
1532 }
1533
1534 static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
1535 {
1536 int i;
1537
1538 for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1539 mlxsw_core_trap_unregister(mlxsw_sx->core,
1540 &mlxsw_sx_listener[i],
1541 mlxsw_sx);
1542 }
1543 }
1544
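/* Flooding setup: point unknown unicast, broadcast and unregistered
 * multicast traffic at a single flooding table that contains only the
 * CPU port.
 */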
1545 static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1546 {
1547 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1548 char sgcr_pl[MLXSW_REG_SGCR_LEN];
1549 char *sftr_pl;
1550 int err;
1551
1552 /* Configure a flooding table, which includes only CPU port. */
1553 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
1554 if (!sftr_pl)
1555 return -ENOMEM;
1556 mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
1557 MLXSW_PORT_CPU_PORT, true);
1558 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
1559 kfree(sftr_pl);
1560 if (err)
1561 return err;
1562
1563 /* Flood different packet types using the flooding table. */
1564 mlxsw_reg_sfgc_pack(sfgc_pl,
1565 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
1566 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1567 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1568 0);
1569 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1570 if (err)
1571 return err;
1572
1573 mlxsw_reg_sfgc_pack(sfgc_pl,
1574 MLXSW_REG_SFGC_TYPE_BROADCAST,
1575 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1576 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1577 0);
1578 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1579 if (err)
1580 return err;
1581
1582 mlxsw_reg_sfgc_pack(sfgc_pl,
1583 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
1584 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1585 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1586 0);
1587 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1588 if (err)
1589 return err;
1590
1591 mlxsw_reg_sfgc_pack(sfgc_pl,
1592 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
1593 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1594 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1595 0);
1596 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1597 if (err)
1598 return err;
1599
1600 mlxsw_reg_sfgc_pack(sfgc_pl,
1601 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
1602 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1603 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1604 0);
1605 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1606 if (err)
1607 return err;
1608
1609 mlxsw_reg_sgcr_pack(sgcr_pl, true);
1610 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1611 }
1612
1613 static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
1614 {
1615 char htgt_pl[MLXSW_REG_HTGT_LEN];
1616
1617 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
1618 MLXSW_REG_HTGT_INVALID_POLICER,
1619 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1620 MLXSW_REG_HTGT_DEFAULT_TC);
1621 mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
1622 mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1623 MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
1624 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
1625 }
1626
1627 static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
1628 const struct mlxsw_bus_info *mlxsw_bus_info)
1629 {
1630 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1631 int err;
1632
1633 mlxsw_sx->core = mlxsw_core;
1634 mlxsw_sx->bus_info = mlxsw_bus_info;
1635
1636 err = mlxsw_sx_hw_id_get(mlxsw_sx);
1637 if (err) {
1638 dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
1639 return err;
1640 }
1641
1642 err = mlxsw_sx_ports_create(mlxsw_sx);
1643 if (err) {
1644 dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
1645 return err;
1646 }
1647
1648 err = mlxsw_sx_traps_init(mlxsw_sx);
1649 if (err) {
1650 dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
1651 goto err_listener_register;
1652 }
1653
1654 err = mlxsw_sx_flood_init(mlxsw_sx);
1655 if (err) {
1656 dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
1657 goto err_flood_init;
1658 }
1659
1660 return 0;
1661
1662 err_flood_init:
1663 mlxsw_sx_traps_fini(mlxsw_sx);
1664 err_listener_register:
1665 mlxsw_sx_ports_remove(mlxsw_sx);
1666 return err;
1667 }
1668
1669 static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
1670 {
1671 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1672
1673 mlxsw_sx_traps_fini(mlxsw_sx);
1674 mlxsw_sx_ports_remove(mlxsw_sx);
1675 }
1676
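/* Device profile passed to mlxsw_core. Note that both an Ethernet SWID
 * and an InfiniBand SWID are configured, matching the two port types
 * this driver can create.
 */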
1677 static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
1678 .used_max_vepa_channels = 1,
1679 .max_vepa_channels = 0,
1680 .used_max_mid = 1,
1681 .max_mid = 7000,
1682 .used_max_pgt = 1,
1683 .max_pgt = 0,
1684 .used_max_system_port = 1,
1685 .max_system_port = 48000,
1686 .used_max_vlan_groups = 1,
1687 .max_vlan_groups = 127,
1688 .used_max_regions = 1,
1689 .max_regions = 400,
1690 .used_flood_tables = 1,
1691 .max_flood_tables = 2,
1692 .max_vid_flood_tables = 1,
1693 .used_flood_mode = 1,
1694 .flood_mode = 3,
1695 .used_max_ib_mc = 1,
1696 .max_ib_mc = 6,
1697 .used_max_pkey = 1,
1698 .max_pkey = 0,
1699 .swid_config = {
1700 {
1701 .used_type = 1,
1702 .type = MLXSW_PORT_SWID_TYPE_ETH,
1703 },
1704 {
1705 .used_type = 1,
1706 .type = MLXSW_PORT_SWID_TYPE_IB,
1707 }
1708 },
1709 .resource_query_enable = 0,
1710 };
1711
1712 static struct mlxsw_driver mlxsw_sx_driver = {
1713 .kind = mlxsw_sx_driver_name,
1714 .priv_size = sizeof(struct mlxsw_sx),
1715 .init = mlxsw_sx_init,
1716 .fini = mlxsw_sx_fini,
1717 .basic_trap_groups_set = mlxsw_sx_basic_trap_groups_set,
1718 .txhdr_construct = mlxsw_sx_txhdr_construct,
1719 .txhdr_len = MLXSW_TXHDR_LEN,
1720 .profile = &mlxsw_sx_config_profile,
1721 .port_type_set = mlxsw_sx_port_type_set,
1722 };
1723
1724 static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
1725 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
1726 {0, },
1727 };
1728
1729 static struct pci_driver mlxsw_sx_pci_driver = {
1730 .name = mlxsw_sx_driver_name,
1731 .id_table = mlxsw_sx_pci_id_table,
1732 };
1733
1734 static int __init mlxsw_sx_module_init(void)
1735 {
1736 int err;
1737
1738 err = mlxsw_core_driver_register(&mlxsw_sx_driver);
1739 if (err)
1740 return err;
1741
1742 err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
1743 if (err)
1744 goto err_pci_driver_register;
1745
1746 return 0;
1747
1748 err_pci_driver_register:
1749 mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1750 return err;
1751 }
1752
1753 static void __exit mlxsw_sx_module_exit(void)
1754 {
1755 mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
1756 mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1757 }
1758
1759 module_init(mlxsw_sx_module_init);
1760 module_exit(mlxsw_sx_module_exit);
1761
1762 MODULE_LICENSE("Dual BSD/GPL");
1763 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1764 MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
1765 MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);
1766