1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Broadcom Starfighter 2 DSA switch driver
4 *
5 * Copyright (C) 2014, Broadcom Corporation
6 */
7
8 #include <linux/list.h>
9 #include <linux/module.h>
10 #include <linux/netdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/platform_device.h>
13 #include <linux/phy.h>
14 #include <linux/phy_fixed.h>
15 #include <linux/phylink.h>
16 #include <linux/mii.h>
17 #include <linux/clk.h>
18 #include <linux/of.h>
19 #include <linux/of_irq.h>
20 #include <linux/of_address.h>
21 #include <linux/of_net.h>
22 #include <linux/of_mdio.h>
23 #include <net/dsa.h>
24 #include <linux/ethtool.h>
25 #include <linux/if_bridge.h>
26 #include <linux/brcmphy.h>
27 #include <linux/etherdevice.h>
28 #include <linux/platform_data/b53.h>
29
30 #include "bcm_sf2.h"
31 #include "bcm_sf2_regs.h"
32 #include "b53/b53_priv.h"
33 #include "b53/b53_regs.h"
34
bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv * priv,int port)35 static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
36 {
37 switch (priv->type) {
38 case BCM4908_DEVICE_ID:
39 switch (port) {
40 case 7:
41 return REG_RGMII_11_CNTRL;
42 default:
43 break;
44 }
45 break;
46 default:
47 switch (port) {
48 case 0:
49 return REG_RGMII_0_CNTRL;
50 case 1:
51 return REG_RGMII_1_CNTRL;
52 case 2:
53 return REG_RGMII_2_CNTRL;
54 default:
55 break;
56 }
57 }
58
59 WARN_ONCE(1, "Unsupported port %d\n", port);
60
61 /* RO fallback reg */
62 return REG_SWITCH_STATUS;
63 }
64
65 /* Return the number of active ports, not counting the IMP (CPU) port */
bcm_sf2_num_active_ports(struct dsa_switch * ds)66 static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
67 {
68 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
69 unsigned int port, count = 0;
70
71 for (port = 0; port < ds->num_ports; port++) {
72 if (dsa_is_cpu_port(ds, port))
73 continue;
74 if (priv->port_sts[port].enabled)
75 count++;
76 }
77
78 return count;
79 }
80
/* Scale the switch MDIV clock rate with the number of active user ports.
 * With 1-4 active ports a rate is taken from the table below; with more,
 * fall back to the recommended operational frequency. No-op when no port
 * is active or the clock is not provided.
 */
static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned long new_rate;
	unsigned int ports_active;
	/* Frequency in Hz (clk_set_rate() takes Hz), indexed by
	 * (number of active ports - 1)
	 */
	static const unsigned long rate_table[] = {
		59220000,
		60820000,
		62500000,
		62500000,
	};

	ports_active = bcm_sf2_num_active_ports(ds);
	if (ports_active == 0 || !priv->clk_mdiv)
		return;

	/* If we overflow our table, just use the recommended operational
	 * frequency
	 */
	if (ports_active > ARRAY_SIZE(rate_table))
		new_rate = 90000000;
	else
		new_rate = rate_table[ports_active - 1];
	clk_set_rate(priv->clk_mdiv, new_rate);
}
107
/* Bring up the IMP (CPU) port: power its memories, enable forwarding,
 * set up TC-to-queue mapping and Broadcom tagging, force the link and
 * un-gate RX/TX.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Enable Broadcom header processing on the CPU port */
	b53_brcm_hdr_setup(ds, port);

	if (port == 8) {
		/* The IMP status override register moved between device
		 * generations
		 */
		if (priv->type == BCM4908_DEVICE_ID ||
		    priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_IMP;
		else
			offset = CORE_STS_OVERRIDE_IMP2;

		/* Force link status for IMP port */
		reg = core_readl(priv, offset);
		reg |= (MII_SW_OR | LINK_STS);
		/* Only BCM4908 runs its IMP port at 2Gb/s */
		if (priv->type == BCM4908_DEVICE_ID)
			reg |= GMII_SPEED_UP_2G;
		else
			reg &= ~GMII_SPEED_UP_2G;
		core_writel(priv, reg, offset);

		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
		reg = core_readl(priv, CORE_IMP_CTL);
		reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_IMP_CTL);
	} else {
		/* CPU ports other than 8 only need RX/TX un-gated */
		reg = core_readl(priv, CORE_G_PCTL_PORT(port));
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
	}

	priv->port_sts[port].enabled = true;
}
166
/* Power the integrated GPHY up or down.
 *
 * Power-up asserts PHY_RESET while releasing the power-down controls,
 * holds reset for 21us, then deasserts it. Power-down sets the
 * power-down bits (with reset asserted), waits 1ms and then gates the
 * 25MHz clock with the final write.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		/* Hold the PHY in reset while powering it up */
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	/* Final write: deasserts reset (enable) or gates the clock (disable) */
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
195
/* Unmask the interrupts for @port.
 *
 * Port 0 interrupts live in the first INTRL2 bank; port 7 uses a
 * dedicated offset in the second bank; other ports use the generic
 * per-port offset in the second bank.
 */
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}
216
/* Mask the interrupts for @port and ack anything already pending.
 *
 * Bank selection mirrors bcm_sf2_port_intr_enable(): port 0 on the
 * first INTRL2 bank, everything else on the second.
 */
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}
239
/* DSA .port_enable callback: bring up a user port.
 *
 * Marks the port active (which feeds the clock recalculation), powers
 * its memories, optionally enables Broadcom tags, programs QoS mapping
 * and ACB/pause thresholds, re-enables the GPHY if needed, and finally
 * defers to the common b53 port enable.
 *
 * Returns 0 or a negative error from b53_enable_port().
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	if (!dsa_is_user_port(ds, port))
		return 0;

	priv->port_sts[port].enabled = true;

	/* Account for the newly active port when choosing the core clock */
	bcm_sf2_recalc_clock(ds);

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}
308
/* DSA .port_disable callback: shut down a user port.
 *
 * Ports armed for Wake-on-LAN are left running with learning disabled
 * so wake packets still reach the host. Otherwise disable interrupts,
 * GPHY and the common b53 state, power down the port memory and
 * recompute the core clock.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Disable learning while in WoL mode */
	if (priv->wol_ports_mask & (1 << port)) {
		reg = core_readl(priv, CORE_DIS_LEARN);
		reg |= BIT(port);
		core_writel(priv, reg, CORE_DIS_LEARN);
		return;
	}

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	b53_disable_port(ds, port);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	priv->port_sts[port].enabled = false;

	bcm_sf2_recalc_clock(ds);
}
339
340
/* Indirect pseudo-PHY register access through the switch MDIO master.
 *
 * @op: 1 = read, 0 = write
 * Returns the 16-bit value read for reads, 0 for writes.
 *
 * The switch's MDIO master is selected for the duration of the access
 * (MDIO_MASTER_SEL) and released afterwards. The PHY address is first
 * programmed at page 0x70, then the register itself is accessed at
 * page 0x80.
 */
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Hand the MDIO master back */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
371
bcm_sf2_sw_mdio_read(struct mii_bus * bus,int addr,int regnum)372 static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
373 {
374 struct bcm_sf2_priv *priv = bus->priv;
375
376 /* Intercept reads from Broadcom pseudo-PHY address, else, send
377 * them to our master MDIO bus controller
378 */
379 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
380 return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
381 else
382 return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
383 }
384
/* MDIO bus .write hook: divert accesses to the Broadcom pseudo-PHY
 * through the indirect workaround, pass everything else to the master
 * MDIO bus controller.
 */
static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	if (addr != BRCM_PSEUDO_PHY_ADDR || !(priv->indir_phy_mask & BIT(addr)))
		return mdiobus_write_nested(priv->master_mii_bus, addr,
					    regnum, val);

	return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
}
399
/* INTRL2 bank 0 interrupt handler: latch the pending, unmasked status
 * bits and acknowledge them.
 */
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
411
/* INTRL2 bank 1 interrupt handler: ack pending bits and propagate
 * port 7 (MoCA) link transitions to phylink.
 */
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	/* Cache the link state for bcm_sf2_sw_fixed_state() and notify
	 * phylink of the change
	 */
	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = true;
		dsa_port_phylink_mac_change(ds, 7, true);
	}
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = false;
		dsa_port_phylink_mac_change(ds, 7, false);
	}

	return IRQ_HANDLED;
}
432
bcm_sf2_sw_rst(struct bcm_sf2_priv * priv)433 static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
434 {
435 unsigned int timeout = 1000;
436 u32 reg;
437 int ret;
438
439 /* The watchdog reset does not work on 7278, we need to hit the
440 * "external" reset line through the reset controller.
441 */
442 if (priv->type == BCM7278_DEVICE_ID) {
443 ret = reset_control_assert(priv->rcdev);
444 if (ret)
445 return ret;
446
447 return reset_control_deassert(priv->rcdev);
448 }
449
450 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
451 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
452 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
453
454 do {
455 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
456 if (!(reg & SOFTWARE_RESET))
457 break;
458
459 usleep_range(1000, 2000);
460 } while (timeout-- > 0);
461
462 if (timeout == 0)
463 return -ETIMEDOUT;
464
465 return 0;
466 }
467
/* Program the port crossbar (BCM4908 only): connect internal port 7 to
 * the external GPHY4 or RGMII interface depending on configuration
 * (SerDes selection is not implemented yet), then log the resulting
 * internal-to-external routing. Other device types have no crossbar.
 */
static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
{
	struct device *dev = priv->dev->ds->dev;
	int shift;
	u32 mask;
	u32 reg;
	int i;

	/* One selector field of num_crossbar_int_ports bits per port */
	mask = BIT(priv->num_crossbar_int_ports) - 1;

	reg = reg_readl(priv, REG_CROSSBAR);
	switch (priv->type) {
	case BCM4908_DEVICE_ID:
		shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports;
		reg &= ~(mask << shift);
		if (0) /* FIXME */
			reg |= CROSSBAR_BCM4908_EXT_SERDES << shift;
		else if (priv->int_phy_mask & BIT(7))
			reg |= CROSSBAR_BCM4908_EXT_GPHY4 << shift;
		else if (phy_interface_mode_is_rgmii(priv->port_sts[7].mode))
			reg |= CROSSBAR_BCM4908_EXT_RGMII << shift;
		else if (WARN(1, "Invalid port mode\n"))
			return;
		break;
	default:
		return;
	}
	reg_writel(priv, reg, REG_CROSSBAR);

	/* Read back and log the crossbar routing for debugging */
	reg = reg_readl(priv, REG_CROSSBAR);
	for (i = 0; i < priv->num_crossbar_int_ports; i++) {
		shift = i * priv->num_crossbar_int_ports;

		dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i,
			(reg >> shift) & mask);
	}
}
505
/* Mask and acknowledge all interrupts on both INTRL2 banks. */
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}
513
/* Walk the device tree port nodes and populate per-port state:
 * phy-mode, the internal-PHY and Broadcom-tag masks, and the MoCA
 * port number (-1 if none).
 */
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device *dev = priv->dev->ds->dev;
	struct bcm_sf2_port_status *port_st;
	struct device_node *port;
	unsigned int port_num;
	struct property *prop;
	int err;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		/* Skip nodes without a usable "reg" port number */
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		if (port_num >= DSA_MAX_PORTS) {
			dev_err(dev, "Invalid port number %d\n", port_num);
			continue;
		}

		port_st = &priv->port_sts[port_num];

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		err = of_get_phy_mode(port, &port_st->mode);
		if (err)
			continue;

		if (port_st->mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (port_st->mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;

		/* Ensure that port 5 is not picked up as a DSA CPU port
		 * flavour but a regular port instead. We should be using
		 * devlink to be able to set the port flavour.
		 */
		if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
			prop = of_find_property(port, "ethernet", NULL);
			if (prop)
				of_remove_property(port, prop);
		}
	}
}
566
/* Find the integrated unimac MDIO master and register a "slave" MDIO
 * bus whose read/write hooks divert pseudo-PHY accesses through the
 * indirect workaround (7445D0 only).
 *
 * Returns 0 on success, -EPROBE_DEFER if the master bus is not yet
 * available, -ENOMEM on allocation failure, or the mdiobus_register()
 * error. On failure all acquired references are released.
 */
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn, *child;
	struct phy_device *phydev;
	struct property *prop;
	static int index;	/* bus id suffix, shared across instances */
	int err, reg;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus) {
		err = -EPROBE_DEFER;
		goto err_of_node_put;
	}

	priv->master_mii_dn = dn;

	priv->slave_mii_bus = mdiobus_alloc();
	if (!priv->slave_mii_bus) {
		err = -ENOMEM;
		goto err_put_master_mii_bus_dev;
	}

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo PHY as needing special treatment and would
	 * otherwise make all other PHY read/writes go to the master MDIO bus
	 * controller that comes with this switch backed by the "mdio-unimac"
	 * driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	/* We need to make sure that of_phy_connect() will not work by
	 * removing the 'phandle' and 'linux,phandle' properties and
	 * unregister the existing PHY device that was already registered.
	 */
	for_each_available_child_of_node(dn, child) {
		if (of_property_read_u32(child, "reg", &reg) ||
		    reg >= PHY_MAX_ADDR)
			continue;

		/* Only the diverted (pseudo-PHY) addresses are affected */
		if (!(priv->indir_phy_mask & BIT(reg)))
			continue;

		prop = of_find_property(child, "phandle", NULL);
		if (prop)
			of_remove_property(child, prop);

		prop = of_find_property(child, "linux,phandle", NULL);
		if (prop)
			of_remove_property(child, prop);

		phydev = of_phy_find_device(child);
		if (phydev)
			phy_device_remove(phydev);
	}

	err = mdiobus_register(priv->slave_mii_bus);
	if (err && dn)
		goto err_free_slave_mii_bus;

	return 0;

err_free_slave_mii_bus:
	mdiobus_free(priv->slave_mii_bus);
err_put_master_mii_bus_dev:
	put_device(&priv->master_mii_bus->dev);
err_of_node_put:
	of_node_put(dn);
	return err;
}
659
/* Tear down the slave MDIO bus and drop the references taken by
 * bcm_sf2_mdio_register() on the master bus device and its OF node.
 */
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	mdiobus_free(priv->slave_mii_bus);
	put_device(&priv->master_mii_bus->dev);
	of_node_put(priv->master_mii_dn);
}
667
bcm_sf2_sw_get_phy_flags(struct dsa_switch * ds,int port)668 static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
669 {
670 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
671
672 /* The BCM7xxx PHY driver expects to find the integrated PHY revision
673 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
674 * the REG_PHY_REVISION register layout is.
675 */
676 if (priv->int_phy_mask & BIT(port))
677 return priv->hw_params.gphy_rev;
678 else
679 return 0;
680 }
681
/* phylink validate callback: reject unsupported interface modes and
 * restrict the supported/advertised link modes to what the hardware
 * can do (10/100 always; gigabit except on MII/REVMII).
 */
static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
				unsigned long *supported,
				struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_MOCA) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		/* Stay quiet for the IMP port; it is configured elsewhere */
		if (port != core_readl(priv, CORE_IMP0_PRT_ID))
			dev_err(ds->dev,
				"Unsupported interface: %d for port %d\n",
				state->interface, port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
728
/* phylink mac_config callback: program the per-port RGMII control
 * register with the external port mode (and RGMII delay mode). Nothing
 * to do for the IMP port or for internal/MoCA interfaces.
 */
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  const struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	u32 reg_rgmii_ctrl;
	u32 reg;

	if (port == core_readl(priv, CORE_IMP0_PRT_ID))
		return;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* No internal TX delay: disable ID mode */
		id_mode_dis = 1;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		port_mode = EXT_REVMII;
		break;
	default:
		/* Nothing required for all other PHYs: internal and MoCA */
		return;
	}

	reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);

	/* Clear id_mode_dis bit, and the existing port mode, let
	 * RGMII_MODE_EN bet set by mac_link_{up,down}
	 */
	reg = reg_readl(priv, reg_rgmii_ctrl);
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);

	reg |= port_mode;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	reg_writel(priv, reg, reg_rgmii_ctrl);
}
774
/* Toggle RGMII_MODE_EN in the port's RGMII control register to follow
 * link state. Only meaningful for RGMII/MII/REVMII interfaces.
 */
static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
				    phy_interface_t interface, bool link)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg_rgmii_ctrl;
	u32 reg;

	if (!phy_interface_mode_is_rgmii(interface) &&
	    interface != PHY_INTERFACE_MODE_MII &&
	    interface != PHY_INTERFACE_MODE_REVMII)
		return;

	reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);

	/* If the link is down, just disable the interface to conserve power */
	reg = reg_readl(priv, reg_rgmii_ctrl);
	if (link)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	reg_writel(priv, reg, reg_rgmii_ctrl);
}
797
/* phylink mac_link_down callback: clear the forced link status in the
 * per-port override register and disable the RGMII interface. Ports
 * armed for Wake-on-LAN are left untouched.
 */
static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     phy_interface_t interface)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg, offset;

	if (priv->wol_ports_mask & BIT(port))
		return;

	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
		/* Override register location depends on the device type */
		if (priv->type == BCM4908_DEVICE_ID ||
		    priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
		else
			offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

		reg = core_readl(priv, offset);
		reg &= ~LINK_STS;
		core_writel(priv, reg, offset);
	}

	bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
822
/* phylink mac_link_up callback: enable the RGMII interface, program
 * pause settings in the RGMII control register (external interfaces
 * only), force speed/duplex/flow-control in the port override register,
 * and initialize EEE when the link is PHY-managed.
 */
static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
				   unsigned int mode,
				   phy_interface_t interface,
				   struct phy_device *phydev,
				   int speed, int duplex,
				   bool tx_pause, bool rx_pause)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;

	bcm_sf2_sw_mac_link_set(ds, port, interface, true);

	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
		u32 reg_rgmii_ctrl = 0;
		u32 reg, offset;

		/* Override register location depends on the device type */
		if (priv->type == BCM4908_DEVICE_ID ||
		    priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
		else
			offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

		/* Pause bits in the RGMII control register only exist for
		 * external interfaces
		 */
		if (interface == PHY_INTERFACE_MODE_RGMII ||
		    interface == PHY_INTERFACE_MODE_RGMII_TXID ||
		    interface == PHY_INTERFACE_MODE_MII ||
		    interface == PHY_INTERFACE_MODE_REVMII) {
			reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
			reg = reg_readl(priv, reg_rgmii_ctrl);
			reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

			if (tx_pause)
				reg |= TX_PAUSE_EN;
			if (rx_pause)
				reg |= RX_PAUSE_EN;

			reg_writel(priv, reg, reg_rgmii_ctrl);
		}

		/* Force link up with the resolved speed/duplex/pause */
		reg = SW_OVERRIDE | LINK_STS;
		switch (speed) {
		case SPEED_1000:
			reg |= SPDSTS_1000 << SPEED_SHIFT;
			break;
		case SPEED_100:
			reg |= SPDSTS_100 << SPEED_SHIFT;
			break;
		}

		if (duplex == DUPLEX_FULL)
			reg |= DUPLX_MODE;

		if (tx_pause)
			reg |= TXFLOW_CNTL;
		if (rx_pause)
			reg |= RXFLOW_CNTL;

		core_writel(priv, reg, offset);
	}

	if (mode == MLO_AN_PHY && phydev)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}
885
/* Fixed-link state callback: report link state for fixed-PHY ports.
 * The MoCA port reports the state cached by the interrupt handler;
 * all other fixed ports are simply forced up.
 */
static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
				   struct phylink_link_state *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	status->link = false;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(dsa_to_port(ds, port)->slave);
		status->duplex = DUPLEX_FULL;
	} else {
		status->link = true;
	}
}
915
/* Globally enable Adaptive Congestion Buffering: flush first, then
 * turn on ACB with its algorithm bit set.
 */
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	/* Clear the flush request before enabling */
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}
929
/* System suspend: mask all interrupts, disable every physically present
 * port (including the IMP port), and gate the switch clock unless a
 * port is armed for Wake-on-LAN.
 */
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all ports physically present including the IMP
	 * port, the other ones have already been disabled during
	 * bcm_sf2_sw_setup
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port);
	}

	if (!priv->wol_ports_mask)
		clk_disable_unprepare(priv->clk);

	return 0;
}
951
/* System resume: re-enable the clock (unless WoL kept it running),
 * software-reset the switch, restore crossbar/CFP/GPHY state and rerun
 * the full switch setup.
 */
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret;

	if (!priv->wol_ports_mask)
		clk_prepare_enable(priv->clk);

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	bcm_sf2_crossbar_setup(priv);

	ret = bcm_sf2_cfp_resume(ds);
	if (ret)
		return ret;

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	/* NOTE(review): the setup() return value is discarded here */
	ds->ops->setup(ds);

	return 0;
}
979
/* ethtool get_wol: report the master (CPU port) netdevice's WoL
 * capabilities, and the active options only if this port is armed.
 */
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol = { };

	/* Get the parent device WoL settings */
	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	/* Only expose the SecureOn password when it is in use */
	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}
1003
/* ethtool set_wol: validate the request against what the master device
 * supports, track which ports are armed, keep the CPU port armed while
 * any user port is, and forward the request to the master device.
 *
 * Returns 0 or a negative error (-EINVAL for unsupported options).
 */
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	struct ethtool_wolinfo pwol = { };

	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);
	/* Reject options the parent device cannot wake on */
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}
1033
/* DSA .setup callback: enable user and CPU ports, disable the rest,
 * configure VLANs and ACB, then register devlink resources.
 *
 * Returns 0 or a negative error from b53_setup_devlink_resources().
 */
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return b53_setup_devlink_resources(ds);
}
1055
/* DSA teardown callback: release the devlink resources that
 * bcm_sf2_sw_setup() registered via b53_setup_devlink_resources().
 */
static void bcm_sf2_sw_teardown(struct dsa_switch *ds)
{
	dsa_devlink_resources_unregister(ds);
}
1060
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands: page selects bits [17:10], register selects a
 * 32-bit-aligned word offset within the page.
 */
#define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2)
1066
/* b53 8-bit register read: performed as a 32-bit MMIO read of the
 * translated page/register address, truncated to the low byte.
 */
static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	*val = core_readl(priv, offset);

	return 0;
}
1076
/* b53 16-bit register read: performed as a 32-bit MMIO read of the
 * translated page/register address, truncated to the low half-word.
 */
static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	*val = core_readl(priv, offset);

	return 0;
}
1086
/* b53 32-bit register read of the translated page/register address */
static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	*val = core_readl(priv, offset);

	return 0;
}
1096
/* b53 64-bit register read of the translated page/register address
 * (also serves the 48-bit accessor slot in bcm_sf2_io_ops).
 */
static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	*val = core_readq(priv, offset);

	return 0;
}
1106
/* b53 8-bit register write: performed as a 32-bit MMIO write to the
 * translated page/register address.
 */
static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	core_writel(priv, value, offset);

	return 0;
}
1116
/* b53 16-bit register write: performed as a 32-bit MMIO write to the
 * translated page/register address.
 */
static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	core_writel(priv, value, offset);

	return 0;
}
1126
/* b53 32-bit register write to the translated page/register address */
static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	core_writel(priv, value, offset);

	return 0;
}
1136
/* b53 64-bit register write to the translated page/register address
 * (also serves the 48-bit accessor slot in bcm_sf2_io_ops).
 */
static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	core_writeq(priv, value, offset);

	return 0;
}
1146
/* Glue b53's paged register accessors onto the memory-mapped SWITCH_CORE
 * window. Note that the 48-bit slots reuse the 64-bit handlers.
 */
static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};
1159
/* ethtool get_strings: emit the b53 MIB strings first, then append the
 * CFP strings right after them in the caller's buffer.
 */
static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
				   u32 stringset, uint8_t *data)
{
	int cnt = b53_get_sset_count(ds, port, stringset);

	/* A negative count is an error code (unsupported stringset);
	 * bail out instead of indexing backwards into the buffer.
	 */
	if (cnt < 0)
		return;

	b53_get_strings(ds, port, stringset, data);
	bcm_sf2_cfp_get_strings(ds, port, stringset,
				data + cnt * ETH_GSTRING_LEN);
}
1169
/* ethtool get_ethtool_stats: emit the b53 MIB counters first, then append
 * the CFP counters right after them in the caller's buffer.
 */
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
					 uint64_t *data)
{
	int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS);

	/* A negative count is an error code; bail out instead of
	 * indexing backwards into the buffer.
	 */
	if (cnt < 0)
		return;

	b53_get_ethtool_stats(ds, port, data);
	bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt);
}
1178
/* ethtool get_sset_count: total is the b53 MIB count plus the CFP count;
 * b53 errors (negative values) are propagated unchanged.
 */
static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
				     int sset)
{
	int base = b53_get_sset_count(ds, port, sset);

	if (base < 0)
		return base;

	return base + bcm_sf2_cfp_get_sset_count(ds, port, sset);
}
1191
/* DSA switch operations: SF2-specific handlers for setup, PHY/phylink,
 * power management, WoL and CFP (rxnfc); everything else is delegated to
 * the shared b53 library.
 */
static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= b53_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.teardown		= bcm_sf2_sw_teardown,
	/* Statistics combine b53 MIB counters with SF2 CFP counters */
	.get_strings		= bcm_sf2_sw_get_strings,
	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count		= bcm_sf2_sw_get_sset_count,
	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	/* phylink MAC handling is SF2-specific (RGMII/crossbar) */
	.phylink_validate	= bcm_sf2_sw_validate,
	.phylink_mac_config	= bcm_sf2_sw_mac_config,
	.phylink_mac_link_down	= bcm_sf2_sw_mac_link_down,
	.phylink_mac_link_up	= bcm_sf2_sw_mac_link_up,
	.phylink_fixed_state	= bcm_sf2_sw_fixed_state,
	/* Power management and Wake-on-LAN */
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	/* Bridging, VLAN and FDB are handled by the b53 library */
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_pre_bridge_flags	= b53_br_flags_pre,
	.port_bridge_flags	= b53_br_flags,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	/* CFP classification is exposed through ethtool rxnfc */
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
	.port_mdb_add		= b53_mdb_add,
	.port_mdb_del		= b53_mdb_del,
};
1233
/* Per-compatible match data describing an SF2 hardware variant */
struct bcm_sf2_of_data {
	u32 type;				/* BCMxxxx_DEVICE_ID chip identifier */
	const u16 *reg_offsets;			/* SWITCH_REG block register layout */
	unsigned int core_reg_align;		/* SWITCH_CORE address alignment factor */
	unsigned int num_cfp_rules;		/* size of the CFP rule table */
	unsigned int num_crossbar_int_ports;	/* internal ports on the crossbar mux */
};
1241
/* Register offsets for the SWITCH_REG_* block on BCM4908 (no RGMII 0-2;
 * only port 7's RGMII control register is present, as REG_RGMII_11_CNTRL)
 */
static const u16 bcm_sf2_4908_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_CROSSBAR]		= 0xc8,
	[REG_RGMII_11_CNTRL]	= 0x014c,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};

/* Match data for the BCM4908 variant */
static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
	.type		= BCM4908_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_4908_reg_offsets,
	.num_cfp_rules	= 256,
	.num_crossbar_int_ports = 2,
};
1264
/* Register offsets for the SWITCH_REG_* block on BCM7445 */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};

/* Match data for the BCM7445 variant (no crossbar) */
static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};
1288
/* Register offsets for the SWITCH_REG_* block on BCM7278 */
static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};

/* Match data for the BCM7278 variant: SWITCH_CORE registers are
 * differently aligned and the CFP rule table is half the size.
 */
static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};
1311
/* OF compatibles handled by this driver; both 7278 compatibles share the
 * same match data.
 */
static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm4908-switch",
	  .data = &bcm_sf2_4908_data
	},
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.8",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
1328
bcm_sf2_sw_probe(struct platform_device * pdev)1329 static int bcm_sf2_sw_probe(struct platform_device *pdev)
1330 {
1331 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
1332 struct device_node *dn = pdev->dev.of_node;
1333 const struct of_device_id *of_id = NULL;
1334 const struct bcm_sf2_of_data *data;
1335 struct b53_platform_data *pdata;
1336 struct dsa_switch_ops *ops;
1337 struct device_node *ports;
1338 struct bcm_sf2_priv *priv;
1339 struct b53_device *dev;
1340 struct dsa_switch *ds;
1341 void __iomem **base;
1342 unsigned int i;
1343 u32 reg, rev;
1344 int ret;
1345
1346 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1347 if (!priv)
1348 return -ENOMEM;
1349
1350 ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
1351 if (!ops)
1352 return -ENOMEM;
1353
1354 dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
1355 if (!dev)
1356 return -ENOMEM;
1357
1358 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1359 if (!pdata)
1360 return -ENOMEM;
1361
1362 of_id = of_match_node(bcm_sf2_of_match, dn);
1363 if (!of_id || !of_id->data)
1364 return -EINVAL;
1365
1366 data = of_id->data;
1367
1368 /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
1369 priv->type = data->type;
1370 priv->reg_offsets = data->reg_offsets;
1371 priv->core_reg_align = data->core_reg_align;
1372 priv->num_cfp_rules = data->num_cfp_rules;
1373 priv->num_crossbar_int_ports = data->num_crossbar_int_ports;
1374
1375 priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
1376 "switch");
1377 if (IS_ERR(priv->rcdev))
1378 return PTR_ERR(priv->rcdev);
1379
1380 /* Auto-detection using standard registers will not work, so
1381 * provide an indication of what kind of device we are for
1382 * b53_common to work with
1383 */
1384 pdata->chip_id = priv->type;
1385 dev->pdata = pdata;
1386
1387 priv->dev = dev;
1388 ds = dev->ds;
1389 ds->ops = &bcm_sf2_ops;
1390
1391 /* Advertise the 8 egress queues */
1392 ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
1393
1394 dev_set_drvdata(&pdev->dev, priv);
1395
1396 spin_lock_init(&priv->indir_lock);
1397 mutex_init(&priv->cfp.lock);
1398 INIT_LIST_HEAD(&priv->cfp.rules_list);
1399
1400 /* CFP rule #0 cannot be used for specific classifications, flag it as
1401 * permanently used
1402 */
1403 set_bit(0, priv->cfp.used);
1404 set_bit(0, priv->cfp.unique);
1405
1406 /* Balance of_node_put() done by of_find_node_by_name() */
1407 of_node_get(dn);
1408 ports = of_find_node_by_name(dn, "ports");
1409 if (ports) {
1410 bcm_sf2_identify_ports(priv, ports);
1411 of_node_put(ports);
1412 }
1413
1414 priv->irq0 = irq_of_parse_and_map(dn, 0);
1415 priv->irq1 = irq_of_parse_and_map(dn, 1);
1416
1417 base = &priv->core;
1418 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1419 *base = devm_platform_ioremap_resource(pdev, i);
1420 if (IS_ERR(*base)) {
1421 pr_err("unable to find register: %s\n", reg_names[i]);
1422 return PTR_ERR(*base);
1423 }
1424 base++;
1425 }
1426
1427 priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
1428 if (IS_ERR(priv->clk))
1429 return PTR_ERR(priv->clk);
1430
1431 ret = clk_prepare_enable(priv->clk);
1432 if (ret)
1433 return ret;
1434
1435 priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
1436 if (IS_ERR(priv->clk_mdiv)) {
1437 ret = PTR_ERR(priv->clk_mdiv);
1438 goto out_clk;
1439 }
1440
1441 ret = clk_prepare_enable(priv->clk_mdiv);
1442 if (ret)
1443 goto out_clk;
1444
1445 ret = bcm_sf2_sw_rst(priv);
1446 if (ret) {
1447 pr_err("unable to software reset switch: %d\n", ret);
1448 goto out_clk_mdiv;
1449 }
1450
1451 bcm_sf2_crossbar_setup(priv);
1452
1453 bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1454
1455 ret = bcm_sf2_mdio_register(ds);
1456 if (ret) {
1457 pr_err("failed to register MDIO bus\n");
1458 goto out_clk_mdiv;
1459 }
1460
1461 bcm_sf2_gphy_enable_set(priv->dev->ds, false);
1462
1463 ret = bcm_sf2_cfp_rst(priv);
1464 if (ret) {
1465 pr_err("failed to reset CFP\n");
1466 goto out_mdio;
1467 }
1468
1469 /* Disable all interrupts and request them */
1470 bcm_sf2_intr_disable(priv);
1471
1472 ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
1473 "switch_0", ds);
1474 if (ret < 0) {
1475 pr_err("failed to request switch_0 IRQ\n");
1476 goto out_mdio;
1477 }
1478
1479 ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
1480 "switch_1", ds);
1481 if (ret < 0) {
1482 pr_err("failed to request switch_1 IRQ\n");
1483 goto out_mdio;
1484 }
1485
1486 /* Reset the MIB counters */
1487 reg = core_readl(priv, CORE_GMNCFGCFG);
1488 reg |= RST_MIB_CNT;
1489 core_writel(priv, reg, CORE_GMNCFGCFG);
1490 reg &= ~RST_MIB_CNT;
1491 core_writel(priv, reg, CORE_GMNCFGCFG);
1492
1493 /* Get the maximum number of ports for this switch */
1494 priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1495 if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1496 priv->hw_params.num_ports = DSA_MAX_PORTS;
1497
1498 /* Assume a single GPHY setup if we can't read that property */
1499 if (of_property_read_u32(dn, "brcm,num-gphy",
1500 &priv->hw_params.num_gphy))
1501 priv->hw_params.num_gphy = 1;
1502
1503 rev = reg_readl(priv, REG_SWITCH_REVISION);
1504 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
1505 SWITCH_TOP_REV_MASK;
1506 priv->hw_params.core_rev = (rev & SF2_REV_MASK);
1507
1508 rev = reg_readl(priv, REG_PHY_REVISION);
1509 priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
1510
1511 ret = b53_switch_register(dev);
1512 if (ret)
1513 goto out_mdio;
1514
1515 dev_info(&pdev->dev,
1516 "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
1517 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
1518 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1519 priv->irq0, priv->irq1);
1520
1521 return 0;
1522
1523 out_mdio:
1524 bcm_sf2_mdio_unregister(priv);
1525 out_clk_mdiv:
1526 clk_disable_unprepare(priv->clk_mdiv);
1527 out_clk:
1528 clk_disable_unprepare(priv->clk);
1529 return ret;
1530 }
1531
/* Device removal: tear down in the reverse order of probe. The sequence
 * (interrupts -> DSA unregister -> CFP -> MDIO -> clocks -> reset) is
 * order-sensitive and must not be reshuffled.
 */
static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* drvdata may already have been cleared by shutdown */
	if (!priv)
		return 0;

	/* Clear WoL state so nothing keeps ports awake after removal */
	priv->wol_ports_mask = 0;
	/* Disable interrupts */
	bcm_sf2_intr_disable(priv);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_cfp_exit(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);
	clk_disable_unprepare(priv->clk_mdiv);
	clk_disable_unprepare(priv->clk);
	/* Only the 7278 variant has a usable reset line to assert */
	if (priv->type == BCM7278_DEVICE_ID)
		reset_control_assert(priv->rcdev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
1554
/* Shutdown (reboot/kexec path): leave the hardware in a state a freshly
 * booted kernel can probe successfully.
 */
static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* drvdata may already have been cleared by remove */
	if (!priv)
		return;

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);

	dsa_switch_shutdown(priv->dev->ds);

	platform_set_drvdata(pdev, NULL);
}
1575
1576 #ifdef CONFIG_PM_SLEEP
bcm_sf2_suspend(struct device * dev)1577 static int bcm_sf2_suspend(struct device *dev)
1578 {
1579 struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
1580
1581 return dsa_switch_suspend(priv->dev->ds);
1582 }
1583
bcm_sf2_resume(struct device * dev)1584 static int bcm_sf2_resume(struct device *dev)
1585 {
1586 struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
1587
1588 return dsa_switch_resume(priv->dev->ds);
1589 }
1590 #endif /* CONFIG_PM_SLEEP */
1591
/* Wire the suspend/resume handlers into a dev_pm_ops (no-ops when
 * CONFIG_PM_SLEEP is disabled).
 */
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);


/* Platform driver glue: matched against bcm_sf2_of_match compatibles */
static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");
1612