1 /*
2 * Broadcom Starfighter 2 DSA switch driver
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/platform_device.h>
17 #include <linux/phy.h>
18 #include <linux/phy_fixed.h>
19 #include <linux/phylink.h>
20 #include <linux/mii.h>
21 #include <linux/of.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_address.h>
24 #include <linux/of_net.h>
25 #include <linux/of_mdio.h>
26 #include <net/dsa.h>
27 #include <linux/ethtool.h>
28 #include <linux/if_bridge.h>
29 #include <linux/brcmphy.h>
30 #include <linux/etherdevice.h>
31 #include <linux/platform_data/b53.h>
32
33 #include "bcm_sf2.h"
34 #include "bcm_sf2_regs.h"
35 #include "b53/b53_priv.h"
36 #include "b53/b53_regs.h"
37
/* One-time setup of the IMP (CPU-facing) port: power up its queue
 * memories, enable switch forwarding, put the management interface in
 * dumb forwarding mode, program the TC-to-egress-queue map and force
 * link/forwarding state for the port.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number (identity map: priority i -> queue i)
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Tag frames to/from this port with a Broadcom header */
	b53_brcm_hdr_setup(ds, port);

	if (port == 8) {
		/* The IMP status override register moved between the
		 * 7445 and later (e.g. 7278) register layouts.
		 */
		if (priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_IMP;
		else
			offset = CORE_STS_OVERRIDE_IMP2;

		/* Force link status for IMP port */
		reg = core_readl(priv, offset);
		reg |= (MII_SW_OR | LINK_STS);
		reg &= ~GMII_SPEED_UP_2G;
		core_writel(priv, reg, offset);

		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
		reg = core_readl(priv, CORE_IMP_CTL);
		reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_IMP_CTL);
	} else {
		/* Non-port-8 CPU ports only need RX/TX un-gated */
		reg = core_readl(priv, CORE_G_PCTL_PORT(port));
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
	}
}
90
/* Power the integrated GPHY up or down.
 *
 * Power-up asserts PHY_RESET while clearing the power-down/IDDQ bits,
 * waits for the analog blocks to settle, then de-asserts the reset.
 * Power-down asserts the power-down bits plus reset, then gates the
 * 25MHz clock after a short delay.  The delay values follow the
 * sequence already encoded here; their exact provenance is not visible
 * in this file.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	/* Final write either releases reset (enable) or gates the clock
	 * (disable)
	 */
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
119
bcm_sf2_port_intr_enable(struct bcm_sf2_priv * priv,int port)120 static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
121 int port)
122 {
123 unsigned int off;
124
125 switch (port) {
126 case 7:
127 off = P7_IRQ_OFF;
128 break;
129 case 0:
130 /* Port 0 interrupts are located on the first bank */
131 intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
132 return;
133 default:
134 off = P_IRQ_OFF(port);
135 break;
136 }
137
138 intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
139 }
140
bcm_sf2_port_intr_disable(struct bcm_sf2_priv * priv,int port)141 static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
142 int port)
143 {
144 unsigned int off;
145
146 switch (port) {
147 case 7:
148 off = P7_IRQ_OFF;
149 break;
150 case 0:
151 /* Port 0 interrupts are located on the first bank */
152 intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
153 intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
154 return;
155 default:
156 off = P_IRQ_OFF(port);
157 break;
158 }
159
160 intrl2_1_mask_set(priv, P_IRQ_MASK(off));
161 intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
162 }
163
/* Per-port enable path (also the dsa_switch_ops .port_enable hook):
 * power up the port memories, enable address learning, optionally turn
 * on Broadcom tags, program QoS mapping and flow-control thresholds,
 * then hand off to the generic b53 port enable.
 *
 * @phy may be NULL (e.g. when called from bcm_sf2_sw_setup()).
 * Returns the result of b53_enable_port().
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable learning */
	reg = core_readl(priv, CORE_DIS_LEARN);
	reg &= ~BIT(port);
	core_writel(priv, reg, CORE_DIS_LEARN);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number (identity map: priority i -> queue i)
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}
230
/* Per-port disable path (dsa_switch_ops .port_disable hook).
 *
 * Ports armed for Wake-on-LAN are left powered: learning is disabled
 * on them and we return early so the port keeps receiving.  Otherwise
 * interrupts, the GPHY and finally the port memories are shut down.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Disable learning while in WoL mode */
	if (priv->wol_ports_mask & (1 << port)) {
		reg = core_readl(priv, CORE_DIS_LEARN);
		reg |= BIT(port);
		core_writel(priv, reg, CORE_DIS_LEARN);
		return;
	}

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	b53_disable_port(ds, port, phy);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
258
259
/* Indirect pseudo-PHY access through the switch core registers.
 *
 * @op: non-zero for a read, zero for a write.
 * Returns the 16-bit register value for reads, 0 for writes.
 *
 * MDIO_MASTER_SEL is set for the duration of the access and cleared
 * afterwards.  The core addresses are built as (page << 8 | offset) << 2;
 * the 0x70 constant below does not obviously follow that formula —
 * presumably it is the PHY-address latch register for this core
 * (NOTE(review): confirm against the SF2 register map).
 */
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Hand the MDIO bus back to the regular master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
290
bcm_sf2_sw_mdio_read(struct mii_bus * bus,int addr,int regnum)291 static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
292 {
293 struct bcm_sf2_priv *priv = bus->priv;
294
295 /* Intercept reads from Broadcom pseudo-PHY address, else, send
296 * them to our master MDIO bus controller
297 */
298 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
299 return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
300 else
301 return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
302 }
303
/* Slave MII bus write hook: the mirror image of bcm_sf2_sw_mdio_read()
 * — pseudo-PHY writes use the indirect workaround, the rest go to the
 * master MDIO bus controller.
 */
static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;
	bool intercept;

	intercept = addr == BRCM_PSEUDO_PHY_ADDR &&
		    (priv->indir_phy_mask & BIT(addr));
	if (!intercept)
		return mdiobus_write_nested(priv->master_mii_bus, addr,
					    regnum, val);

	return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
}
318
bcm_sf2_switch_0_isr(int irq,void * dev_id)319 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
320 {
321 struct dsa_switch *ds = dev_id;
322 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
323
324 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
325 ~priv->irq0_mask;
326 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
327
328 return IRQ_HANDLED;
329 }
330
bcm_sf2_switch_1_isr(int irq,void * dev_id)331 static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
332 {
333 struct dsa_switch *ds = dev_id;
334 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
335
336 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
337 ~priv->irq1_mask;
338 intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
339
340 if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
341 priv->port_sts[7].link = true;
342 dsa_port_phylink_mac_change(ds, 7, true);
343 }
344 if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
345 priv->port_sts[7].link = false;
346 dsa_port_phylink_mac_change(ds, 7, false);
347 }
348
349 return IRQ_HANDLED;
350 }
351
bcm_sf2_sw_rst(struct bcm_sf2_priv * priv)352 static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
353 {
354 unsigned int timeout = 1000;
355 u32 reg;
356
357 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
358 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
359 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
360
361 do {
362 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
363 if (!(reg & SOFTWARE_RESET))
364 break;
365
366 usleep_range(1000, 2000);
367 } while (timeout-- > 0);
368
369 if (timeout == 0)
370 return -ETIMEDOUT;
371
372 return 0;
373 }
374
bcm_sf2_intr_disable(struct bcm_sf2_priv * priv)375 static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
376 {
377 intrl2_0_mask_set(priv, 0xffffffff);
378 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
379 intrl2_1_mask_set(priv, 0xffffffff);
380 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
381 }
382
/* Walk the device-tree port children of @dn and classify each port by
 * its "phy-mode": internal PHYs populate int_phy_mask, a MoCA port is
 * remembered in moca_port (-1 if absent), and ports requesting a
 * Broadcom header populate brcm_tag_mask.  Ports without a "reg" or a
 * parseable "phy-mode" are skipped.
 */
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	int mode;
	unsigned int port_num;

	priv->moca_port = -1;

	/* for_each_available_child_of_node() manages the child node
	 * refcount across iterations, so the early 'continue's are safe.
	 */
	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}
415
bcm_sf2_mdio_register(struct dsa_switch * ds)416 static int bcm_sf2_mdio_register(struct dsa_switch *ds)
417 {
418 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
419 struct device_node *dn;
420 static int index;
421 int err;
422
423 /* Find our integrated MDIO bus node */
424 dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
425 priv->master_mii_bus = of_mdio_find_bus(dn);
426 if (!priv->master_mii_bus)
427 return -EPROBE_DEFER;
428
429 get_device(&priv->master_mii_bus->dev);
430 priv->master_mii_dn = dn;
431
432 priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
433 if (!priv->slave_mii_bus)
434 return -ENOMEM;
435
436 priv->slave_mii_bus->priv = priv;
437 priv->slave_mii_bus->name = "sf2 slave mii";
438 priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
439 priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
440 snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
441 index++);
442 priv->slave_mii_bus->dev.of_node = dn;
443
444 /* Include the pseudo-PHY address to divert reads towards our
445 * workaround. This is only required for 7445D0, since 7445E0
446 * disconnects the internal switch pseudo-PHY such that we can use the
447 * regular SWITCH_MDIO master controller instead.
448 *
449 * Here we flag the pseudo PHY as needing special treatment and would
450 * otherwise make all other PHY read/writes go to the master MDIO bus
451 * controller that comes with this switch backed by the "mdio-unimac"
452 * driver.
453 */
454 if (of_machine_is_compatible("brcm,bcm7445d0"))
455 priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
456 else
457 priv->indir_phy_mask = 0;
458
459 ds->phys_mii_mask = priv->indir_phy_mask;
460 ds->slave_mii_bus = priv->slave_mii_bus;
461 priv->slave_mii_bus->parent = ds->dev->parent;
462 priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
463
464 err = mdiobus_register(priv->slave_mii_bus);
465 if (err && dn)
466 of_node_put(dn);
467
468 return err;
469 }
470
bcm_sf2_mdio_unregister(struct bcm_sf2_priv * priv)471 static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
472 {
473 mdiobus_unregister(priv->slave_mii_bus);
474 if (priv->master_mii_dn)
475 of_node_put(priv->master_mii_dn);
476 }
477
bcm_sf2_sw_get_phy_flags(struct dsa_switch * ds,int port)478 static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
479 {
480 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
481
482 /* The BCM7xxx PHY driver expects to find the integrated PHY revision
483 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
484 * the REG_PHY_REVISION register layout is.
485 */
486
487 return priv->hw_params.gphy_rev;
488 }
489
/* phylink validate callback: reject interface modes this switch cannot
 * drive, then restrict @supported and the advertising mask to the link
 * modes the hardware offers for the given interface.
 */
static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
				unsigned long *supported,
				struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Anything that is not RGMII (any flavour), MII, Reverse MII,
	 * GMII, internal or MoCA is unsupported: clear the mask entirely.
	 */
	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_MOCA) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	/* 10/100 in both duplexes is always available */
	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
533
/* phylink mac_config callback: program the RGMII control register for
 * external-PHY interfaces (port mode, RGMII internal delay, pause),
 * then force the port's link parameters (speed/duplex/link) in the
 * per-port status override register.  Internal and MoCA PHYs skip the
 * RGMII programming and only get the link override.
 */
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  const struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	u32 reg, offset;

	/* Status override register location differs between 7445 and
	 * the later register layout
	 */
	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* Plain RGMII: no internal delay, so disable ID mode */
		id_mode_dis = 1;
		/* fallthrough */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		port_mode = EXT_REVMII;
		break;
	default:
		/* all other PHYs: internal and MoCA */
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, let
	 * RGMII_MODE_EN bet set by mac_link_{up,down}
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	/* RX pause is enabled whenever any pause mode is requested;
	 * TX pause only when explicitly requested
	 */
	if (state->pause & MLO_PAUSE_TXRX_MASK) {
		if (state->pause & MLO_PAUSE_TX)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (state->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (state->link)
		reg |= LINK_STS;
	if (state->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);
}
604
/* Common helper for the mac_link_{up,down} callbacks: gate or ungate
 * the RGMII interface for the given port.  Interfaces without an RGMII
 * control register (internal, MoCA, ...) are left untouched.
 */
static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
				    phy_interface_t interface, bool link)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	bool has_rgmii_cntrl;
	u32 reg;

	has_rgmii_cntrl = phy_interface_mode_is_rgmii(interface) ||
			  interface == PHY_INTERFACE_MODE_MII ||
			  interface == PHY_INTERFACE_MODE_REVMII;
	if (!has_rgmii_cntrl)
		return;

	/* If the link is down, just disable the interface to conserve power */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	if (link)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
}
624
/* phylink mac_link_down callback: power down the port interface */
static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     phy_interface_t interface)
{
	bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
631
/* phylink mac_link_up callback: bring the interface up and, for
 * PHY-driven links, (re)initialize EEE state.
 */
static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
				   unsigned int mode,
				   phy_interface_t interface,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	bcm_sf2_sw_mac_link_set(ds, port, interface, true);

	if (mode == MLO_AN_PHY && phydev) {
		struct ethtool_eee *p = &priv->dev->ports[port].eee;

		p->eee_enabled = b53_eee_init(ds, port, phydev);
	}
}
645
/* phylink fixed-state callback: report link status for fixed-link
 * ports.  Non-MoCA fixed links are simply reported up; the MoCA port
 * reflects the state cached by the INTRL2 bank 1 interrupt handler.
 */
static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
				   struct phylink_link_state *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	status->link = false;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].slave);
		status->duplex = DUPLEX_FULL;
	} else {
		status->link = true;
	}
}
675
/* Globally enable Adaptive Buffer Control: first pulse the flush bits
 * to drain the queues, then clear them and turn ACB on with its
 * algorithm bit set.
 */
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}
689
bcm_sf2_sw_suspend(struct dsa_switch * ds)690 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
691 {
692 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
693 unsigned int port;
694
695 bcm_sf2_intr_disable(priv);
696
697 /* Disable all ports physically present including the IMP
698 * port, the other ones have already been disabled during
699 * bcm_sf2_sw_setup
700 */
701 for (port = 0; port < ds->num_ports; port++) {
702 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
703 bcm_sf2_port_disable(ds, port, NULL);
704 }
705
706 return 0;
707 }
708
/* System resume: soft-reset the switch core, re-enable the single
 * integrated GPHY if present, then re-run the switch setup path.
 *
 * NOTE(review): the return value of ds->ops->setup() is discarded —
 * confirm this is intentional (the current setup implementation always
 * returns 0).
 */
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	ds->ops->setup(ds);

	return 0;
}
727
bcm_sf2_sw_get_wol(struct dsa_switch * ds,int port,struct ethtool_wolinfo * wol)728 static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
729 struct ethtool_wolinfo *wol)
730 {
731 struct net_device *p = ds->ports[port].cpu_dp->master;
732 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
733 struct ethtool_wolinfo pwol = { };
734
735 /* Get the parent device WoL settings */
736 if (p->ethtool_ops->get_wol)
737 p->ethtool_ops->get_wol(p, &pwol);
738
739 /* Advertise the parent device supported settings */
740 wol->supported = pwol.supported;
741 memset(&wol->sopass, 0, sizeof(wol->sopass));
742
743 if (pwol.wolopts & WAKE_MAGICSECURE)
744 memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
745
746 if (priv->wol_ports_mask & (1 << port))
747 wol->wolopts = pwol.wolopts;
748 else
749 wol->wolopts = 0;
750 }
751
/* ethtool set_wol: validate the requested options against the DSA
 * master's capabilities, track armed ports in wol_ports_mask (keeping
 * the CPU port armed whenever any user port is), and forward the
 * request to the master device.
 *
 * Returns -EINVAL if an unsupported option is requested, otherwise the
 * master's set_wol() result.
 */
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	struct ethtool_wolinfo pwol = { };

	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}
781
bcm_sf2_sw_setup(struct dsa_switch * ds)782 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
783 {
784 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
785 unsigned int port;
786
787 /* Enable all valid ports and disable those unused */
788 for (port = 0; port < priv->hw_params.num_ports; port++) {
789 /* IMP port receives special treatment */
790 if (dsa_is_user_port(ds, port))
791 bcm_sf2_port_setup(ds, port, NULL);
792 else if (dsa_is_cpu_port(ds, port))
793 bcm_sf2_imp_setup(ds, port);
794 else
795 bcm_sf2_port_disable(ds, port, NULL);
796 }
797
798 b53_configure_vlan(ds);
799 bcm_sf2_enable_acb(ds);
800
801 return 0;
802 }
803
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.  This is ((page << 8) | reg) scaled by the 4-byte
 * register stride: page << 10 | reg << 2.
 */
#define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2)
809
/* b53 8-bit read hook: narrow reads are serviced by a full 32-bit
 * register load and truncated on assignment.  Always returns 0.
 */
static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
819
/* b53 16-bit read hook: serviced by a 32-bit load, truncated on
 * assignment.  Always returns 0.
 */
static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
829
/* b53 32-bit read hook.  Always returns 0. */
static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
839
/* b53 64-bit read hook (also registered for 48-bit reads).  Always
 * returns 0.
 */
static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
849
/* b53 8-bit write hook: narrow writes are issued as full 32-bit
 * stores.  Always returns 0.
 */
static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
859
/* b53 16-bit write hook: issued as a 32-bit store.  Always returns 0. */
static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
869
/* b53 32-bit write hook.  Always returns 0. */
static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
879
/* b53 64-bit write hook (also registered for 48-bit writes).  Always
 * returns 0.
 */
static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
889
/* b53 register accessors for this bus glue.  48-bit accesses are
 * deliberately routed through the 64-bit handlers.
 */
static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};
902
/* DSA switch operations: SF2-specific hooks where the hardware
 * differs (setup, phylink, WoL, port power management, rxnfc), shared
 * b53 implementations everywhere else.
 */
static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= b53_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.phylink_validate	= bcm_sf2_sw_validate,
	.phylink_mac_config	= bcm_sf2_sw_mac_config,
	.phylink_mac_link_down	= bcm_sf2_sw_mac_link_down,
	.phylink_mac_link_up	= bcm_sf2_sw_mac_link_up,
	.phylink_fixed_state	= bcm_sf2_sw_fixed_state,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};
940
/* Per-compatible match data describing a switch generation */
struct bcm_sf2_of_data {
	u32 type;			/* BCM74xx/BCM7278 device ID */
	const u16 *reg_offsets;		/* SWITCH_REG block layout */
	unsigned int core_reg_align;	/* SWITCH_CORE address alignment */
	unsigned int num_cfp_rules;	/* CFP (TCAM) rule capacity */
};
947
/* Register offsets for the SWITCH_REG_* block (BCM7445 layout) */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};
964
/* BCM7445: natural SWITCH_CORE alignment, 256 CFP rules */
static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};
971
/* Register offsets for the SWITCH_REG_* block (BCM7278 layout) */
static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};
987
/* BCM7278: aligned SWITCH_CORE accesses, 128 CFP rules */
static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};
994
/* Device-tree match table; both 7278 switch revisions share the same
 * match data.
 */
static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.8",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
1008
bcm_sf2_sw_probe(struct platform_device * pdev)1009 static int bcm_sf2_sw_probe(struct platform_device *pdev)
1010 {
1011 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
1012 struct device_node *dn = pdev->dev.of_node;
1013 const struct of_device_id *of_id = NULL;
1014 const struct bcm_sf2_of_data *data;
1015 struct b53_platform_data *pdata;
1016 struct dsa_switch_ops *ops;
1017 struct device_node *ports;
1018 struct bcm_sf2_priv *priv;
1019 struct b53_device *dev;
1020 struct dsa_switch *ds;
1021 void __iomem **base;
1022 struct resource *r;
1023 unsigned int i;
1024 u32 reg, rev;
1025 int ret;
1026
1027 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1028 if (!priv)
1029 return -ENOMEM;
1030
1031 ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
1032 if (!ops)
1033 return -ENOMEM;
1034
1035 dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
1036 if (!dev)
1037 return -ENOMEM;
1038
1039 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1040 if (!pdata)
1041 return -ENOMEM;
1042
1043 of_id = of_match_node(bcm_sf2_of_match, dn);
1044 if (!of_id || !of_id->data)
1045 return -EINVAL;
1046
1047 data = of_id->data;
1048
1049 /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
1050 priv->type = data->type;
1051 priv->reg_offsets = data->reg_offsets;
1052 priv->core_reg_align = data->core_reg_align;
1053 priv->num_cfp_rules = data->num_cfp_rules;
1054
1055 /* Auto-detection using standard registers will not work, so
1056 * provide an indication of what kind of device we are for
1057 * b53_common to work with
1058 */
1059 pdata->chip_id = priv->type;
1060 dev->pdata = pdata;
1061
1062 priv->dev = dev;
1063 ds = dev->ds;
1064 ds->ops = &bcm_sf2_ops;
1065
1066 /* Advertise the 8 egress queues */
1067 ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
1068
1069 dev_set_drvdata(&pdev->dev, priv);
1070
1071 spin_lock_init(&priv->indir_lock);
1072 mutex_init(&priv->stats_mutex);
1073 mutex_init(&priv->cfp.lock);
1074
1075 /* CFP rule #0 cannot be used for specific classifications, flag it as
1076 * permanently used
1077 */
1078 set_bit(0, priv->cfp.used);
1079 set_bit(0, priv->cfp.unique);
1080
1081 /* Balance of_node_put() done by of_find_node_by_name() */
1082 of_node_get(dn);
1083 ports = of_find_node_by_name(dn, "ports");
1084 if (ports) {
1085 bcm_sf2_identify_ports(priv, ports);
1086 of_node_put(ports);
1087 }
1088
1089 priv->irq0 = irq_of_parse_and_map(dn, 0);
1090 priv->irq1 = irq_of_parse_and_map(dn, 1);
1091
1092 base = &priv->core;
1093 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1094 r = platform_get_resource(pdev, IORESOURCE_MEM, i);
1095 *base = devm_ioremap_resource(&pdev->dev, r);
1096 if (IS_ERR(*base)) {
1097 pr_err("unable to find register: %s\n", reg_names[i]);
1098 return PTR_ERR(*base);
1099 }
1100 base++;
1101 }
1102
1103 ret = bcm_sf2_sw_rst(priv);
1104 if (ret) {
1105 pr_err("unable to software reset switch: %d\n", ret);
1106 return ret;
1107 }
1108
1109 bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1110
1111 ret = bcm_sf2_mdio_register(ds);
1112 if (ret) {
1113 pr_err("failed to register MDIO bus\n");
1114 return ret;
1115 }
1116
1117 bcm_sf2_gphy_enable_set(priv->dev->ds, false);
1118
1119 ret = bcm_sf2_cfp_rst(priv);
1120 if (ret) {
1121 pr_err("failed to reset CFP\n");
1122 goto out_mdio;
1123 }
1124
1125 /* Disable all interrupts and request them */
1126 bcm_sf2_intr_disable(priv);
1127
1128 ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
1129 "switch_0", ds);
1130 if (ret < 0) {
1131 pr_err("failed to request switch_0 IRQ\n");
1132 goto out_mdio;
1133 }
1134
1135 ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
1136 "switch_1", ds);
1137 if (ret < 0) {
1138 pr_err("failed to request switch_1 IRQ\n");
1139 goto out_mdio;
1140 }
1141
1142 /* Reset the MIB counters */
1143 reg = core_readl(priv, CORE_GMNCFGCFG);
1144 reg |= RST_MIB_CNT;
1145 core_writel(priv, reg, CORE_GMNCFGCFG);
1146 reg &= ~RST_MIB_CNT;
1147 core_writel(priv, reg, CORE_GMNCFGCFG);
1148
1149 /* Get the maximum number of ports for this switch */
1150 priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1151 if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1152 priv->hw_params.num_ports = DSA_MAX_PORTS;
1153
1154 /* Assume a single GPHY setup if we can't read that property */
1155 if (of_property_read_u32(dn, "brcm,num-gphy",
1156 &priv->hw_params.num_gphy))
1157 priv->hw_params.num_gphy = 1;
1158
1159 rev = reg_readl(priv, REG_SWITCH_REVISION);
1160 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
1161 SWITCH_TOP_REV_MASK;
1162 priv->hw_params.core_rev = (rev & SF2_REV_MASK);
1163
1164 rev = reg_readl(priv, REG_PHY_REVISION);
1165 priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
1166
1167 ret = b53_switch_register(dev);
1168 if (ret)
1169 goto out_mdio;
1170
1171 pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
1172 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
1173 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1174 priv->core, priv->irq0, priv->irq1);
1175
1176 return 0;
1177
1178 out_mdio:
1179 bcm_sf2_mdio_unregister(priv);
1180 return ret;
1181 }
1182
/* Teardown counterpart of bcm_sf2_sw_probe(): unregister from the DSA
 * core, quiesce the hardware, then drop the internal MDIO bus.
 */
static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* Clearing the WoL port mask first presumably lets the suspend
	 * call below power everything down rather than keeping WoL ports
	 * alive -- verify against bcm_sf2_sw_suspend()
	 */
	priv->wol_ports_mask = 0;
	dsa_unregister_switch(priv->dev->ds);
	/* Disable all ports and interrupts */
	bcm_sf2_sw_suspend(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}
1195
bcm_sf2_sw_shutdown(struct platform_device * pdev)1196 static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
1197 {
1198 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1199
1200 /* For a kernel about to be kexec'd we want to keep the GPHY on for a
1201 * successful MDIO bus scan to occur. If we did turn off the GPHY
1202 * before (e.g: port_disable), this will also power it back on.
1203 *
1204 * Do not rely on kexec_in_progress, just power the PHY on.
1205 */
1206 if (priv->hw_params.num_gphy == 1)
1207 bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1208 }
1209
1210 #ifdef CONFIG_PM_SLEEP
bcm_sf2_suspend(struct device * dev)1211 static int bcm_sf2_suspend(struct device *dev)
1212 {
1213 struct platform_device *pdev = to_platform_device(dev);
1214 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1215
1216 return dsa_switch_suspend(priv->dev->ds);
1217 }
1218
bcm_sf2_resume(struct device * dev)1219 static int bcm_sf2_resume(struct device *dev)
1220 {
1221 struct platform_device *pdev = to_platform_device(dev);
1222 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1223
1224 return dsa_switch_resume(priv->dev->ds);
1225 }
1226 #endif /* CONFIG_PM_SLEEP */
1227
/* Suspend/resume hooks are compiled out when CONFIG_PM_SLEEP is unset */
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);


/* Platform driver glue: probed via the OF match table above */
static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");
1248