1 /*
2 * B53 switch driver main logic
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/delay.h>
23 #include <linux/export.h>
24 #include <linux/gpio.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/platform_data/b53.h>
28 #include <linux/phy.h>
29 #include <linux/phylink.h>
30 #include <linux/etherdevice.h>
31 #include <linux/if_bridge.h>
32 #include <net/dsa.h>
33
34 #include "b53_regs.h"
35 #include "b53_priv.h"
36
37 struct b53_mib_desc {
38 u8 size;
39 u8 offset;
40 const char *name;
41 };
42
43 /* BCM5365 MIB counters */
44 static const struct b53_mib_desc b53_mibs_65[] = {
45 { 8, 0x00, "TxOctets" },
46 { 4, 0x08, "TxDropPkts" },
47 { 4, 0x10, "TxBroadcastPkts" },
48 { 4, 0x14, "TxMulticastPkts" },
49 { 4, 0x18, "TxUnicastPkts" },
50 { 4, 0x1c, "TxCollisions" },
51 { 4, 0x20, "TxSingleCollision" },
52 { 4, 0x24, "TxMultipleCollision" },
53 { 4, 0x28, "TxDeferredTransmit" },
54 { 4, 0x2c, "TxLateCollision" },
55 { 4, 0x30, "TxExcessiveCollision" },
56 { 4, 0x38, "TxPausePkts" },
57 { 8, 0x44, "RxOctets" },
58 { 4, 0x4c, "RxUndersizePkts" },
59 { 4, 0x50, "RxPausePkts" },
60 { 4, 0x54, "Pkts64Octets" },
61 { 4, 0x58, "Pkts65to127Octets" },
62 { 4, 0x5c, "Pkts128to255Octets" },
63 { 4, 0x60, "Pkts256to511Octets" },
64 { 4, 0x64, "Pkts512to1023Octets" },
65 { 4, 0x68, "Pkts1024to1522Octets" },
66 { 4, 0x6c, "RxOversizePkts" },
67 { 4, 0x70, "RxJabbers" },
68 { 4, 0x74, "RxAlignmentErrors" },
69 { 4, 0x78, "RxFCSErrors" },
70 { 8, 0x7c, "RxGoodOctets" },
71 { 4, 0x84, "RxDropPkts" },
72 { 4, 0x88, "RxUnicastPkts" },
73 { 4, 0x8c, "RxMulticastPkts" },
74 { 4, 0x90, "RxBroadcastPkts" },
75 { 4, 0x94, "RxSAChanges" },
76 { 4, 0x98, "RxFragments" },
77 };
78
79 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
80
81 /* BCM63xx MIB counters */
82 static const struct b53_mib_desc b53_mibs_63xx[] = {
83 { 8, 0x00, "TxOctets" },
84 { 4, 0x08, "TxDropPkts" },
85 { 4, 0x0c, "TxQoSPkts" },
86 { 4, 0x10, "TxBroadcastPkts" },
87 { 4, 0x14, "TxMulticastPkts" },
88 { 4, 0x18, "TxUnicastPkts" },
89 { 4, 0x1c, "TxCollisions" },
90 { 4, 0x20, "TxSingleCollision" },
91 { 4, 0x24, "TxMultipleCollision" },
92 { 4, 0x28, "TxDeferredTransmit" },
93 { 4, 0x2c, "TxLateCollision" },
94 { 4, 0x30, "TxExcessiveCollision" },
95 { 4, 0x38, "TxPausePkts" },
96 { 8, 0x3c, "TxQoSOctets" },
97 { 8, 0x44, "RxOctets" },
98 { 4, 0x4c, "RxUndersizePkts" },
99 { 4, 0x50, "RxPausePkts" },
100 { 4, 0x54, "Pkts64Octets" },
101 { 4, 0x58, "Pkts65to127Octets" },
102 { 4, 0x5c, "Pkts128to255Octets" },
103 { 4, 0x60, "Pkts256to511Octets" },
104 { 4, 0x64, "Pkts512to1023Octets" },
105 { 4, 0x68, "Pkts1024to1522Octets" },
106 { 4, 0x6c, "RxOversizePkts" },
107 { 4, 0x70, "RxJabbers" },
108 { 4, 0x74, "RxAlignmentErrors" },
109 { 4, 0x78, "RxFCSErrors" },
110 { 8, 0x7c, "RxGoodOctets" },
111 { 4, 0x84, "RxDropPkts" },
112 { 4, 0x88, "RxUnicastPkts" },
113 { 4, 0x8c, "RxMulticastPkts" },
114 { 4, 0x90, "RxBroadcastPkts" },
115 { 4, 0x94, "RxSAChanges" },
116 { 4, 0x98, "RxFragments" },
117 { 4, 0xa0, "RxSymbolErrors" },
118 { 4, 0xa4, "RxQoSPkts" },
119 { 8, 0xa8, "RxQoSOctets" },
120 { 4, 0xb0, "Pkts1523to2047Octets" },
121 { 4, 0xb4, "Pkts2048to4095Octets" },
122 { 4, 0xb8, "Pkts4096to8191Octets" },
123 { 4, 0xbc, "Pkts8192to9728Octets" },
124 { 4, 0xc0, "RxDiscarded" },
125 };
126
127 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
128
129 /* MIB counters */
130 static const struct b53_mib_desc b53_mibs[] = {
131 { 8, 0x00, "TxOctets" },
132 { 4, 0x08, "TxDropPkts" },
133 { 4, 0x10, "TxBroadcastPkts" },
134 { 4, 0x14, "TxMulticastPkts" },
135 { 4, 0x18, "TxUnicastPkts" },
136 { 4, 0x1c, "TxCollisions" },
137 { 4, 0x20, "TxSingleCollision" },
138 { 4, 0x24, "TxMultipleCollision" },
139 { 4, 0x28, "TxDeferredTransmit" },
140 { 4, 0x2c, "TxLateCollision" },
141 { 4, 0x30, "TxExcessiveCollision" },
142 { 4, 0x38, "TxPausePkts" },
143 { 8, 0x50, "RxOctets" },
144 { 4, 0x58, "RxUndersizePkts" },
145 { 4, 0x5c, "RxPausePkts" },
146 { 4, 0x60, "Pkts64Octets" },
147 { 4, 0x64, "Pkts65to127Octets" },
148 { 4, 0x68, "Pkts128to255Octets" },
149 { 4, 0x6c, "Pkts256to511Octets" },
150 { 4, 0x70, "Pkts512to1023Octets" },
151 { 4, 0x74, "Pkts1024to1522Octets" },
152 { 4, 0x78, "RxOversizePkts" },
153 { 4, 0x7c, "RxJabbers" },
154 { 4, 0x80, "RxAlignmentErrors" },
155 { 4, 0x84, "RxFCSErrors" },
156 { 8, 0x88, "RxGoodOctets" },
157 { 4, 0x90, "RxDropPkts" },
158 { 4, 0x94, "RxUnicastPkts" },
159 { 4, 0x98, "RxMulticastPkts" },
160 { 4, 0x9c, "RxBroadcastPkts" },
161 { 4, 0xa0, "RxSAChanges" },
162 { 4, 0xa4, "RxFragments" },
163 { 4, 0xa8, "RxJumboPkts" },
164 { 4, 0xac, "RxSymbolErrors" },
165 { 4, 0xc0, "RxDiscarded" },
166 };
167
168 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
169
170 static const struct b53_mib_desc b53_mibs_58xx[] = {
171 { 8, 0x00, "TxOctets" },
172 { 4, 0x08, "TxDropPkts" },
173 { 4, 0x0c, "TxQPKTQ0" },
174 { 4, 0x10, "TxBroadcastPkts" },
175 { 4, 0x14, "TxMulticastPkts" },
176 { 4, 0x18, "TxUnicastPKts" },
177 { 4, 0x1c, "TxCollisions" },
178 { 4, 0x20, "TxSingleCollision" },
179 { 4, 0x24, "TxMultipleCollision" },
180 { 4, 0x28, "TxDeferredCollision" },
181 { 4, 0x2c, "TxLateCollision" },
182 { 4, 0x30, "TxExcessiveCollision" },
183 { 4, 0x34, "TxFrameInDisc" },
184 { 4, 0x38, "TxPausePkts" },
185 { 4, 0x3c, "TxQPKTQ1" },
186 { 4, 0x40, "TxQPKTQ2" },
187 { 4, 0x44, "TxQPKTQ3" },
188 { 4, 0x48, "TxQPKTQ4" },
189 { 4, 0x4c, "TxQPKTQ5" },
190 { 8, 0x50, "RxOctets" },
191 { 4, 0x58, "RxUndersizePkts" },
192 { 4, 0x5c, "RxPausePkts" },
193 { 4, 0x60, "RxPkts64Octets" },
194 { 4, 0x64, "RxPkts65to127Octets" },
195 { 4, 0x68, "RxPkts128to255Octets" },
196 { 4, 0x6c, "RxPkts256to511Octets" },
197 { 4, 0x70, "RxPkts512to1023Octets" },
198 { 4, 0x74, "RxPkts1024toMaxPktsOctets" },
199 { 4, 0x78, "RxOversizePkts" },
200 { 4, 0x7c, "RxJabbers" },
201 { 4, 0x80, "RxAlignmentErrors" },
202 { 4, 0x84, "RxFCSErrors" },
203 { 8, 0x88, "RxGoodOctets" },
204 { 4, 0x90, "RxDropPkts" },
205 { 4, 0x94, "RxUnicastPkts" },
206 { 4, 0x98, "RxMulticastPkts" },
207 { 4, 0x9c, "RxBroadcastPkts" },
208 { 4, 0xa0, "RxSAChanges" },
209 { 4, 0xa4, "RxFragments" },
210 { 4, 0xa8, "RxJumboPkt" },
211 { 4, 0xac, "RxSymblErr" },
212 { 4, 0xb0, "InRangeErrCount" },
213 { 4, 0xb4, "OutRangeErrCount" },
214 { 4, 0xb8, "EEELpiEvent" },
215 { 4, 0xbc, "EEELpiDuration" },
216 { 4, 0xc0, "RxDiscard" },
217 { 4, 0xc8, "TxQPKTQ6" },
218 { 4, 0xcc, "TxQPKTQ7" },
219 { 4, 0xd0, "TxPkts64Octets" },
220 { 4, 0xd4, "TxPkts65to127Octets" },
221 { 4, 0xd8, "TxPkts128to255Octets" },
222 { 4, 0xdc, "TxPkts256to511Ocets" },
223 { 4, 0xe0, "TxPkts512to1023Ocets" },
224 { 4, 0xe4, "TxPkts1024toMaxPktOcets" },
225 };
226
227 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
228
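/* Kick off a VLAN table access command and poll for the hardware to clear
 * the start bit (up to 10 iterations of ~100-200us); returns -EIO if the
 * operation never completes.
 */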
229 static int b53_do_vlan_op(struct b53_device *dev, u8 op)
230 {
231 unsigned int i;
232
233 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
234
235 for (i = 0; i < 10; i++) {
236 u8 vta;
237
238 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
239 if (!(vta & VTA_START_CMD))
240 return 0;
241
242 usleep_range(100, 200);
243 }
244
245 return -EIO;
246 }
247
248 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
249 struct b53_vlan *vlan)
250 {
251 if (is5325(dev)) {
252 u32 entry = 0;
253
254 if (vlan->members) {
255 entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
256 VA_UNTAG_S_25) | vlan->members;
257 if (dev->core_rev >= 3)
258 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
259 else
260 entry |= VA_VALID_25;
261 }
262
263 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
264 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
265 VTA_RW_STATE_WR | VTA_RW_OP_EN);
266 } else if (is5365(dev)) {
267 u16 entry = 0;
268
269 if (vlan->members)
270 entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
271 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
272
273 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
274 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
275 VTA_RW_STATE_WR | VTA_RW_OP_EN);
276 } else {
277 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
278 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
279 (vlan->untag << VTE_UNTAG_S) | vlan->members);
280
281 b53_do_vlan_op(dev, VTA_CMD_WRITE);
282 }
283
284 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
285 vid, vlan->members, vlan->untag);
286 }
287
288 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
289 struct b53_vlan *vlan)
290 {
291 if (is5325(dev)) {
292 u32 entry = 0;
293
294 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
295 VTA_RW_STATE_RD | VTA_RW_OP_EN);
296 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
297
298 if (dev->core_rev >= 3)
299 vlan->valid = !!(entry & VA_VALID_25_R4);
300 else
301 vlan->valid = !!(entry & VA_VALID_25);
302 vlan->members = entry & VA_MEMBER_MASK;
303 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
304
305 } else if (is5365(dev)) {
306 u16 entry = 0;
307
308 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
309 VTA_RW_STATE_WR | VTA_RW_OP_EN);
310 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
311
312 vlan->valid = !!(entry & VA_VALID_65);
313 vlan->members = entry & VA_MEMBER_MASK;
314 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
315 } else {
316 u32 entry = 0;
317
318 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
319 b53_do_vlan_op(dev, VTA_CMD_READ);
320 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
321 vlan->members = entry & VTE_MEMBERS;
322 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
323 vlan->valid = true;
324 }
325 }
326
327 static void b53_set_forwarding(struct b53_device *dev, int enable)
328 {
329 u8 mgmt;
330
331 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
332
333 if (enable)
334 mgmt |= SM_SW_FWD_EN;
335 else
336 mgmt &= ~SM_SW_FWD_EN;
337
338 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
339
340 /* Include IMP port in dumb forwarding mode
341 */
342 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
343 mgmt |= B53_MII_DUMB_FWDG_EN;
344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
345
346 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
347 * frames should be flooded or not.
348 */
349 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
350 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
351 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
352 }
353
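/* Program global 802.1Q behaviour: turn VLAN and VID checking on or off and,
 * when filtering is requested, drop frames that violate ingress VID checks
 * or miss the VLAN table instead of forwarding them.
 */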
354 static void b53_enable_vlan(struct b53_device *dev, bool enable,
355 bool enable_filtering)
356 {
357 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
358
359 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
360 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
361 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
362
363 if (is5325(dev) || is5365(dev)) {
364 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
365 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
366 } else if (is63xx(dev)) {
367 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
368 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
369 } else {
370 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
371 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
372 }
373
374 mgmt &= ~SM_SW_FWD_MODE;
375
376 if (enable) {
377 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
378 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
379 vc4 &= ~VC4_ING_VID_CHECK_MASK;
380 if (enable_filtering) {
381 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
382 vc5 |= VC5_DROP_VTABLE_MISS;
383 } else {
384 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
385 vc5 &= ~VC5_DROP_VTABLE_MISS;
386 }
387
388 if (is5325(dev))
389 vc0 &= ~VC0_RESERVED_1;
390
391 if (is5325(dev) || is5365(dev))
392 vc1 |= VC1_RX_MCST_TAG_EN;
393
394 } else {
395 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
396 vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
397 vc4 &= ~VC4_ING_VID_CHECK_MASK;
398 vc5 &= ~VC5_DROP_VTABLE_MISS;
399
400 if (is5325(dev) || is5365(dev))
401 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
402 else
403 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
404
405 if (is5325(dev) || is5365(dev))
406 vc1 &= ~VC1_RX_MCST_TAG_EN;
407 }
408
409 if (!is5325(dev) && !is5365(dev))
410 vc5 &= ~VC5_VID_FFF_EN;
411
412 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
413 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
414
415 if (is5325(dev) || is5365(dev)) {
416 /* enable the high 8 bit vid check on 5325 */
417 if (is5325(dev) && enable)
418 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
419 VC3_HIGH_8BIT_EN);
420 else
421 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
422
423 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
424 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
425 } else if (is63xx(dev)) {
426 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
427 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
428 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
429 } else {
430 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
431 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
432 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
433 }
434
435 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
436
437 dev->vlan_enabled = enable;
438 }
439
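/* Jumbo frames are not supported on 5325/5365. On other chips, select which
 * ports accept jumbo frames and program the maximum frame size register.
 */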
440 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
441 {
442 u32 port_mask = 0;
443 u16 max_size = JMS_MIN_SIZE;
444
445 if (is5325(dev) || is5365(dev))
446 return -EINVAL;
447
448 if (enable) {
449 port_mask = dev->enabled_ports;
450 max_size = JMS_MAX_SIZE;
451 if (allow_10_100)
452 port_mask |= JPM_10_100_JUMBO_EN;
453 }
454
455 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
456 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
457 }
458
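/* Trigger a fast-age cycle for the scope described by @mask and wait for the
 * FAST_AGE_DONE bit to clear, then restore the default dynamic-only ageing.
 */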
459 static int b53_flush_arl(struct b53_device *dev, u8 mask)
460 {
461 unsigned int i;
462
463 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
464 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
465
466 for (i = 0; i < 10; i++) {
467 u8 fast_age_ctrl;
468
469 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
470 &fast_age_ctrl);
471
472 if (!(fast_age_ctrl & FAST_AGE_DONE))
473 goto out;
474
475 msleep(1);
476 }
477
478 return -ETIMEDOUT;
479 out:
480 /* Only age dynamic entries (default behavior) */
481 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
482 return 0;
483 }
484
485 static int b53_fast_age_port(struct b53_device *dev, int port)
486 {
487 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
488
489 return b53_flush_arl(dev, FAST_AGE_PORT);
490 }
491
492 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
493 {
494 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
495
496 return b53_flush_arl(dev, FAST_AGE_VLAN);
497 }
498
499 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
500 {
501 struct b53_device *dev = ds->priv;
502 unsigned int i;
503 u16 pvlan;
504
505 /* Enable the IMP port to be in the same VLAN as the other ports
506 * on a per-port basis such that we only have Port i and IMP in
507 * the same VLAN.
508 */
509 b53_for_each_port(dev, i) {
510 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
511 pvlan |= BIT(cpu_port);
512 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
513 }
514 }
515 EXPORT_SYMBOL(b53_imp_vlan_setup);
516
517 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
518 {
519 struct b53_device *dev = ds->priv;
520 unsigned int cpu_port;
521 int ret = 0;
522 u16 pvlan;
523
524 if (!dsa_is_user_port(ds, port))
525 return 0;
526
527 cpu_port = ds->ports[port].cpu_dp->index;
528
529 b53_br_egress_floods(ds, port, true, true);
530
531 if (dev->ops->irq_enable)
532 ret = dev->ops->irq_enable(dev, port);
533 if (ret)
534 return ret;
535
536 /* Clear the Rx and Tx disable bits and set to no spanning tree */
537 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
538
539 /* Set this port, and only this one, to be in the default VLAN;
540 * if it is a member of a bridge, restore the membership it had
541 * prior to this port being brought down.
542 */
543 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
544 pvlan &= ~0x1ff;
545 pvlan |= BIT(port);
546 pvlan |= dev->ports[port].vlan_ctl_mask;
547 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
548
549 b53_imp_vlan_setup(ds, cpu_port);
550
551 /* If EEE was enabled, restore it */
552 if (dev->ports[port].eee.eee_enabled)
553 b53_eee_enable_set(ds, port, true);
554
555 return 0;
556 }
557 EXPORT_SYMBOL(b53_enable_port);
558
559 void b53_disable_port(struct dsa_switch *ds, int port)
560 {
561 struct b53_device *dev = ds->priv;
562 u8 reg;
563
564 /* Disable Tx/Rx for the port */
565 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
566 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
567 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
568
569 if (dev->ops->irq_disable)
570 dev->ops->irq_disable(dev, port);
571 }
572 EXPORT_SYMBOL(b53_disable_port);
573
574 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
575 {
576 bool tag_en = !(ds->ops->get_tag_protocol(ds, port) ==
577 DSA_TAG_PROTO_NONE);
578 struct b53_device *dev = ds->priv;
579 u8 hdr_ctl, val;
580 u16 reg;
581
582 /* Resolve which bit controls the Broadcom tag */
583 switch (port) {
584 case 8:
585 val = BRCM_HDR_P8_EN;
586 break;
587 case 7:
588 val = BRCM_HDR_P7_EN;
589 break;
590 case 5:
591 val = BRCM_HDR_P5_EN;
592 break;
593 default:
594 val = 0;
595 break;
596 }
597
598 /* Enable Broadcom tags for IMP port */
599 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
600 if (tag_en)
601 hdr_ctl |= val;
602 else
603 hdr_ctl &= ~val;
604 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
605
606 /* Registers below are only accessible on newer devices */
607 if (!is58xx(dev))
608 return;
609
610 /* Enable reception of Broadcom tags for CPU TX (switch RX) to
611 * allow us to tag outgoing frames
612 */
613 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
614 if (tag_en)
615 reg &= ~BIT(port);
616 else
617 reg |= BIT(port);
618 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
619
620 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
621 * allow delivering frames to the per-port net_devices
622 */
623 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
624 if (tag_en)
625 reg &= ~BIT(port);
626 else
627 reg |= BIT(port);
628 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
629 }
630 EXPORT_SYMBOL(b53_brcm_hdr_setup);
631
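/* Bring up the CPU/IMP port: enable unicast, multicast and broadcast RX,
 * set up Broadcom tagging on it and allow egress flooding.
 */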
632 static void b53_enable_cpu_port(struct b53_device *dev, int port)
633 {
634 u8 port_ctrl;
635
636 /* BCM5325 CPU port is at 8 */
637 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
638 port = B53_CPU_PORT;
639
640 port_ctrl = PORT_CTRL_RX_BCST_EN |
641 PORT_CTRL_RX_MCST_EN |
642 PORT_CTRL_RX_UCST_EN;
643 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
644
645 b53_brcm_hdr_setup(dev->ds, port);
646
647 b53_br_egress_floods(dev->ds, port, true, true);
648 }
649
650 static void b53_enable_mib(struct b53_device *dev)
651 {
652 u8 gc;
653
654 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
655 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
656 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
657 }
658
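/* 5325/5365 use VID 1 as the default PVID, everything else uses VID 0. */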
659 static u16 b53_default_pvid(struct b53_device *dev)
660 {
661 if (is5325(dev) || is5365(dev))
662 return 1;
663 else
664 return 0;
665 }
666
667 int b53_configure_vlan(struct dsa_switch *ds)
668 {
669 struct b53_device *dev = ds->priv;
670 struct b53_vlan vl = { 0 };
671 int i, def_vid;
672
673 def_vid = b53_default_pvid(dev);
674
675 /* clear all vlan entries */
676 if (is5325(dev) || is5365(dev)) {
677 for (i = def_vid; i < dev->num_vlans; i++)
678 b53_set_vlan_entry(dev, i, &vl);
679 } else {
680 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
681 }
682
683 b53_enable_vlan(dev, false, ds->vlan_filtering);
684
685 b53_for_each_port(dev, i)
686 b53_write16(dev, B53_VLAN_PAGE,
687 B53_VLAN_PORT_DEF_TAG(i), def_vid);
688
689 if (!is5325(dev) && !is5365(dev))
690 b53_set_jumbo(dev, dev->enable_jumbo, false);
691
692 return 0;
693 }
694 EXPORT_SYMBOL(b53_configure_vlan);
695
696 static void b53_switch_reset_gpio(struct b53_device *dev)
697 {
698 int gpio = dev->reset_gpio;
699
700 if (gpio < 0)
701 return;
702
703 /* Reset sequence: RESET low(50ms)->high(20ms)
704 */
705 gpio_set_value(gpio, 0);
706 mdelay(50);
707
708 gpio_set_value(gpio, 1);
709 mdelay(20);
710
711 dev->current_page = 0xff;
712 }
713
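/* Full (re)initialization after reset: optional GPIO hard reset, software
 * reset where the chip requires it, then make sure forwarding is enabled,
 * MIB counters are running and static ARL entries are flushed.
 */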
714 static int b53_switch_reset(struct b53_device *dev)
715 {
716 unsigned int timeout = 1000;
717 u8 mgmt, reg;
718
719 b53_switch_reset_gpio(dev);
720
721 if (is539x(dev)) {
722 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
723 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
724 }
725
726 /* This is specific to 58xx devices here, do not use is58xx() which
727 * covers the larger Starfighter 2 family, including 7445/7278 which
728 * still use this driver as a library and need to perform the reset
729 * earlier.
730 */
731 if (dev->chip_id == BCM58XX_DEVICE_ID ||
732 dev->chip_id == BCM583XX_DEVICE_ID) {
733 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
734 reg |= SW_RST | EN_SW_RST | EN_CH_RST;
735 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
736
737 do {
738 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
739 if (!(reg & SW_RST))
740 break;
741
742 usleep_range(1000, 2000);
743 } while (timeout-- > 0);
744
745 if (timeout == 0)
746 return -ETIMEDOUT;
747 }
748
749 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
750
751 if (!(mgmt & SM_SW_FWD_EN)) {
752 mgmt &= ~SM_SW_FWD_MODE;
753 mgmt |= SM_SW_FWD_EN;
754
755 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
756 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
757
758 if (!(mgmt & SM_SW_FWD_EN)) {
759 dev_err(dev->dev, "Failed to enable switch!\n");
760 return -EINVAL;
761 }
762 }
763
764 b53_enable_mib(dev);
765
766 return b53_flush_arl(dev, FAST_AGE_STATIC);
767 }
768
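/* PHY accesses go through a bus-specific phy_read16/phy_write16 op when one
 * is provided, otherwise through the switch's per-port MII register pages.
 */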
769 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
770 {
771 struct b53_device *priv = ds->priv;
772 u16 value = 0;
773 int ret;
774
775 if (priv->ops->phy_read16)
776 ret = priv->ops->phy_read16(priv, addr, reg, &value);
777 else
778 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
779 reg * 2, &value);
780
781 return ret ? ret : value;
782 }
783
784 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
785 {
786 struct b53_device *priv = ds->priv;
787
788 if (priv->ops->phy_write16)
789 return priv->ops->phy_write16(priv, addr, reg, val);
790
791 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
792 }
793
794 static int b53_reset_switch(struct b53_device *priv)
795 {
796 /* reset vlans */
797 priv->enable_jumbo = false;
798
799 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
800 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
801
802 priv->serdes_lane = B53_INVALID_LANE;
803
804 return b53_switch_reset(priv);
805 }
806
807 static int b53_apply_config(struct b53_device *priv)
808 {
809 /* disable switching */
810 b53_set_forwarding(priv, 0);
811
812 b53_configure_vlan(priv->ds);
813
814 /* enable switching */
815 b53_set_forwarding(priv, 1);
816
817 return 0;
818 }
819
820 static void b53_reset_mib(struct b53_device *priv)
821 {
822 u8 gc;
823
824 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
825
826 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
827 msleep(1);
828 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
829 msleep(1);
830 }
831
832 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
833 {
834 if (is5365(dev))
835 return b53_mibs_65;
836 else if (is63xx(dev))
837 return b53_mibs_63xx;
838 else if (is58xx(dev))
839 return b53_mibs_58xx;
840 else
841 return b53_mibs;
842 }
843
844 static unsigned int b53_get_mib_size(struct b53_device *dev)
845 {
846 if (is5365(dev))
847 return B53_MIBS_65_SIZE;
848 else if (is63xx(dev))
849 return B53_MIBS_63XX_SIZE;
850 else if (is58xx(dev))
851 return B53_MIBS_58XX_SIZE;
852 else
853 return B53_MIBS_SIZE;
854 }
855
856 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
857 {
858 /* These ports typically do not have built-in PHYs */
859 switch (port) {
860 case B53_CPU_PORT_25:
861 case 7:
862 case B53_CPU_PORT:
863 return NULL;
864 }
865
866 return mdiobus_get_phy(ds->slave_mii_bus, port);
867 }
868
869 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
870 uint8_t *data)
871 {
872 struct b53_device *dev = ds->priv;
873 const struct b53_mib_desc *mibs = b53_get_mib(dev);
874 unsigned int mib_size = b53_get_mib_size(dev);
875 struct phy_device *phydev;
876 unsigned int i;
877
878 if (stringset == ETH_SS_STATS) {
879 for (i = 0; i < mib_size; i++)
880 strlcpy(data + i * ETH_GSTRING_LEN,
881 mibs[i].name, ETH_GSTRING_LEN);
882 } else if (stringset == ETH_SS_PHY_STATS) {
883 phydev = b53_get_phy_device(ds, port);
884 if (!phydev)
885 return;
886
887 phy_ethtool_get_strings(phydev, data);
888 }
889 }
890 EXPORT_SYMBOL(b53_get_strings);
891
892 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
893 {
894 struct b53_device *dev = ds->priv;
895 const struct b53_mib_desc *mibs = b53_get_mib(dev);
896 unsigned int mib_size = b53_get_mib_size(dev);
897 const struct b53_mib_desc *s;
898 unsigned int i;
899 u64 val = 0;
900
901 if (is5365(dev) && port == 5)
902 port = 8;
903
904 mutex_lock(&dev->stats_mutex);
905
906 for (i = 0; i < mib_size; i++) {
907 s = &mibs[i];
908
909 if (s->size == 8) {
910 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
911 } else {
912 u32 val32;
913
914 b53_read32(dev, B53_MIB_PAGE(port), s->offset,
915 &val32);
916 val = val32;
917 }
918 data[i] = (u64)val;
919 }
920
921 mutex_unlock(&dev->stats_mutex);
922 }
923 EXPORT_SYMBOL(b53_get_ethtool_stats);
924
925 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
926 {
927 struct phy_device *phydev;
928
929 phydev = b53_get_phy_device(ds, port);
930 if (!phydev)
931 return;
932
933 phy_ethtool_get_stats(phydev, NULL, data);
934 }
935 EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
936
937 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
938 {
939 struct b53_device *dev = ds->priv;
940 struct phy_device *phydev;
941
942 if (sset == ETH_SS_STATS) {
943 return b53_get_mib_size(dev);
944 } else if (sset == ETH_SS_PHY_STATS) {
945 phydev = b53_get_phy_device(ds, port);
946 if (!phydev)
947 return 0;
948
949 return phy_ethtool_get_sset_count(phydev);
950 }
951
952 return 0;
953 }
954 EXPORT_SYMBOL(b53_get_sset_count);
955
956 static int b53_setup(struct dsa_switch *ds)
957 {
958 struct b53_device *dev = ds->priv;
959 unsigned int port;
960 int ret;
961
962 ret = b53_reset_switch(dev);
963 if (ret) {
964 dev_err(ds->dev, "failed to reset switch\n");
965 return ret;
966 }
967
968 b53_reset_mib(dev);
969
970 ret = b53_apply_config(dev);
971 if (ret)
972 dev_err(ds->dev, "failed to apply configuration\n");
973
974 /* Configure IMP/CPU port, disable all other ports. Enabled
975 * ports will be configured with .port_enable
976 */
977 for (port = 0; port < dev->num_ports; port++) {
978 if (dsa_is_cpu_port(ds, port))
979 b53_enable_cpu_port(dev, port);
980 else
981 b53_disable_port(ds, port);
982 }
983
984 /* Let DSA handle the case where multiple bridges span the same switch
985 * device and different VLAN awareness settings are requested, which
986 * would be breaking filtering semantics for any of the other bridge
987 * devices. (not hardware supported)
988 */
989 ds->vlan_filtering_is_global = true;
990
991 return ret;
992 }
993
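/* Force the link state through the (GMII) port override register so the MAC
 * no longer follows what the attached PHY reports.
 */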
994 static void b53_force_link(struct b53_device *dev, int port, int link)
995 {
996 u8 reg, val, off;
997
998 /* Override the port settings */
999 if (port == dev->cpu_port) {
1000 off = B53_PORT_OVERRIDE_CTRL;
1001 val = PORT_OVERRIDE_EN;
1002 } else {
1003 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1004 val = GMII_PO_EN;
1005 }
1006
1007 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1008 reg |= val;
1009 if (link)
1010 reg |= PORT_OVERRIDE_LINK;
1011 else
1012 reg &= ~PORT_OVERRIDE_LINK;
1013 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1014 }
1015
1016 static void b53_force_port_config(struct b53_device *dev, int port,
1017 int speed, int duplex, int pause)
1018 {
1019 u8 reg, val, off;
1020
1021 /* Override the port settings */
1022 if (port == dev->cpu_port) {
1023 off = B53_PORT_OVERRIDE_CTRL;
1024 val = PORT_OVERRIDE_EN;
1025 } else {
1026 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1027 val = GMII_PO_EN;
1028 }
1029
1030 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1031 reg |= val;
1032 if (duplex == DUPLEX_FULL)
1033 reg |= PORT_OVERRIDE_FULL_DUPLEX;
1034 else
1035 reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
1036
1037 switch (speed) {
1038 case 2000:
1039 reg |= PORT_OVERRIDE_SPEED_2000M;
1040 /* fallthrough */
1041 case SPEED_1000:
1042 reg |= PORT_OVERRIDE_SPEED_1000M;
1043 break;
1044 case SPEED_100:
1045 reg |= PORT_OVERRIDE_SPEED_100M;
1046 break;
1047 case SPEED_10:
1048 reg |= PORT_OVERRIDE_SPEED_10M;
1049 break;
1050 default:
1051 dev_err(dev->dev, "unknown speed: %d\n", speed);
1052 return;
1053 }
1054
1055 if (pause & MLO_PAUSE_RX)
1056 reg |= PORT_OVERRIDE_RX_FLOW;
1057 if (pause & MLO_PAUSE_TX)
1058 reg |= PORT_OVERRIDE_TX_FLOW;
1059
1060 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1061 }
1062
1063 static void b53_adjust_link(struct dsa_switch *ds, int port,
1064 struct phy_device *phydev)
1065 {
1066 struct b53_device *dev = ds->priv;
1067 struct ethtool_eee *p = &dev->ports[port].eee;
1068 u8 rgmii_ctrl = 0, reg = 0, off;
1069 int pause = 0;
1070
1071 if (!phy_is_pseudo_fixed_link(phydev))
1072 return;
1073
1074 /* Enable flow control on BCM5301x's CPU port */
1075 if (is5301x(dev) && port == dev->cpu_port)
1076 pause = MLO_PAUSE_TXRX_MASK;
1077
1078 if (phydev->pause) {
1079 if (phydev->asym_pause)
1080 pause |= MLO_PAUSE_TX;
1081 pause |= MLO_PAUSE_RX;
1082 }
1083
1084 b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause);
1085 b53_force_link(dev, port, phydev->link);
1086
1087 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
1088 if (port == 8)
1089 off = B53_RGMII_CTRL_IMP;
1090 else
1091 off = B53_RGMII_CTRL_P(port);
1092
1093 /* Configure the port RGMII clock delay with the DLLs disabled and
1094 * tx_clk aligned timing (restoring the reset defaults)
1095 */
1096 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
1097 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
1098 RGMII_CTRL_TIMING_SEL);
1099
1100 * PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay; make
1101 * sure that we enable the port TX clock internal delay to
1102 * account for this internal delay that is inserted, otherwise
1103 * the switch won't be able to receive correctly.
1104 *
1105 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
1106 * any delay on either transmission or reception, so the
1107 * BCM53125 must be configured accordingly to account for the
1108 * lack of delay and introduce the delays itself on both paths.
1109 *
1110 * The BCM53125 switch has its RX clock and TX clock control
1111 * swapped, hence the reason why we modify the TX clock path in
1112 * the "RGMII" case
1113 */
1114 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
1115 rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
1116 if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
1117 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
1118 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
1119 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
1120
1121 dev_info(ds->dev, "Configured port %d for %s\n", port,
1122 phy_modes(phydev->interface));
1123 }
1124
1125 /* configure MII port if necessary */
1126 if (is5325(dev)) {
1127 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1128 &reg);
1129
1130 /* reverse mii needs to be enabled */
1131 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1132 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1133 reg | PORT_OVERRIDE_RV_MII_25);
1134 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1135 &reg);
1136
1137 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1138 dev_err(ds->dev,
1139 "Failed to enable reverse MII mode\n");
1140 return;
1141 }
1142 }
1143 } else if (is5301x(dev)) {
1144 if (port != dev->cpu_port) {
1145 b53_force_port_config(dev, dev->cpu_port, 2000,
1146 DUPLEX_FULL, MLO_PAUSE_TXRX_MASK);
1147 b53_force_link(dev, dev->cpu_port, 1);
1148 }
1149 }
1150
1151 /* Re-negotiate EEE if it was enabled already */
1152 p->eee_enabled = b53_eee_init(ds, port, phydev);
1153 }
1154
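/* Typically called from the switch interrupt handlers to report a link
 * change: read the per-port bit from the link status register and notify
 * phylink.
 */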
1155 void b53_port_event(struct dsa_switch *ds, int port)
1156 {
1157 struct b53_device *dev = ds->priv;
1158 bool link;
1159 u16 sts;
1160
1161 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1162 link = !!(sts & BIT(port));
1163 dsa_port_phylink_mac_change(ds, port, link);
1164 }
1165 EXPORT_SYMBOL(b53_port_event);
1166
1167 void b53_phylink_validate(struct dsa_switch *ds, int port,
1168 unsigned long *supported,
1169 struct phylink_link_state *state)
1170 {
1171 struct b53_device *dev = ds->priv;
1172 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1173
1174 if (dev->ops->serdes_phylink_validate)
1175 dev->ops->serdes_phylink_validate(dev, port, mask, state);
1176
1177 /* Allow all the expected bits */
1178 phylink_set(mask, Autoneg);
1179 phylink_set_port_modes(mask);
1180 phylink_set(mask, Pause);
1181 phylink_set(mask, Asym_Pause);
1182
1183 /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
1184 * support Gigabit, including Half duplex.
1185 */
1186 if (state->interface != PHY_INTERFACE_MODE_MII &&
1187 state->interface != PHY_INTERFACE_MODE_REVMII &&
1188 !phy_interface_mode_is_8023z(state->interface) &&
1189 !(is5325(dev) || is5365(dev))) {
1190 phylink_set(mask, 1000baseT_Full);
1191 phylink_set(mask, 1000baseT_Half);
1192 }
1193
1194 if (!phy_interface_mode_is_8023z(state->interface)) {
1195 phylink_set(mask, 10baseT_Half);
1196 phylink_set(mask, 10baseT_Full);
1197 phylink_set(mask, 100baseT_Half);
1198 phylink_set(mask, 100baseT_Full);
1199 }
1200
1201 bitmap_and(supported, supported, mask,
1202 __ETHTOOL_LINK_MODE_MASK_NBITS);
1203 bitmap_and(state->advertising, state->advertising, mask,
1204 __ETHTOOL_LINK_MODE_MASK_NBITS);
1205
1206 phylink_helper_basex_speed(state);
1207 }
1208 EXPORT_SYMBOL(b53_phylink_validate);
1209
1210 int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
1211 struct phylink_link_state *state)
1212 {
1213 struct b53_device *dev = ds->priv;
1214 int ret = -EOPNOTSUPP;
1215
1216 if ((phy_interface_mode_is_8023z(state->interface) ||
1217 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1218 dev->ops->serdes_link_state)
1219 ret = dev->ops->serdes_link_state(dev, port, state);
1220
1221 return ret;
1222 }
1223 EXPORT_SYMBOL(b53_phylink_mac_link_state);
1224
1225 void b53_phylink_mac_config(struct dsa_switch *ds, int port,
1226 unsigned int mode,
1227 const struct phylink_link_state *state)
1228 {
1229 struct b53_device *dev = ds->priv;
1230
1231 if (mode == MLO_AN_PHY)
1232 return;
1233
1234 if (mode == MLO_AN_FIXED) {
1235 b53_force_port_config(dev, port, state->speed,
1236 state->duplex, state->pause);
1237 return;
1238 }
1239
1240 if ((phy_interface_mode_is_8023z(state->interface) ||
1241 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1242 dev->ops->serdes_config)
1243 dev->ops->serdes_config(dev, port, mode, state);
1244 }
1245 EXPORT_SYMBOL(b53_phylink_mac_config);
1246
1247 void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
1248 {
1249 struct b53_device *dev = ds->priv;
1250
1251 if (dev->ops->serdes_an_restart)
1252 dev->ops->serdes_an_restart(dev, port);
1253 }
1254 EXPORT_SYMBOL(b53_phylink_mac_an_restart);
1255
1256 void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
1257 unsigned int mode,
1258 phy_interface_t interface)
1259 {
1260 struct b53_device *dev = ds->priv;
1261
1262 if (mode == MLO_AN_PHY)
1263 return;
1264
1265 if (mode == MLO_AN_FIXED) {
1266 b53_force_link(dev, port, false);
1267 return;
1268 }
1269
1270 if (phy_interface_mode_is_8023z(interface) &&
1271 dev->ops->serdes_link_set)
1272 dev->ops->serdes_link_set(dev, port, mode, interface, false);
1273 }
1274 EXPORT_SYMBOL(b53_phylink_mac_link_down);
1275
1276 void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
1277 unsigned int mode,
1278 phy_interface_t interface,
1279 struct phy_device *phydev)
1280 {
1281 struct b53_device *dev = ds->priv;
1282
1283 if (mode == MLO_AN_PHY)
1284 return;
1285
1286 if (mode == MLO_AN_FIXED) {
1287 b53_force_link(dev, port, true);
1288 return;
1289 }
1290
1291 if (phy_interface_mode_is_8023z(interface) &&
1292 dev->ops->serdes_link_set)
1293 dev->ops->serdes_link_set(dev, port, mode, interface, true);
1294 }
1295 EXPORT_SYMBOL(b53_phylink_mac_link_up);
1296
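/* Turning VLAN filtering off reverts the port to the default PVID while
 * remembering the current one, so it can be restored when filtering is
 * enabled again.
 */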
1297 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
1298 {
1299 struct b53_device *dev = ds->priv;
1300 u16 pvid, new_pvid;
1301
1302 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1303 new_pvid = pvid;
1304 if (!vlan_filtering) {
1305 /* Filtering is currently enabled, use the default PVID since
1306 * the bridge does not expect tagging anymore
1307 */
1308 dev->ports[port].pvid = pvid;
1309 new_pvid = b53_default_pvid(dev);
1310 } else {
1311 /* Filtering is currently disabled, restore the previous PVID */
1312 new_pvid = dev->ports[port].pvid;
1313 }
1314
1315 if (pvid != new_pvid)
1316 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1317 new_pvid);
1318
1319 b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
1320
1321 return 0;
1322 }
1323 EXPORT_SYMBOL(b53_vlan_filtering);
1324
1325 int b53_vlan_prepare(struct dsa_switch *ds, int port,
1326 const struct switchdev_obj_port_vlan *vlan)
1327 {
1328 struct b53_device *dev = ds->priv;
1329
1330 if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
1331 return -EOPNOTSUPP;
1332
1333 if (vlan->vid_end > dev->num_vlans)
1334 return -ERANGE;
1335
1336 b53_enable_vlan(dev, true, ds->vlan_filtering);
1337
1338 return 0;
1339 }
1340 EXPORT_SYMBOL(b53_vlan_prepare);
1341
1342 void b53_vlan_add(struct dsa_switch *ds, int port,
1343 const struct switchdev_obj_port_vlan *vlan)
1344 {
1345 struct b53_device *dev = ds->priv;
1346 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1347 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1348 struct b53_vlan *vl;
1349 u16 vid;
1350
1351 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1352 vl = &dev->vlans[vid];
1353
1354 b53_get_vlan_entry(dev, vid, vl);
1355
1356 vl->members |= BIT(port);
1357 if (untagged && !dsa_is_cpu_port(ds, port))
1358 vl->untag |= BIT(port);
1359 else
1360 vl->untag &= ~BIT(port);
1361
1362 b53_set_vlan_entry(dev, vid, vl);
1363 b53_fast_age_vlan(dev, vid);
1364 }
1365
1366 if (pvid && !dsa_is_cpu_port(ds, port)) {
1367 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1368 vlan->vid_end);
1369 b53_fast_age_vlan(dev, vid);
1370 }
1371 }
1372 EXPORT_SYMBOL(b53_vlan_add);
1373
1374 int b53_vlan_del(struct dsa_switch *ds, int port,
1375 const struct switchdev_obj_port_vlan *vlan)
1376 {
1377 struct b53_device *dev = ds->priv;
1378 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1379 struct b53_vlan *vl;
1380 u16 vid;
1381 u16 pvid;
1382
1383 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1384
1385 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1386 vl = &dev->vlans[vid];
1387
1388 b53_get_vlan_entry(dev, vid, vl);
1389
1390 vl->members &= ~BIT(port);
1391
1392 if (pvid == vid)
1393 pvid = b53_default_pvid(dev);
1394
1395 if (untagged && !dsa_is_cpu_port(ds, port))
1396 vl->untag &= ~(BIT(port));
1397
1398 b53_set_vlan_entry(dev, vid, vl);
1399 b53_fast_age_vlan(dev, vid);
1400 }
1401
1402 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1403 b53_fast_age_vlan(dev, pvid);
1404
1405 return 0;
1406 }
1407 EXPORT_SYMBOL(b53_vlan_del);
1408
1409 /* Address Resolution Logic routines */
1410 static int b53_arl_op_wait(struct b53_device *dev)
1411 {
1412 unsigned int timeout = 10;
1413 u8 reg;
1414
1415 do {
1416 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1417 if (!(reg & ARLTBL_START_DONE))
1418 return 0;
1419
1420 usleep_range(1000, 2000);
1421 } while (timeout--);
1422
1423 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1424
1425 return -ETIMEDOUT;
1426 }
1427
1428 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
1429 {
1430 u8 reg;
1431
1432 if (op > ARLTBL_RW)
1433 return -EINVAL;
1434
1435 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1436 reg |= ARLTBL_START_DONE;
1437 if (op)
1438 reg |= ARLTBL_RW;
1439 else
1440 reg &= ~ARLTBL_RW;
1441 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1442
1443 return b53_arl_op_wait(dev);
1444 }
1445
1446 static int b53_arl_read(struct b53_device *dev, u64 mac,
1447 u16 vid, struct b53_arl_entry *ent, u8 *idx,
1448 bool is_valid)
1449 {
1450 unsigned int i;
1451 int ret;
1452
1453 ret = b53_arl_op_wait(dev);
1454 if (ret)
1455 return ret;
1456
1457 /* Read the bins */
1458 for (i = 0; i < dev->num_arl_entries; i++) {
1459 u64 mac_vid;
1460 u32 fwd_entry;
1461
1462 b53_read64(dev, B53_ARLIO_PAGE,
1463 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
1464 b53_read32(dev, B53_ARLIO_PAGE,
1465 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
1466 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1467
1468 if (!(fwd_entry & ARLTBL_VALID))
1469 continue;
1470 if ((mac_vid & ARLTBL_MAC_MASK) != mac)
1471 continue;
1472 *idx = i;
/* A matching bin was found, report it to the caller */
return 0;
1473 }
1474
1475 return -ENOENT;
1476 }
1477
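/* ARL insert/delete: program the MAC/VID index registers, issue a read to
 * locate a matching bin, then write the (new or cleared) forward entry back
 * and commit it with a write command.
 */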
1478 static int b53_arl_op(struct b53_device *dev, int op, int port,
1479 const unsigned char *addr, u16 vid, bool is_valid)
1480 {
1481 struct b53_arl_entry ent;
1482 u32 fwd_entry;
1483 u64 mac, mac_vid = 0;
1484 u8 idx = 0;
1485 int ret;
1486
1487 /* Convert the array into a 64-bit MAC */
1488 mac = ether_addr_to_u64(addr);
1489
1490 /* Perform a read for the given MAC and VID */
1491 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1492 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1493
1494 /* Issue a read operation for this MAC */
1495 ret = b53_arl_rw_op(dev, 1);
1496 if (ret)
1497 return ret;
1498
1499 ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
1500 /* If this is a read, just finish now */
1501 if (op)
1502 return ret;
1503
1504 /* We could not find a matching MAC, so reset to a new entry */
1505 if (ret) {
1506 fwd_entry = 0;
1507 idx = 1;
1508 }
1509
1510 memset(&ent, 0, sizeof(ent));
1511 ent.port = port;
1512 ent.is_valid = is_valid;
1513 ent.vid = vid;
1514 ent.is_static = true;
1515 memcpy(ent.mac, addr, ETH_ALEN);
1516 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
1517
1518 b53_write64(dev, B53_ARLIO_PAGE,
1519 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1520 b53_write32(dev, B53_ARLIO_PAGE,
1521 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1522
1523 return b53_arl_rw_op(dev, 0);
1524 }
1525
1526 int b53_fdb_add(struct dsa_switch *ds, int port,
1527 const unsigned char *addr, u16 vid)
1528 {
1529 struct b53_device *priv = ds->priv;
1530
1531 /* 5325 and 5365 require some more massaging, but could
1532 * be supported eventually
1533 */
1534 if (is5325(priv) || is5365(priv))
1535 return -EOPNOTSUPP;
1536
1537 return b53_arl_op(priv, 0, port, addr, vid, true);
1538 }
1539 EXPORT_SYMBOL(b53_fdb_add);
1540
1541 int b53_fdb_del(struct dsa_switch *ds, int port,
1542 const unsigned char *addr, u16 vid)
1543 {
1544 struct b53_device *priv = ds->priv;
1545
1546 return b53_arl_op(priv, 0, port, addr, vid, false);
1547 }
1548 EXPORT_SYMBOL(b53_fdb_del);
1549
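/* Poll an in-progress ARL search: done when the search bit clears or a valid
 * result is latched, -ETIMEDOUT otherwise.
 */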
1550 static int b53_arl_search_wait(struct b53_device *dev)
1551 {
1552 unsigned int timeout = 1000;
1553 u8 reg;
1554
1555 do {
1556 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
1557 if (!(reg & ARL_SRCH_STDN))
1558 return 0;
1559
1560 if (reg & ARL_SRCH_VLID)
1561 return 0;
1562
1563 usleep_range(1000, 2000);
1564 } while (timeout--);
1565
1566 return -ETIMEDOUT;
1567 }
1568
1569 static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
1570 struct b53_arl_entry *ent)
1571 {
1572 u64 mac_vid;
1573 u32 fwd_entry;
1574
1575 b53_read64(dev, B53_ARLIO_PAGE,
1576 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
1577 b53_read32(dev, B53_ARLIO_PAGE,
1578 B53_ARL_SRCH_RSTL(idx), &fwd_entry);
1579 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1580 }
1581
1582 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
1583 dsa_fdb_dump_cb_t *cb, void *data)
1584 {
1585 if (!ent->is_valid)
1586 return 0;
1587
1588 if (port != ent->port)
1589 return 0;
1590
1591 return cb(ent->mac, ent->vid, ent->is_static, data);
1592 }
1593
1594 int b53_fdb_dump(struct dsa_switch *ds, int port,
1595 dsa_fdb_dump_cb_t *cb, void *data)
1596 {
1597 struct b53_device *priv = ds->priv;
1598 struct b53_arl_entry results[2];
1599 unsigned int count = 0;
1600 int ret;
1601 u8 reg;
1602
1603 /* Start search operation */
1604 reg = ARL_SRCH_STDN;
1605 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
1606
1607 do {
1608 ret = b53_arl_search_wait(priv);
1609 if (ret)
1610 return ret;
1611
1612 b53_arl_search_rd(priv, 0, &results[0]);
1613 ret = b53_fdb_copy(port, &results[0], cb, data);
1614 if (ret)
1615 return ret;
1616
1617 if (priv->num_arl_entries > 2) {
1618 b53_arl_search_rd(priv, 1, &results[1]);
1619 ret = b53_fdb_copy(port, &results[1], cb, data);
1620 if (ret)
1621 return ret;
1622
1623 if (!results[0].is_valid && !results[1].is_valid)
1624 break;
1625 }
1626
1627 } while (count++ < 1024);
1628
1629 return 0;
1630 }
1631 EXPORT_SYMBOL(b53_fdb_dump);
1632
1633 int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
1634 {
1635 struct b53_device *dev = ds->priv;
1636 s8 cpu_port = ds->ports[port].cpu_dp->index;
1637 u16 pvlan, reg;
1638 unsigned int i;
1639
1640 /* Make this port leave the "join all VLANs" setting since we will have proper
1641 * VLAN entries from now on
1642 */
1643 if (is58xx(dev)) {
1644 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1645 reg &= ~BIT(port);
1646 if ((reg & BIT(cpu_port)) == BIT(cpu_port))
1647 reg &= ~BIT(cpu_port);
1648 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1649 }
1650
1651 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1652
1653 b53_for_each_port(dev, i) {
1654 if (dsa_to_port(ds, i)->bridge_dev != br)
1655 continue;
1656
1657 /* Add this local port to the remote port VLAN control
1658 * membership and update the remote port bitmask
1659 */
1660 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1661 reg |= BIT(port);
1662 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1663 dev->ports[i].vlan_ctl_mask = reg;
1664
1665 pvlan |= BIT(i);
1666 }
1667
1668 /* Configure the local port VLAN control membership to include
1669 * remote ports and update the local port bitmask
1670 */
1671 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1672 dev->ports[port].vlan_ctl_mask = pvlan;
1673
1674 return 0;
1675 }
1676 EXPORT_SYMBOL(b53_br_join);
1677
1678 void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
1679 {
1680 struct b53_device *dev = ds->priv;
1681 struct b53_vlan *vl = &dev->vlans[0];
1682 s8 cpu_port = ds->ports[port].cpu_dp->index;
1683 unsigned int i;
1684 u16 pvlan, reg, pvid;
1685
1686 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1687
1688 b53_for_each_port(dev, i) {
1689 /* Don't touch the remaining ports */
1690 if (dsa_to_port(ds, i)->bridge_dev != br)
1691 continue;
1692
1693 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1694 reg &= ~BIT(port);
1695 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1696 dev->ports[port].vlan_ctl_mask = reg;
1697
1698 /* Prevent self removal to preserve isolation */
1699 if (port != i)
1700 pvlan &= ~BIT(i);
1701 }
1702
1703 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1704 dev->ports[port].vlan_ctl_mask = pvlan;
1705
1706 pvid = b53_default_pvid(dev);
1707
1708 /* Make this port join all VLANs without VLAN entries */
1709 if (is58xx(dev)) {
1710 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1711 reg |= BIT(port);
1712 if (!(reg & BIT(cpu_port)))
1713 reg |= BIT(cpu_port);
1714 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1715 } else {
1716 b53_get_vlan_entry(dev, pvid, vl);
1717 vl->members |= BIT(port) | BIT(cpu_port);
1718 vl->untag |= BIT(port) | BIT(cpu_port);
1719 b53_set_vlan_entry(dev, pvid, vl);
1720 }
1721 }
1722 EXPORT_SYMBOL(b53_br_leave);
1723
1724 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
1725 {
1726 struct b53_device *dev = ds->priv;
1727 u8 hw_state;
1728 u8 reg;
1729
1730 switch (state) {
1731 case BR_STATE_DISABLED:
1732 hw_state = PORT_CTRL_DIS_STATE;
1733 break;
1734 case BR_STATE_LISTENING:
1735 hw_state = PORT_CTRL_LISTEN_STATE;
1736 break;
1737 case BR_STATE_LEARNING:
1738 hw_state = PORT_CTRL_LEARN_STATE;
1739 break;
1740 case BR_STATE_FORWARDING:
1741 hw_state = PORT_CTRL_FWD_STATE;
1742 break;
1743 case BR_STATE_BLOCKING:
1744 hw_state = PORT_CTRL_BLOCK_STATE;
1745 break;
1746 default:
1747 dev_err(ds->dev, "invalid STP state: %d\n", state);
1748 return;
1749 }
1750
1751 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
1752 reg &= ~PORT_CTRL_STP_STATE_MASK;
1753 reg |= hw_state;
1754 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
1755 }
1756 EXPORT_SYMBOL(b53_br_set_stp_state);
1757
1758 void b53_br_fast_age(struct dsa_switch *ds, int port)
1759 {
1760 struct b53_device *dev = ds->priv;
1761
1762 if (b53_fast_age_port(dev, port))
1763 dev_err(ds->dev, "fast ageing failed\n");
1764 }
1765 EXPORT_SYMBOL(b53_br_fast_age);
1766
1767 int b53_br_egress_floods(struct dsa_switch *ds, int port,
1768 bool unicast, bool multicast)
1769 {
1770 struct b53_device *dev = ds->priv;
1771 u16 uc, mc;
1772
1773 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
1774 if (unicast)
1775 uc |= BIT(port);
1776 else
1777 uc &= ~BIT(port);
1778 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
1779
1780 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
1781 if (multicast)
1782 mc |= BIT(port);
1783 else
1784 mc &= ~BIT(port);
1785 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
1786
1787 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
1788 if (multicast)
1789 mc |= BIT(port);
1790 else
1791 mc &= ~BIT(port);
1792 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
1793
1794 return 0;
1795
1796 }
1797 EXPORT_SYMBOL(b53_br_egress_floods);
1798
1799 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
1800 {
1801 /* Broadcom switches will accept enabling Broadcom tags on the
1802 * following ports: 5, 7 and 8; any other port is not supported
1803 */
1804 switch (port) {
1805 case B53_CPU_PORT_25:
1806 case 7:
1807 case B53_CPU_PORT:
1808 return true;
1809 }
1810
1811 return false;
1812 }
1813
1814 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
1815 {
1816 bool ret = b53_possible_cpu_port(ds, port);
1817
1818 if (!ret)
1819 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
1820 port);
1821 return ret;
1822 }
1823
1824 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
1825 {
1826 struct b53_device *dev = ds->priv;
1827
1828 /* Older models (5325, 5365) support a different tag format that we do
1829 * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
1830 * mode to be turned on which means we need to specifically manage ARL
1831 * misses on multicast addresses (TBD).
1832 */
1833 if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
1834 !b53_can_enable_brcm_tags(ds, port))
1835 return DSA_TAG_PROTO_NONE;
1836
1837 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
1838 * which requires us to use the prepended Broadcom tag type
1839 */
1840 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT)
1841 return DSA_TAG_PROTO_BRCM_PREPEND;
1842
1843 return DSA_TAG_PROTO_BRCM;
1844 }
1845 EXPORT_SYMBOL(b53_get_tag_protocol);
1846
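/* Add @port to the ingress or egress mirror mask and make
 * mirror->to_local_port the capture port.
 */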
int b53_mirror_add(struct dsa_switch *ds, int port,
		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
{
	struct b53_device *dev = ds->priv;
	u16 reg, loc;

	if (ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg |= BIT(port);
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	reg &= ~CAP_PORT_MASK;
	reg |= mirror->to_local_port;
	reg |= MIRROR_EN;
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);

	return 0;
}
EXPORT_SYMBOL(b53_mirror_add);

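/* Remove @port from its mirror mask and turn mirroring off entirely once
 * neither the ingress nor the egress mask references any port.
 */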
void b53_mirror_del(struct dsa_switch *ds, int port,
		    struct dsa_mall_mirror_tc_entry *mirror)
{
	struct b53_device *dev = ds->priv;
	bool loc_disable = false, other_loc_disable = false;
	u16 reg, loc;

	if (mirror->ingress)
		loc = B53_IG_MIR_CTL;
	else
		loc = B53_EG_MIR_CTL;

	/* Update the desired ingress/egress register */
	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
	reg &= ~BIT(port);
	if (!(reg & MIRROR_MASK))
		loc_disable = true;
	b53_write16(dev, B53_MGMT_PAGE, loc, reg);

	/* Now look at the other one to know if we can disable mirroring
	 * entirely
	 */
	if (mirror->ingress)
		b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
	else
		b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
	if (!(reg & MIRROR_MASK))
		other_loc_disable = true;

	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
	/* Both no longer have ports, let's disable mirroring */
	if (loc_disable && other_loc_disable) {
		reg &= ~MIRROR_EN;
		reg &= ~mirror->to_local_port;
	}
	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}
EXPORT_SYMBOL(b53_mirror_del);

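/* Set or clear @port's bit in the EEE enable control register. */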
void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct b53_device *dev = ds->priv;
	u16 reg;

	b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
	if (enable)
		reg |= BIT(port);
	else
		reg &= ~BIT(port);
	b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}
EXPORT_SYMBOL(b53_eee_enable_set);

/* Returns 0 if EEE was not enabled, or 1 otherwise */
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	int ret;

	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	b53_eee_enable_set(ds, port, true);

	return 1;
}
EXPORT_SYMBOL(b53_eee_init);

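/* Report the cached EEE enable state and whether LPI is currently indicated
 * for @port. Not available on BCM5325/BCM5365.
 */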
int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;
	u16 reg;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & BIT(port));

	return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);

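/* Cache the requested EEE enable state and program the switch accordingly. */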
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct b53_device *dev = ds->priv;
	struct ethtool_eee *p = &dev->ports[port].eee;

	if (is5325(dev) || is5365(dev))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;
	b53_eee_enable_set(ds, port, e->eee_enabled);

	return 0;
}
EXPORT_SYMBOL(b53_set_mac_eee);

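/* DSA operations shared by every bus front-end that registers a switch
 * through b53_switch_alloc() below.
 */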
static const struct dsa_switch_ops b53_switch_ops = {
	.get_tag_protocol = b53_get_tag_protocol,
	.setup = b53_setup,
	.get_strings = b53_get_strings,
	.get_ethtool_stats = b53_get_ethtool_stats,
	.get_sset_count = b53_get_sset_count,
	.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
	.phy_read = b53_phy_read16,
	.phy_write = b53_phy_write16,
	.adjust_link = b53_adjust_link,
	.phylink_validate = b53_phylink_validate,
	.phylink_mac_link_state = b53_phylink_mac_link_state,
	.phylink_mac_config = b53_phylink_mac_config,
	.phylink_mac_an_restart = b53_phylink_mac_an_restart,
	.phylink_mac_link_down = b53_phylink_mac_link_down,
	.phylink_mac_link_up = b53_phylink_mac_link_up,
	.port_enable = b53_enable_port,
	.port_disable = b53_disable_port,
	.get_mac_eee = b53_get_mac_eee,
	.set_mac_eee = b53_set_mac_eee,
	.port_bridge_join = b53_br_join,
	.port_bridge_leave = b53_br_leave,
	.port_stp_state_set = b53_br_set_stp_state,
	.port_fast_age = b53_br_fast_age,
	.port_egress_floods = b53_br_egress_floods,
	.port_vlan_filtering = b53_vlan_filtering,
	.port_vlan_prepare = b53_vlan_prepare,
	.port_vlan_add = b53_vlan_add,
	.port_vlan_del = b53_vlan_del,
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
};

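/* Per-chip configuration: VLAN and ARL table sizes, default enabled port
 * mask, CPU port number and the register offsets that differ between
 * switch generations (VLAN table access, duplex status, jumbo frames).
 */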
struct b53_chip_data {
	u32 chip_id;
	const char *dev_name;
	u16 vlans;
	u16 enabled_ports;
	u8 cpu_port;
	u8 vta_regs[3];
	u8 arl_entries;
	u8 duplex_reg;
	u8 jumbo_pm_reg;
	u8 jumbo_size_reg;
};

#define B53_VTA_REGS \
	{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
#define B53_VTA_REGS_9798 \
	{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
#define B53_VTA_REGS_63XX \
	{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }

static const struct b53_chip_data b53_switch_chips[] = {
	{
		.chip_id = BCM5325_DEVICE_ID,
		.dev_name = "BCM5325",
		.vlans = 16,
		.enabled_ports = 0x1f,
		.arl_entries = 2,
		.cpu_port = B53_CPU_PORT_25,
		.duplex_reg = B53_DUPLEX_STAT_FE,
	},
	{
		.chip_id = BCM5365_DEVICE_ID,
		.dev_name = "BCM5365",
		.vlans = 256,
		.enabled_ports = 0x1f,
		.arl_entries = 2,
		.cpu_port = B53_CPU_PORT_25,
		.duplex_reg = B53_DUPLEX_STAT_FE,
	},
	{
		.chip_id = BCM5389_DEVICE_ID,
		.dev_name = "BCM5389",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5395_DEVICE_ID,
		.dev_name = "BCM5395",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5397_DEVICE_ID,
		.dev_name = "BCM5397",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM5398_DEVICE_ID,
		.dev_name = "BCM5398",
		.vlans = 4096,
		.enabled_ports = 0x7f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_9798,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53115_DEVICE_ID,
		.dev_name = "BCM53115",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.vta_regs = B53_VTA_REGS,
		.cpu_port = B53_CPU_PORT,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53125_DEVICE_ID,
		.dev_name = "BCM53125",
		.vlans = 4096,
		.enabled_ports = 0xff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53128_DEVICE_ID,
		.dev_name = "BCM53128",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM63XX_DEVICE_ID,
		.dev_name = "BCM63xx",
		.vlans = 4096,
		.enabled_ports = 0, /* pdata must provide them */
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS_63XX,
		.duplex_reg = B53_DUPLEX_STAT_63XX,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
	},
	{
		.chip_id = BCM53010_DEVICE_ID,
		.dev_name = "BCM53010",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53011_DEVICE_ID,
		.dev_name = "BCM53011",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53012_DEVICE_ID,
		.dev_name = "BCM53012",
		.vlans = 4096,
		.enabled_ports = 0x1bf,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53018_DEVICE_ID,
		.dev_name = "BCM53018",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM53019_DEVICE_ID,
		.dev_name = "BCM53019",
		.vlans = 4096,
		.enabled_ports = 0x1f,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM58XX_DEVICE_ID,
		.dev_name = "BCM585xx/586xx/88312",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM583XX_DEVICE_ID,
		.dev_name = "BCM583xx/11360",
		.vlans = 4096,
		.enabled_ports = 0x103,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7445_DEVICE_ID,
		.dev_name = "BCM7445",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
	{
		.chip_id = BCM7278_DEVICE_ID,
		.dev_name = "BCM7278",
		.vlans = 4096,
		.enabled_ports = 0x1ff,
		.arl_entries = 4,
		.cpu_port = B53_CPU_PORT,
		.vta_regs = B53_VTA_REGS,
		.duplex_reg = B53_DUPLEX_STAT_GE,
		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
	},
};

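/* Apply the matching b53_chip_data entry to @dev, handle chip-specific
 * quirks (BCM5325 variants, BCM53115 GMII strap), allocate the per-port and
 * per-VLAN state and request the optional reset GPIO.
 */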
static int b53_switch_init(struct b53_device *dev)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
		const struct b53_chip_data *chip = &b53_switch_chips[i];

		if (chip->chip_id == dev->chip_id) {
			if (!dev->enabled_ports)
				dev->enabled_ports = chip->enabled_ports;
			dev->name = chip->dev_name;
			dev->duplex_reg = chip->duplex_reg;
			dev->vta_regs[0] = chip->vta_regs[0];
			dev->vta_regs[1] = chip->vta_regs[1];
			dev->vta_regs[2] = chip->vta_regs[2];
			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
			dev->cpu_port = chip->cpu_port;
			dev->num_vlans = chip->vlans;
			dev->num_arl_entries = chip->arl_entries;
			break;
		}
	}

	/* check which BCM5325x version we have */
	if (is5325(dev)) {
		u8 vc4;

		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);

		/* check reserved bits */
		switch (vc4 & 3) {
		case 1:
			/* BCM5325E */
			break;
		case 3:
			/* BCM5325F - do not use port 4 */
			dev->enabled_ports &= ~BIT(4);
			break;
		default:
/* On the BCM47XX SoCs this is the supported internal switch. */
#ifndef CONFIG_BCM47XX
			/* BCM5325M */
			return -EINVAL;
#else
			break;
#endif
		}
	} else if (dev->chip_id == BCM53115_DEVICE_ID) {
		u64 strap_value;

		b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
		/* use second IMP port if GMII is enabled */
		if (strap_value & SV_GMII_CTRL_115)
			dev->cpu_port = 5;
	}

	/* cpu port is always last */
	dev->num_ports = dev->cpu_port + 1;
	dev->enabled_ports |= BIT(dev->cpu_port);

	/* Include non standard CPU port built-in PHYs to be probed */
	if (is539x(dev) || is531x5(dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			if (!(dev->ds->phys_mii_mask & BIT(i)) &&
			    !b53_possible_cpu_port(dev->ds, i))
				dev->ds->phys_mii_mask |= BIT(i);
		}
	}

	dev->ports = devm_kcalloc(dev->dev,
				  dev->num_ports, sizeof(struct b53_port),
				  GFP_KERNEL);
	if (!dev->ports)
		return -ENOMEM;

	dev->vlans = devm_kcalloc(dev->dev,
				  dev->num_vlans, sizeof(struct b53_vlan),
				  GFP_KERNEL);
	if (!dev->vlans)
		return -ENOMEM;

	dev->reset_gpio = b53_switch_get_reset_gpio(dev);
	if (dev->reset_gpio >= 0) {
		ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
					    GPIOF_OUT_INIT_HIGH, "robo_reset");
		if (ret)
			return ret;
	}

	return 0;
}

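/* Allocate the DSA switch and its b53_device context; called by the
 * bus-specific front-ends, which supply their register accessors via @ops.
 */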
struct b53_device *b53_switch_alloc(struct device *base,
				    const struct b53_io_ops *ops,
				    void *priv)
{
	struct dsa_switch *ds;
	struct b53_device *dev;

	ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
	if (!ds)
		return NULL;

	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	ds->priv = dev;
	dev->dev = base;

	dev->ds = ds;
	dev->priv = priv;
	dev->ops = ops;
	ds->ops = &b53_switch_ops;
	mutex_init(&dev->reg_mutex);
	mutex_init(&dev->stats_mutex);

	return dev;
}
EXPORT_SYMBOL(b53_switch_alloc);

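/* Identify the switch model from the 8-bit device ID register, falling back
 * to the 32-bit ID (or the BCM5325/BCM5365 VTA probe), then read the core
 * revision.
 */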
int b53_switch_detect(struct b53_device *dev)
{
	u32 id32;
	u16 tmp;
	u8 id8;
	int ret;

	ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
	if (ret)
		return ret;

	switch (id8) {
	case 0:
		/* BCM5325 and BCM5365 do not have this register so reads
		 * return 0. But the read operation did succeed, so assume this
		 * is one of them.
		 *
		 * Next check if we can write to the 5325's VTA register; for
		 * 5365 it is read only.
		 */
		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);

		if (tmp == 0xf)
			dev->chip_id = BCM5325_DEVICE_ID;
		else
			dev->chip_id = BCM5365_DEVICE_ID;
		break;
	case BCM5389_DEVICE_ID:
	case BCM5395_DEVICE_ID:
	case BCM5397_DEVICE_ID:
	case BCM5398_DEVICE_ID:
		dev->chip_id = id8;
		break;
	default:
		ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
		if (ret)
			return ret;

		switch (id32) {
		case BCM53115_DEVICE_ID:
		case BCM53125_DEVICE_ID:
		case BCM53128_DEVICE_ID:
		case BCM53010_DEVICE_ID:
		case BCM53011_DEVICE_ID:
		case BCM53012_DEVICE_ID:
		case BCM53018_DEVICE_ID:
		case BCM53019_DEVICE_ID:
			dev->chip_id = id32;
			break;
		default:
			pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
			       id8, id32);
			return -ENODEV;
		}
	}

	if (dev->chip_id == BCM5325_DEVICE_ID)
		return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
				 &dev->core_rev);
	else
		return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
				 &dev->core_rev);
}
EXPORT_SYMBOL(b53_switch_detect);

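/* Final registration step: take the chip ID and port mask from platform data
 * when provided, otherwise auto-detect, then initialize and hand the switch
 * over to the DSA core.
 */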
int b53_switch_register(struct b53_device *dev)
{
	int ret;

	if (dev->pdata) {
		dev->chip_id = dev->pdata->chip_id;
		dev->enabled_ports = dev->pdata->enabled_ports;
	}

	if (!dev->chip_id && b53_switch_detect(dev))
		return -EINVAL;

	ret = b53_switch_init(dev);
	if (ret)
		return ret;

	pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);

	return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);

MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
MODULE_DESCRIPTION("B53 switch library");
MODULE_LICENSE("Dual BSD/GPL");