1 /*
2 * B53 switch driver main logic
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <linux/delay.h>
21 #include <linux/export.h>
22 #include <linux/gpio.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/platform_data/b53.h>
26 #include <linux/phy.h>
27 #include <linux/phylink.h>
28 #include <linux/etherdevice.h>
29 #include <linux/if_bridge.h>
30 #include <linux/if_vlan.h>
31 #include <net/dsa.h>
32
33 #include "b53_regs.h"
34 #include "b53_priv.h"
35
36 struct b53_mib_desc {
37 u8 size;
38 u8 offset;
39 const char *name;
40 };
41
42 /* BCM5365 MIB counters */
43 static const struct b53_mib_desc b53_mibs_65[] = {
44 { 8, 0x00, "TxOctets" },
45 { 4, 0x08, "TxDropPkts" },
46 { 4, 0x10, "TxBroadcastPkts" },
47 { 4, 0x14, "TxMulticastPkts" },
48 { 4, 0x18, "TxUnicastPkts" },
49 { 4, 0x1c, "TxCollisions" },
50 { 4, 0x20, "TxSingleCollision" },
51 { 4, 0x24, "TxMultipleCollision" },
52 { 4, 0x28, "TxDeferredTransmit" },
53 { 4, 0x2c, "TxLateCollision" },
54 { 4, 0x30, "TxExcessiveCollision" },
55 { 4, 0x38, "TxPausePkts" },
56 { 8, 0x44, "RxOctets" },
57 { 4, 0x4c, "RxUndersizePkts" },
58 { 4, 0x50, "RxPausePkts" },
59 { 4, 0x54, "Pkts64Octets" },
60 { 4, 0x58, "Pkts65to127Octets" },
61 { 4, 0x5c, "Pkts128to255Octets" },
62 { 4, 0x60, "Pkts256to511Octets" },
63 { 4, 0x64, "Pkts512to1023Octets" },
64 { 4, 0x68, "Pkts1024to1522Octets" },
65 { 4, 0x6c, "RxOversizePkts" },
66 { 4, 0x70, "RxJabbers" },
67 { 4, 0x74, "RxAlignmentErrors" },
68 { 4, 0x78, "RxFCSErrors" },
69 { 8, 0x7c, "RxGoodOctets" },
70 { 4, 0x84, "RxDropPkts" },
71 { 4, 0x88, "RxUnicastPkts" },
72 { 4, 0x8c, "RxMulticastPkts" },
73 { 4, 0x90, "RxBroadcastPkts" },
74 { 4, 0x94, "RxSAChanges" },
75 { 4, 0x98, "RxFragments" },
76 };
77
78 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
79
80 /* BCM63xx MIB counters */
81 static const struct b53_mib_desc b53_mibs_63xx[] = {
82 { 8, 0x00, "TxOctets" },
83 { 4, 0x08, "TxDropPkts" },
84 { 4, 0x0c, "TxQoSPkts" },
85 { 4, 0x10, "TxBroadcastPkts" },
86 { 4, 0x14, "TxMulticastPkts" },
87 { 4, 0x18, "TxUnicastPkts" },
88 { 4, 0x1c, "TxCollisions" },
89 { 4, 0x20, "TxSingleCollision" },
90 { 4, 0x24, "TxMultipleCollision" },
91 { 4, 0x28, "TxDeferredTransmit" },
92 { 4, 0x2c, "TxLateCollision" },
93 { 4, 0x30, "TxExcessiveCollision" },
94 { 4, 0x38, "TxPausePkts" },
95 { 8, 0x3c, "TxQoSOctets" },
96 { 8, 0x44, "RxOctets" },
97 { 4, 0x4c, "RxUndersizePkts" },
98 { 4, 0x50, "RxPausePkts" },
99 { 4, 0x54, "Pkts64Octets" },
100 { 4, 0x58, "Pkts65to127Octets" },
101 { 4, 0x5c, "Pkts128to255Octets" },
102 { 4, 0x60, "Pkts256to511Octets" },
103 { 4, 0x64, "Pkts512to1023Octets" },
104 { 4, 0x68, "Pkts1024to1522Octets" },
105 { 4, 0x6c, "RxOversizePkts" },
106 { 4, 0x70, "RxJabbers" },
107 { 4, 0x74, "RxAlignmentErrors" },
108 { 4, 0x78, "RxFCSErrors" },
109 { 8, 0x7c, "RxGoodOctets" },
110 { 4, 0x84, "RxDropPkts" },
111 { 4, 0x88, "RxUnicastPkts" },
112 { 4, 0x8c, "RxMulticastPkts" },
113 { 4, 0x90, "RxBroadcastPkts" },
114 { 4, 0x94, "RxSAChanges" },
115 { 4, 0x98, "RxFragments" },
116 { 4, 0xa0, "RxSymbolErrors" },
117 { 4, 0xa4, "RxQoSPkts" },
118 { 8, 0xa8, "RxQoSOctets" },
119 { 4, 0xb0, "Pkts1523to2047Octets" },
120 { 4, 0xb4, "Pkts2048to4095Octets" },
121 { 4, 0xb8, "Pkts4096to8191Octets" },
122 { 4, 0xbc, "Pkts8192to9728Octets" },
123 { 4, 0xc0, "RxDiscarded" },
124 };
125
126 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
127
128 /* MIB counters */
129 static const struct b53_mib_desc b53_mibs[] = {
130 { 8, 0x00, "TxOctets" },
131 { 4, 0x08, "TxDropPkts" },
132 { 4, 0x10, "TxBroadcastPkts" },
133 { 4, 0x14, "TxMulticastPkts" },
134 { 4, 0x18, "TxUnicastPkts" },
135 { 4, 0x1c, "TxCollisions" },
136 { 4, 0x20, "TxSingleCollision" },
137 { 4, 0x24, "TxMultipleCollision" },
138 { 4, 0x28, "TxDeferredTransmit" },
139 { 4, 0x2c, "TxLateCollision" },
140 { 4, 0x30, "TxExcessiveCollision" },
141 { 4, 0x38, "TxPausePkts" },
142 { 8, 0x50, "RxOctets" },
143 { 4, 0x58, "RxUndersizePkts" },
144 { 4, 0x5c, "RxPausePkts" },
145 { 4, 0x60, "Pkts64Octets" },
146 { 4, 0x64, "Pkts65to127Octets" },
147 { 4, 0x68, "Pkts128to255Octets" },
148 { 4, 0x6c, "Pkts256to511Octets" },
149 { 4, 0x70, "Pkts512to1023Octets" },
150 { 4, 0x74, "Pkts1024to1522Octets" },
151 { 4, 0x78, "RxOversizePkts" },
152 { 4, 0x7c, "RxJabbers" },
153 { 4, 0x80, "RxAlignmentErrors" },
154 { 4, 0x84, "RxFCSErrors" },
155 { 8, 0x88, "RxGoodOctets" },
156 { 4, 0x90, "RxDropPkts" },
157 { 4, 0x94, "RxUnicastPkts" },
158 { 4, 0x98, "RxMulticastPkts" },
159 { 4, 0x9c, "RxBroadcastPkts" },
160 { 4, 0xa0, "RxSAChanges" },
161 { 4, 0xa4, "RxFragments" },
162 { 4, 0xa8, "RxJumboPkts" },
163 { 4, 0xac, "RxSymbolErrors" },
164 { 4, 0xc0, "RxDiscarded" },
165 };
166
167 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
168
169 static const struct b53_mib_desc b53_mibs_58xx[] = {
170 { 8, 0x00, "TxOctets" },
171 { 4, 0x08, "TxDropPkts" },
172 { 4, 0x0c, "TxQPKTQ0" },
173 { 4, 0x10, "TxBroadcastPkts" },
174 { 4, 0x14, "TxMulticastPkts" },
175 { 4, 0x18, "TxUnicastPKts" },
176 { 4, 0x1c, "TxCollisions" },
177 { 4, 0x20, "TxSingleCollision" },
178 { 4, 0x24, "TxMultipleCollision" },
179 { 4, 0x28, "TxDeferredCollision" },
180 { 4, 0x2c, "TxLateCollision" },
181 { 4, 0x30, "TxExcessiveCollision" },
182 { 4, 0x34, "TxFrameInDisc" },
183 { 4, 0x38, "TxPausePkts" },
184 { 4, 0x3c, "TxQPKTQ1" },
185 { 4, 0x40, "TxQPKTQ2" },
186 { 4, 0x44, "TxQPKTQ3" },
187 { 4, 0x48, "TxQPKTQ4" },
188 { 4, 0x4c, "TxQPKTQ5" },
189 { 8, 0x50, "RxOctets" },
190 { 4, 0x58, "RxUndersizePkts" },
191 { 4, 0x5c, "RxPausePkts" },
192 { 4, 0x60, "RxPkts64Octets" },
193 { 4, 0x64, "RxPkts65to127Octets" },
194 { 4, 0x68, "RxPkts128to255Octets" },
195 { 4, 0x6c, "RxPkts256to511Octets" },
196 { 4, 0x70, "RxPkts512to1023Octets" },
197 { 4, 0x74, "RxPkts1024toMaxPktsOctets" },
198 { 4, 0x78, "RxOversizePkts" },
199 { 4, 0x7c, "RxJabbers" },
200 { 4, 0x80, "RxAlignmentErrors" },
201 { 4, 0x84, "RxFCSErrors" },
202 { 8, 0x88, "RxGoodOctets" },
203 { 4, 0x90, "RxDropPkts" },
204 { 4, 0x94, "RxUnicastPkts" },
205 { 4, 0x98, "RxMulticastPkts" },
206 { 4, 0x9c, "RxBroadcastPkts" },
207 { 4, 0xa0, "RxSAChanges" },
208 { 4, 0xa4, "RxFragments" },
209 { 4, 0xa8, "RxJumboPkt" },
210 { 4, 0xac, "RxSymblErr" },
211 { 4, 0xb0, "InRangeErrCount" },
212 { 4, 0xb4, "OutRangeErrCount" },
213 { 4, 0xb8, "EEELpiEvent" },
214 { 4, 0xbc, "EEELpiDuration" },
215 { 4, 0xc0, "RxDiscard" },
216 { 4, 0xc8, "TxQPKTQ6" },
217 { 4, 0xcc, "TxQPKTQ7" },
218 { 4, 0xd0, "TxPkts64Octets" },
219 { 4, 0xd4, "TxPkts65to127Octets" },
220 { 4, 0xd8, "TxPkts128to255Octets" },
221 { 4, 0xdc, "TxPkts256to511Ocets" },
222 { 4, 0xe0, "TxPkts512to1023Ocets" },
223 { 4, 0xe4, "TxPkts1024toMaxPktOcets" },
224 };
225
226 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
227
228 #define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
229 #define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
230
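/* Kick off a VLAN table (VTA) operation and poll for the hardware to
 * clear VTA_START_CMD, giving up after roughly 1-2 ms.
 */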
231 static int b53_do_vlan_op(struct b53_device *dev, u8 op)
232 {
233 unsigned int i;
234
235 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
236
237 for (i = 0; i < 10; i++) {
238 u8 vta;
239
240 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
241 if (!(vta & VTA_START_CMD))
242 return 0;
243
244 usleep_range(100, 200);
245 }
246
247 return -EIO;
248 }
249
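/* Write one VLAN table entry. 5325 and 5365 use dedicated access
 * registers in the VLAN page; everything else goes through the
 * indirect VTA registers and b53_do_vlan_op().
 */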
250 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
251 struct b53_vlan *vlan)
252 {
253 if (is5325(dev)) {
254 u32 entry = 0;
255
256 if (vlan->members) {
257 entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
258 VA_UNTAG_S_25) | vlan->members;
259 if (dev->core_rev >= 3)
260 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
261 else
262 entry |= VA_VALID_25;
263 }
264
265 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
266 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
267 VTA_RW_STATE_WR | VTA_RW_OP_EN);
268 } else if (is5365(dev)) {
269 u16 entry = 0;
270
271 if (vlan->members)
272 entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
273 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
274
275 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
276 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
277 VTA_RW_STATE_WR | VTA_RW_OP_EN);
278 } else {
279 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
280 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
281 (vlan->untag << VTE_UNTAG_S) | vlan->members);
282
283 b53_do_vlan_op(dev, VTA_CMD_WRITE);
284 }
285
286 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
287 vid, vlan->members, vlan->untag);
288 }
289
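/* Read one VLAN table entry back into @vlan, using the same per-chip
 * register layouts as b53_set_vlan_entry().
 */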
290 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
291 struct b53_vlan *vlan)
292 {
293 if (is5325(dev)) {
294 u32 entry = 0;
295
296 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
297 VTA_RW_STATE_RD | VTA_RW_OP_EN);
298 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
299
300 if (dev->core_rev >= 3)
301 vlan->valid = !!(entry & VA_VALID_25_R4);
302 else
303 vlan->valid = !!(entry & VA_VALID_25);
304 vlan->members = entry & VA_MEMBER_MASK;
305 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
306
307 } else if (is5365(dev)) {
308 u16 entry = 0;
309
310 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
311 VTA_RW_STATE_WR | VTA_RW_OP_EN);
312 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
313
314 vlan->valid = !!(entry & VA_VALID_65);
315 vlan->members = entry & VA_MEMBER_MASK;
316 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
317 } else {
318 u32 entry = 0;
319
320 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
321 b53_do_vlan_op(dev, VTA_CMD_READ);
322 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
323 vlan->members = entry & VTE_MEMBERS;
324 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
325 vlan->valid = true;
326 }
327 }
328
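/* Program the per-port EAP mode; the field location differs on 63xx.
 * This is a no-op on 5325, 5365 and 5389.
 */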
329 static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
330 {
331 u64 eap_conf;
332
333 if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
334 return;
335
336 b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
337
338 if (is63xx(dev)) {
339 eap_conf &= ~EAP_MODE_MASK_63XX;
340 eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
341 } else {
342 eap_conf &= ~EAP_MODE_MASK;
343 eap_conf |= (u64)mode << EAP_MODE_SHIFT;
344 }
345
346 b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
347 }
348
349 static void b53_set_forwarding(struct b53_device *dev, int enable)
350 {
351 u8 mgmt;
352
353 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
354
355 if (enable)
356 mgmt |= SM_SW_FWD_EN;
357 else
358 mgmt &= ~SM_SW_FWD_EN;
359
360 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
361
362 if (!is5325(dev)) {
363 /* Include IMP port in dumb forwarding mode */
364 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
365 mgmt |= B53_MII_DUMB_FWDG_EN;
366 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
367
368 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
369 * frames should be flooded or not.
370 */
371 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
372 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
373 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
374 } else {
375 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
376 mgmt |= B53_IP_MCAST_25;
377 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
378 }
379 }
380
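/* Globally enable or disable 802.1Q handling. With filtering enabled,
 * ingress VID violations and VLAN table misses are dropped; without
 * filtering, ingress VID checking is turned off. When VLANs are disabled
 * entirely, violating frames are forwarded (5325/5365) or sent to the
 * IMP port (newer chips).
 */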
381 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
382 bool enable_filtering)
383 {
384 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
385
386 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
387 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
388 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
389
390 if (is5325(dev) || is5365(dev)) {
391 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
392 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
393 } else if (is63xx(dev)) {
394 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
395 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
396 } else {
397 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
398 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
399 }
400
401 vc1 &= ~VC1_RX_MCST_FWD_EN;
402
403 if (enable) {
404 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
405 vc1 |= VC1_RX_MCST_UNTAG_EN;
406 vc4 &= ~VC4_ING_VID_CHECK_MASK;
407 if (enable_filtering) {
408 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
409 vc5 |= VC5_DROP_VTABLE_MISS;
410 } else {
411 vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
412 vc5 &= ~VC5_DROP_VTABLE_MISS;
413 }
414
415 if (is5325(dev))
416 vc0 &= ~VC0_RESERVED_1;
417
418 if (is5325(dev) || is5365(dev))
419 vc1 |= VC1_RX_MCST_TAG_EN;
420
421 } else {
422 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
423 vc1 &= ~VC1_RX_MCST_UNTAG_EN;
424 vc4 &= ~VC4_ING_VID_CHECK_MASK;
425 vc5 &= ~VC5_DROP_VTABLE_MISS;
426
427 if (is5325(dev) || is5365(dev))
428 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
429 else
430 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
431
432 if (is5325(dev) || is5365(dev))
433 vc1 &= ~VC1_RX_MCST_TAG_EN;
434 }
435
436 if (!is5325(dev) && !is5365(dev))
437 vc5 &= ~VC5_VID_FFF_EN;
438
439 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
440 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
441
442 if (is5325(dev) || is5365(dev)) {
443 /* enable the high 8 bit vid check on 5325 */
444 if (is5325(dev) && enable)
445 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
446 VC3_HIGH_8BIT_EN);
447 else
448 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
449
450 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
451 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
452 } else if (is63xx(dev)) {
453 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
454 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
455 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
456 } else {
457 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
458 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
459 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
460 }
461
462 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
463
464 dev->vlan_enabled = enable;
465
466 dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n",
467 port, enable, enable_filtering);
468 }
469
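/* Enable or disable jumbo frames on all enabled ports; not supported on
 * 5325/5365. @allow_10_100 also permits jumbo frames at 10/100 speeds.
 */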
470 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
471 {
472 u32 port_mask = 0;
473 u16 max_size = JMS_MIN_SIZE;
474
475 if (is5325(dev) || is5365(dev))
476 return -EINVAL;
477
478 if (enable) {
479 port_mask = dev->enabled_ports;
480 max_size = JMS_MAX_SIZE;
481 if (allow_10_100)
482 port_mask |= JPM_10_100_JUMBO_EN;
483 }
484
485 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
486 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
487 }
488
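/* Trigger a fast-age cycle scoped by @mask (port, VLAN or static
 * entries), poll until FAST_AGE_DONE clears, then restore the default
 * dynamic-only ageing behaviour.
 */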
489 static int b53_flush_arl(struct b53_device *dev, u8 mask)
490 {
491 unsigned int i;
492
493 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
494 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
495
496 for (i = 0; i < 10; i++) {
497 u8 fast_age_ctrl;
498
499 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
500 &fast_age_ctrl);
501
502 if (!(fast_age_ctrl & FAST_AGE_DONE))
503 goto out;
504
505 msleep(1);
506 }
507
508 return -ETIMEDOUT;
509 out:
510 /* Only age dynamic entries (default behavior) */
511 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
512 return 0;
513 }
514
515 static int b53_fast_age_port(struct b53_device *dev, int port)
516 {
517 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
518
519 return b53_flush_arl(dev, FAST_AGE_PORT);
520 }
521
522 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
523 {
524 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
525
526 return b53_flush_arl(dev, FAST_AGE_VLAN);
527 }
528
529 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
530 {
531 struct b53_device *dev = ds->priv;
532 unsigned int i;
533 u16 pvlan;
534
535 /* BCM5325 CPU port is at 8 */
536 if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
537 cpu_port = B53_CPU_PORT;
538
539 /* Enable the IMP port to be in the same VLAN as the other ports
540 * on a per-port basis such that we only have Port i and IMP in
541 * the same VLAN.
542 */
543 b53_for_each_port(dev, i) {
544 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
545 pvlan |= BIT(cpu_port);
546 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
547 }
548 }
549 EXPORT_SYMBOL(b53_imp_vlan_setup);
550
551 static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
552 bool unicast)
553 {
554 u16 uc;
555
556 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
557 if (unicast)
558 uc |= BIT(port);
559 else
560 uc &= ~BIT(port);
561 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
562 }
563
564 static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
565 bool multicast)
566 {
567 u16 mc;
568
569 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
570 if (multicast)
571 mc |= BIT(port);
572 else
573 mc &= ~BIT(port);
574 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
575
576 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
577 if (multicast)
578 mc |= BIT(port);
579 else
580 mc &= ~BIT(port);
581 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
582 }
583
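/* Address learning is controlled through a "disable learning" port mask,
 * so the port bit is cleared to enable learning. No-op on 5325.
 */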
584 static void b53_port_set_learning(struct b53_device *dev, int port,
585 bool learning)
586 {
587 u16 reg;
588
589 if (is5325(dev))
590 return;
591
592 b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
593 if (learning)
594 reg &= ~BIT(port);
595 else
596 reg |= BIT(port);
597 b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
598 }
599
600 static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
601 {
602 struct b53_device *dev = ds->priv;
603 u16 reg;
604
605 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
606 if (enable)
607 reg |= BIT(port);
608 else
609 reg &= ~BIT(port);
610 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
611 }
612
613 int b53_setup_port(struct dsa_switch *ds, int port)
614 {
615 struct b53_device *dev = ds->priv;
616
617 b53_port_set_ucast_flood(dev, port, true);
618 b53_port_set_mcast_flood(dev, port, true);
619 b53_port_set_learning(dev, port, false);
620
621 /* Force all traffic to go to the CPU port to prevent the ASIC from
622 * trying to forward to bridged ports on matching FDB entries, then
623 * dropping frames because it isn't allowed to forward there.
624 */
625 if (dsa_is_user_port(ds, port))
626 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
627
628 if (is5325(dev) &&
629 in_range(port, 1, 4)) {
630 u8 reg;
631
632 b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
633 reg &= ~PD_MODE_POWER_DOWN_PORT(0);
634 if (dsa_is_unused_port(ds, port))
635 reg |= PD_MODE_POWER_DOWN_PORT(port);
636 else
637 reg &= ~PD_MODE_POWER_DOWN_PORT(port);
638 b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
639 }
640
641 return 0;
642 }
643 EXPORT_SYMBOL(b53_setup_port);
644
645 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
646 {
647 struct b53_device *dev = ds->priv;
648 unsigned int cpu_port;
649 int ret = 0;
650 u16 pvlan;
651
652 if (!dsa_is_user_port(ds, port))
653 return 0;
654
655 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
656
657 if (dev->ops->irq_enable)
658 ret = dev->ops->irq_enable(dev, port);
659 if (ret)
660 return ret;
661
662 /* Clear the Rx and Tx disable bits and set to no spanning tree */
663 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
664
665 /* Set this port, and only this one, to be in the default VLAN;
666 * if it is a member of a bridge, restore the membership it had
667 * prior to this port being brought down.
668 */
669 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
670 pvlan &= ~0x1ff;
671 pvlan |= BIT(port);
672 pvlan |= dev->ports[port].vlan_ctl_mask;
673 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
674
675 b53_imp_vlan_setup(ds, cpu_port);
676
677 /* If EEE was enabled, restore it */
678 if (dev->ports[port].eee.eee_enabled)
679 b53_eee_enable_set(ds, port, true);
680
681 return 0;
682 }
683 EXPORT_SYMBOL(b53_enable_port);
684
685 void b53_disable_port(struct dsa_switch *ds, int port)
686 {
687 struct b53_device *dev = ds->priv;
688 u8 reg;
689
690 /* Disable Tx/Rx for the port */
691 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
692 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
693 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
694
695 if (dev->ops->irq_disable)
696 dev->ops->irq_disable(dev, port);
697 }
698 EXPORT_SYMBOL(b53_disable_port);
699
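/* Enable or disable Broadcom tags on the given IMP/CPU port depending on
 * the switch's tagging protocol, and point frame management at that port.
 */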
700 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
701 {
702 struct b53_device *dev = ds->priv;
703 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
704 u8 hdr_ctl, val;
705 u16 reg;
706
707 /* Resolve which bit controls the Broadcom tag */
708 switch (port) {
709 case 8:
710 val = BRCM_HDR_P8_EN;
711 break;
712 case 7:
713 val = BRCM_HDR_P7_EN;
714 break;
715 case 5:
716 val = BRCM_HDR_P5_EN;
717 break;
718 default:
719 val = 0;
720 break;
721 }
722
723 /* Enable management mode if tagging is requested */
724 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
725 if (tag_en)
726 hdr_ctl |= SM_SW_FWD_MODE;
727 else
728 hdr_ctl &= ~SM_SW_FWD_MODE;
729 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
730
731 /* Configure the appropriate IMP port */
732 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
733 if (port == 8)
734 hdr_ctl |= GC_FRM_MGMT_PORT_MII;
735 else if (port == 5)
736 hdr_ctl |= GC_FRM_MGMT_PORT_M;
737 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
738
739 /* Enable Broadcom tags for IMP port */
740 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
741 if (tag_en)
742 hdr_ctl |= val;
743 else
744 hdr_ctl &= ~val;
745 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
746
747 /* Registers below are only accessible on newer devices */
748 if (!is58xx(dev))
749 return;
750
751 /* Enable reception of Broadcom tags for CPU TX (switch RX) to
752 * allow us to tag outgoing frames
753 */
754 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
755 if (tag_en)
756 reg &= ~BIT(port);
757 else
758 reg |= BIT(port);
759 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
760
761 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
762 * allow delivering frames to the per-port net_devices
763 */
764 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
765 if (tag_en)
766 reg &= ~BIT(port);
767 else
768 reg |= BIT(port);
769 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
770 }
771 EXPORT_SYMBOL(b53_brcm_hdr_setup);
772
773 static void b53_enable_cpu_port(struct b53_device *dev, int port)
774 {
775 u8 port_ctrl;
776
777 /* BCM5325 CPU port is at 8 */
778 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
779 port = B53_CPU_PORT;
780
781 port_ctrl = PORT_CTRL_RX_BCST_EN |
782 PORT_CTRL_RX_MCST_EN |
783 PORT_CTRL_RX_UCST_EN;
784 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
785
786 b53_brcm_hdr_setup(dev->ds, port);
787 }
788
789 static void b53_enable_mib(struct b53_device *dev)
790 {
791 u8 gc;
792
793 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
794 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
795 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
796 }
797
798 static void b53_enable_stp(struct b53_device *dev)
799 {
800 u8 gc;
801
802 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
803 gc |= GC_RX_BPDU_EN;
804 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
805 }
806
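/* VID 0 is not usable on 5325/5365 (see b53_vlan_prepare()), so their
 * default PVID is 1; all other switches default to VID 0.
 */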
807 static u16 b53_default_pvid(struct b53_device *dev)
808 {
809 if (is5325(dev) || is5365(dev))
810 return 1;
811 else
812 return 0;
813 }
814
815 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
816 {
817 struct b53_device *dev = ds->priv;
818
819 return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
820 }
821
822 static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
823 {
824 struct b53_device *dev = ds->priv;
825 struct dsa_port *dp;
826
827 if (!dev->vlan_filtering)
828 return true;
829
830 dp = dsa_to_port(ds, port);
831
832 if (dsa_port_is_cpu(dp))
833 return true;
834
835 return dp->bridge == NULL;
836 }
837
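/* (Re)program the VLAN table: clear all entries, apply the global VLAN
 * enable/filtering state, install the default PVID entry and, when
 * filtering is enabled, restore every previously configured VLAN (as
 * needed on system resume).
 */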
838 int b53_configure_vlan(struct dsa_switch *ds)
839 {
840 struct b53_device *dev = ds->priv;
841 struct b53_vlan vl = { 0 };
842 struct b53_vlan *v;
843 int i, def_vid;
844 u16 vid;
845
846 def_vid = b53_default_pvid(dev);
847
848 /* clear all vlan entries */
849 if (is5325(dev) || is5365(dev)) {
850 for (i = def_vid; i < dev->num_vlans; i++)
851 b53_set_vlan_entry(dev, i, &vl);
852 } else {
853 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
854 }
855
856 b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
857
858 /* Create an untagged VLAN entry for the default PVID in case
859 * CONFIG_VLAN_8021Q is disabled and there are no calls to
860 * dsa_user_vlan_rx_add_vid() to create the default VLAN
861 * entry. Do this only when the tagging protocol is not
862 * DSA_TAG_PROTO_NONE
863 */
864 v = &dev->vlans[def_vid];
865 b53_for_each_port(dev, i) {
866 if (!b53_vlan_port_may_join_untagged(ds, i))
867 continue;
868
869 vl.members |= BIT(i);
870 if (!b53_vlan_port_needs_forced_tagged(ds, i))
871 vl.untag = vl.members;
872 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
873 def_vid);
874 }
875 b53_set_vlan_entry(dev, def_vid, &vl);
876
877 if (dev->vlan_filtering) {
878 /* Upon initial call we have not set-up any VLANs, but upon
879 * system resume, we need to restore all VLAN entries.
880 */
881 for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
882 v = &dev->vlans[vid];
883
884 if (!v->members)
885 continue;
886
887 b53_set_vlan_entry(dev, vid, v);
888 b53_fast_age_vlan(dev, vid);
889 }
890
891 b53_for_each_port(dev, i) {
892 if (!dsa_is_cpu_port(ds, i))
893 b53_write16(dev, B53_VLAN_PAGE,
894 B53_VLAN_PORT_DEF_TAG(i),
895 dev->ports[i].pvid);
896 }
897 }
898
899 return 0;
900 }
901 EXPORT_SYMBOL(b53_configure_vlan);
902
903 static void b53_switch_reset_gpio(struct b53_device *dev)
904 {
905 int gpio = dev->reset_gpio;
906
907 if (gpio < 0)
908 return;
909
910 /* Reset sequence: RESET low(50ms)->high(20ms)
911 */
912 gpio_set_value(gpio, 0);
913 mdelay(50);
914
915 gpio_set_value(gpio, 1);
916 mdelay(20);
917
918 dev->current_page = 0xff;
919 }
920
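/* Reset the switch core: toggle the optional reset GPIO, issue the
 * chip-specific software reset, make sure frame forwarding is enabled,
 * enable MIB counters and BPDU reception, and flush static ARL entries.
 */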
921 static int b53_switch_reset(struct b53_device *dev)
922 {
923 unsigned int timeout = 1000;
924 u8 mgmt, reg;
925
926 b53_switch_reset_gpio(dev);
927
928 if (is539x(dev)) {
929 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
930 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
931 }
932
933 /* This is specific to 58xx devices; do not use is58xx(), which
934 * covers the larger Starfighter 2 family, including 7445/7278,
935 * which still use this driver as a library and need to perform
936 * the reset earlier.
937 */
938 if (dev->chip_id == BCM58XX_DEVICE_ID ||
939 dev->chip_id == BCM583XX_DEVICE_ID) {
940 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
941 reg |= SW_RST | EN_SW_RST | EN_CH_RST;
942 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
943
944 do {
945 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
946 if (!(reg & SW_RST))
947 break;
948
949 usleep_range(1000, 2000);
950 } while (timeout-- > 0);
951
952 if (timeout == 0) {
953 dev_err(dev->dev,
954 "Timeout waiting for SW_RST to clear!\n");
955 return -ETIMEDOUT;
956 }
957 }
958
959 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
960
961 if (!(mgmt & SM_SW_FWD_EN)) {
962 mgmt &= ~SM_SW_FWD_MODE;
963 mgmt |= SM_SW_FWD_EN;
964
965 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
966 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
967
968 if (!(mgmt & SM_SW_FWD_EN)) {
969 dev_err(dev->dev, "Failed to enable switch!\n");
970 return -EINVAL;
971 }
972 }
973
974 b53_enable_mib(dev);
975 b53_enable_stp(dev);
976
977 return b53_flush_arl(dev, FAST_AGE_STATIC);
978 }
979
980 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
981 {
982 struct b53_device *priv = ds->priv;
983 u16 value = 0;
984 int ret;
985
986 if (priv->ops->phy_read16)
987 ret = priv->ops->phy_read16(priv, addr, reg, &value);
988 else
989 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
990 reg * 2, &value);
991
992 return ret ? ret : value;
993 }
994
995 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
996 {
997 struct b53_device *priv = ds->priv;
998
999 if (priv->ops->phy_write16)
1000 return priv->ops->phy_write16(priv, addr, reg, val);
1001
1002 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
1003 }
1004
1005 static int b53_reset_switch(struct b53_device *priv)
1006 {
1007 /* reset vlans */
1008 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
1009 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
1010
1011 priv->serdes_lane = B53_INVALID_LANE;
1012
1013 return b53_switch_reset(priv);
1014 }
1015
1016 static int b53_apply_config(struct b53_device *priv)
1017 {
1018 /* disable switching */
1019 b53_set_forwarding(priv, 0);
1020
1021 b53_configure_vlan(priv->ds);
1022
1023 /* enable switching */
1024 b53_set_forwarding(priv, 1);
1025
1026 return 0;
1027 }
1028
1029 static void b53_reset_mib(struct b53_device *priv)
1030 {
1031 u8 gc;
1032
1033 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
1034
1035 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
1036 msleep(1);
1037 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
1038 msleep(1);
1039 }
1040
1041 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
1042 {
1043 if (is5365(dev))
1044 return b53_mibs_65;
1045 else if (is63xx(dev))
1046 return b53_mibs_63xx;
1047 else if (is58xx(dev))
1048 return b53_mibs_58xx;
1049 else
1050 return b53_mibs;
1051 }
1052
1053 static unsigned int b53_get_mib_size(struct b53_device *dev)
1054 {
1055 if (is5365(dev))
1056 return B53_MIBS_65_SIZE;
1057 else if (is63xx(dev))
1058 return B53_MIBS_63XX_SIZE;
1059 else if (is58xx(dev))
1060 return B53_MIBS_58XX_SIZE;
1061 else
1062 return B53_MIBS_SIZE;
1063 }
1064
1065 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
1066 {
1067 /* These ports typically do not have built-in PHYs */
1068 switch (port) {
1069 case B53_CPU_PORT_25:
1070 case 7:
1071 case B53_CPU_PORT:
1072 return NULL;
1073 }
1074
1075 return mdiobus_get_phy(ds->user_mii_bus, port);
1076 }
1077
1078 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
1079 uint8_t *data)
1080 {
1081 struct b53_device *dev = ds->priv;
1082 const struct b53_mib_desc *mibs = b53_get_mib(dev);
1083 unsigned int mib_size = b53_get_mib_size(dev);
1084 struct phy_device *phydev;
1085 unsigned int i;
1086
1087 if (stringset == ETH_SS_STATS) {
1088 for (i = 0; i < mib_size; i++)
1089 strscpy(data + i * ETH_GSTRING_LEN,
1090 mibs[i].name, ETH_GSTRING_LEN);
1091 } else if (stringset == ETH_SS_PHY_STATS) {
1092 phydev = b53_get_phy_device(ds, port);
1093 if (!phydev)
1094 return;
1095
1096 phy_ethtool_get_strings(phydev, data);
1097 }
1098 }
1099 EXPORT_SYMBOL(b53_get_strings);
1100
1101 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
1102 {
1103 struct b53_device *dev = ds->priv;
1104 const struct b53_mib_desc *mibs = b53_get_mib(dev);
1105 unsigned int mib_size = b53_get_mib_size(dev);
1106 const struct b53_mib_desc *s;
1107 unsigned int i;
1108 u64 val = 0;
1109
1110 if (is5365(dev) && port == 5)
1111 port = 8;
1112
1113 mutex_lock(&dev->stats_mutex);
1114
1115 for (i = 0; i < mib_size; i++) {
1116 s = &mibs[i];
1117
1118 if (s->size == 8) {
1119 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
1120 } else {
1121 u32 val32;
1122
1123 b53_read32(dev, B53_MIB_PAGE(port), s->offset,
1124 &val32);
1125 val = val32;
1126 }
1127 data[i] = (u64)val;
1128 }
1129
1130 mutex_unlock(&dev->stats_mutex);
1131 }
1132 EXPORT_SYMBOL(b53_get_ethtool_stats);
1133
1134 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
1135 {
1136 struct phy_device *phydev;
1137
1138 phydev = b53_get_phy_device(ds, port);
1139 if (!phydev)
1140 return;
1141
1142 phy_ethtool_get_stats(phydev, NULL, data);
1143 }
1144 EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
1145
1146 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
1147 {
1148 struct b53_device *dev = ds->priv;
1149 struct phy_device *phydev;
1150
1151 if (sset == ETH_SS_STATS) {
1152 return b53_get_mib_size(dev);
1153 } else if (sset == ETH_SS_PHY_STATS) {
1154 phydev = b53_get_phy_device(ds, port);
1155 if (!phydev)
1156 return 0;
1157
1158 return phy_ethtool_get_sset_count(phydev);
1159 }
1160
1161 return 0;
1162 }
1163 EXPORT_SYMBOL(b53_get_sset_count);
1164
1165 enum b53_devlink_resource_id {
1166 B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1167 };
1168
1169 static u64 b53_devlink_vlan_table_get(void *priv)
1170 {
1171 struct b53_device *dev = priv;
1172 struct b53_vlan *vl;
1173 unsigned int i;
1174 u64 count = 0;
1175
1176 for (i = 0; i < dev->num_vlans; i++) {
1177 vl = &dev->vlans[i];
1178 if (vl->members)
1179 count++;
1180 }
1181
1182 return count;
1183 }
1184
1185 int b53_setup_devlink_resources(struct dsa_switch *ds)
1186 {
1187 struct devlink_resource_size_params size_params;
1188 struct b53_device *dev = ds->priv;
1189 int err;
1190
1191 devlink_resource_size_params_init(&size_params, dev->num_vlans,
1192 dev->num_vlans,
1193 1, DEVLINK_RESOURCE_UNIT_ENTRY);
1194
1195 err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
1196 B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1197 DEVLINK_RESOURCE_ID_PARENT_TOP,
1198 &size_params);
1199 if (err)
1200 goto out;
1201
1202 dsa_devlink_resource_occ_get_register(ds,
1203 B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1204 b53_devlink_vlan_table_get, dev);
1205
1206 return 0;
1207 out:
1208 dsa_devlink_resources_unregister(ds);
1209 return err;
1210 }
1211 EXPORT_SYMBOL(b53_setup_devlink_resources);
1212
1213 static int b53_setup(struct dsa_switch *ds)
1214 {
1215 struct b53_device *dev = ds->priv;
1216 struct b53_vlan *vl;
1217 unsigned int port;
1218 u16 pvid;
1219 int ret;
1220
1221 /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
1222 * which forces the CPU port to be tagged in all VLANs.
1223 */
1224 ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
1225
1226 /* The switch does not tell us the original VLAN for untagged
1227 * packets, so keep the CPU port always tagged.
1228 */
1229 ds->untag_vlan_aware_bridge_pvid = true;
1230
1231 ret = b53_reset_switch(dev);
1232 if (ret) {
1233 dev_err(ds->dev, "failed to reset switch\n");
1234 return ret;
1235 }
1236
1237 /* setup default vlan for filtering mode */
1238 pvid = b53_default_pvid(dev);
1239 vl = &dev->vlans[pvid];
1240 b53_for_each_port(dev, port) {
1241 vl->members |= BIT(port);
1242 if (!b53_vlan_port_needs_forced_tagged(ds, port))
1243 vl->untag |= BIT(port);
1244 }
1245
1246 b53_reset_mib(dev);
1247
1248 ret = b53_apply_config(dev);
1249 if (ret) {
1250 dev_err(ds->dev, "failed to apply configuration\n");
1251 return ret;
1252 }
1253
1254 /* Configure IMP/CPU port, disable all other ports. Enabled
1255 * ports will be configured with .port_enable
1256 */
1257 for (port = 0; port < dev->num_ports; port++) {
1258 if (dsa_is_cpu_port(ds, port))
1259 b53_enable_cpu_port(dev, port);
1260 else
1261 b53_disable_port(ds, port);
1262 }
1263
1264 return b53_setup_devlink_resources(ds);
1265 }
1266
1267 static void b53_teardown(struct dsa_switch *ds)
1268 {
1269 dsa_devlink_resources_unregister(ds);
1270 }
1271
1272 static void b53_force_link(struct b53_device *dev, int port, int link)
1273 {
1274 u8 reg, val, off;
1275
1276 /* Override the port settings */
1277 if (port == dev->imp_port) {
1278 off = B53_PORT_OVERRIDE_CTRL;
1279 val = PORT_OVERRIDE_EN;
1280 } else if (is5325(dev)) {
1281 return;
1282 } else {
1283 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1284 val = GMII_PO_EN;
1285 }
1286
1287 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1288 reg |= val;
1289 if (link)
1290 reg |= PORT_OVERRIDE_LINK;
1291 else
1292 reg &= ~PORT_OVERRIDE_LINK;
1293 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1294 }
1295
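/* Force speed, duplex and pause settings through the port override
 * register, as used for fixed-link ports. 5325 can only override its
 * IMP port, where a single bit covers both pause directions.
 */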
1296 static void b53_force_port_config(struct b53_device *dev, int port,
1297 int speed, int duplex,
1298 bool tx_pause, bool rx_pause)
1299 {
1300 u8 reg, val, off;
1301
1302 /* Override the port settings */
1303 if (port == dev->imp_port) {
1304 off = B53_PORT_OVERRIDE_CTRL;
1305 val = PORT_OVERRIDE_EN;
1306 } else if (is5325(dev)) {
1307 return;
1308 } else {
1309 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1310 val = GMII_PO_EN;
1311 }
1312
1313 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1314 reg |= val;
1315 if (duplex == DUPLEX_FULL)
1316 reg |= PORT_OVERRIDE_FULL_DUPLEX;
1317 else
1318 reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
1319
1320 switch (speed) {
1321 case 2000:
1322 reg |= PORT_OVERRIDE_SPEED_2000M;
1323 fallthrough;
1324 case SPEED_1000:
1325 reg |= PORT_OVERRIDE_SPEED_1000M;
1326 break;
1327 case SPEED_100:
1328 reg |= PORT_OVERRIDE_SPEED_100M;
1329 break;
1330 case SPEED_10:
1331 reg |= PORT_OVERRIDE_SPEED_10M;
1332 break;
1333 default:
1334 dev_err(dev->dev, "unknown speed: %d\n", speed);
1335 return;
1336 }
1337
1338 if (rx_pause) {
1339 if (is5325(dev))
1340 reg |= PORT_OVERRIDE_LP_FLOW_25;
1341 else
1342 reg |= PORT_OVERRIDE_RX_FLOW;
1343 }
1344
1345 if (tx_pause) {
1346 if (is5325(dev))
1347 reg |= PORT_OVERRIDE_LP_FLOW_25;
1348 else
1349 reg |= PORT_OVERRIDE_TX_FLOW;
1350 }
1351
1352 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1353 }
1354
1355 static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
1356 phy_interface_t interface)
1357 {
1358 struct b53_device *dev = ds->priv;
1359 u8 rgmii_ctrl = 0, off;
1360
1361 if (port == dev->imp_port)
1362 off = B53_RGMII_CTRL_IMP;
1363 else
1364 off = B53_RGMII_CTRL_P(port);
1365
1366 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
1367 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
1368
1369 if (port != dev->imp_port) {
1370 if (is63268(dev))
1371 rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
1372
1373 rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
1374 }
1375
1376 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
1377
1378 dev_dbg(ds->dev, "Configured port %d for %s\n", port,
1379 phy_modes(interface));
1380 }
1381
1382 static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
1383 phy_interface_t interface)
1384 {
1385 struct b53_device *dev = ds->priv;
1386 u8 rgmii_ctrl = 0, off;
1387
1388 if (port == dev->imp_port)
1389 off = B53_RGMII_CTRL_IMP;
1390 else
1391 off = B53_RGMII_CTRL_P(port);
1392
1393 /* Configure the port RGMII clock delay by DLL disabled and
1394 * tx_clk aligned timing (restoring to reset defaults)
1395 */
1396 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
1397 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
1398
1399 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
1400 * sure that we enable the port TX clock internal delay to
1401 * account for this internal delay that is inserted, otherwise
1402 * the switch won't be able to receive correctly.
1403 *
1404 * PHY_INTERFACE_MODE_RGMII means that no delay is introduced on
1405 * either the transmit or the receive path, so the BCM53125 must
1406 * be configured to compensate for the missing delays and insert
1407 * them itself on both clocks.
1408 *
1409 * The BCM53125 switch has its RX clock and TX clock control
1410 * swapped, hence the reason why we modify the TX clock path in
1411 * the "RGMII" case
1412 */
1413 if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
1414 rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
1415 if (interface == PHY_INTERFACE_MODE_RGMII)
1416 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
1417
1418 if (dev->chip_id != BCM53115_DEVICE_ID)
1419 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
1420
1421 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
1422
1423 dev_info(ds->dev, "Configured port %d for %s\n", port,
1424 phy_modes(interface));
1425 }
1426
1427 static void b53_adjust_5325_mii(struct dsa_switch *ds, int port)
1428 {
1429 struct b53_device *dev = ds->priv;
1430 u8 reg = 0;
1431
1432 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1433 &reg);
1434
1435 /* reverse mii needs to be enabled */
1436 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1437 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1438 reg | PORT_OVERRIDE_RV_MII_25);
1439 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1440 &reg);
1441
1442 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1443 dev_err(ds->dev,
1444 "Failed to enable reverse MII mode\n");
1445 return;
1446 }
1447 }
1448 }
1449
1450 void b53_port_event(struct dsa_switch *ds, int port)
1451 {
1452 struct b53_device *dev = ds->priv;
1453 bool link;
1454 u16 sts;
1455
1456 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1457 link = !!(sts & BIT(port));
1458 dsa_port_phylink_mac_change(ds, port, link);
1459 }
1460 EXPORT_SYMBOL(b53_port_event);
1461
1462 static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
1463 struct phylink_config *config)
1464 {
1465 struct b53_device *dev = ds->priv;
1466
1467 /* Internal ports need GMII for PHYLIB */
1468 __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
1469
1470 /* These switches appear to support MII and RevMII too, but beyond
1471 * this, the code gives very few clues. FIXME: We probably need more
1472 * interface modes here.
1473 *
1474 * According to b53_srab_mux_init(), ports 3..5 can support:
1475 * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
1476 * However, the interface mode read from the MUX configuration is
1477 * not passed back to DSA, so phylink uses NA.
1478 * DT can specify RGMII for ports 0, 1.
1479 * For MDIO, port 8 can be RGMII_TXID.
1480 */
1481 __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
1482 __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
1483
1484 /* BCM63xx RGMII ports support RGMII */
1485 if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
1486 phy_interface_set_rgmii(config->supported_interfaces);
1487
1488 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1489 MAC_10 | MAC_100;
1490
1491 /* 5325/5365 are not capable of gigabit speeds, everything else is.
1492 * Note: the original code also excluded Gigabit for MII, RevMII
1493 * and 802.3z modes. MII and RevMII are not able to work above 100M,
1494 * so will be excluded by the generic validator implementation.
1495 * However, the exclusion of Gigabit for 802.3z just seems wrong.
1496 */
1497 if (!(is5325(dev) || is5365(dev)))
1498 config->mac_capabilities |= MAC_1000;
1499
1500 /* Get the implementation specific capabilities */
1501 if (dev->ops->phylink_get_caps)
1502 dev->ops->phylink_get_caps(dev, port, config);
1503 }
1504
1505 static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config,
1506 phy_interface_t interface)
1507 {
1508 struct dsa_port *dp = dsa_phylink_to_port(config);
1509 struct b53_device *dev = dp->ds->priv;
1510
1511 if (!dev->ops->phylink_mac_select_pcs)
1512 return NULL;
1513
1514 return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface);
1515 }
1516
1517 static void b53_phylink_mac_config(struct phylink_config *config,
1518 unsigned int mode,
1519 const struct phylink_link_state *state)
1520 {
1521 struct dsa_port *dp = dsa_phylink_to_port(config);
1522 phy_interface_t interface = state->interface;
1523 struct dsa_switch *ds = dp->ds;
1524 struct b53_device *dev = ds->priv;
1525 int port = dp->index;
1526
1527 if (is63xx(dev) && port >= B53_63XX_RGMII0)
1528 b53_adjust_63xx_rgmii(ds, port, interface);
1529
1530 if (mode == MLO_AN_FIXED) {
1531 if (is531x5(dev) && phy_interface_mode_is_rgmii(interface))
1532 b53_adjust_531x5_rgmii(ds, port, interface);
1533
1534 /* configure MII port if necessary */
1535 if (is5325(dev))
1536 b53_adjust_5325_mii(ds, port);
1537 }
1538 }
1539
1540 static void b53_phylink_mac_link_down(struct phylink_config *config,
1541 unsigned int mode,
1542 phy_interface_t interface)
1543 {
1544 struct dsa_port *dp = dsa_phylink_to_port(config);
1545 struct b53_device *dev = dp->ds->priv;
1546 int port = dp->index;
1547
1548 if (mode == MLO_AN_PHY)
1549 return;
1550
1551 if (mode == MLO_AN_FIXED) {
1552 b53_force_link(dev, port, false);
1553 return;
1554 }
1555
1556 if (phy_interface_mode_is_8023z(interface) &&
1557 dev->ops->serdes_link_set)
1558 dev->ops->serdes_link_set(dev, port, mode, interface, false);
1559 }
1560
1561 static void b53_phylink_mac_link_up(struct phylink_config *config,
1562 struct phy_device *phydev,
1563 unsigned int mode,
1564 phy_interface_t interface,
1565 int speed, int duplex,
1566 bool tx_pause, bool rx_pause)
1567 {
1568 struct dsa_port *dp = dsa_phylink_to_port(config);
1569 struct dsa_switch *ds = dp->ds;
1570 struct b53_device *dev = ds->priv;
1571 struct ethtool_keee *p = &dev->ports[dp->index].eee;
1572 int port = dp->index;
1573
1574 if (mode == MLO_AN_PHY) {
1575 /* Re-negotiate EEE if it was enabled already */
1576 p->eee_enabled = b53_eee_init(ds, port, phydev);
1577 return;
1578 }
1579
1580 if (mode == MLO_AN_FIXED) {
1581 /* Force flow control on BCM5301x's CPU port */
1582 if (is5301x(dev) && dsa_is_cpu_port(ds, port))
1583 tx_pause = rx_pause = true;
1584
1585 b53_force_port_config(dev, port, speed, duplex,
1586 tx_pause, rx_pause);
1587 b53_force_link(dev, port, true);
1588 return;
1589 }
1590
1591 if (phy_interface_mode_is_8023z(interface) &&
1592 dev->ops->serdes_link_set)
1593 dev->ops->serdes_link_set(dev, port, mode, interface, true);
1594 }
1595
1596 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
1597 struct netlink_ext_ack *extack)
1598 {
1599 struct b53_device *dev = ds->priv;
1600
1601 if (dev->vlan_filtering != vlan_filtering) {
1602 dev->vlan_filtering = vlan_filtering;
1603 b53_apply_config(dev);
1604 }
1605
1606 return 0;
1607 }
1608 EXPORT_SYMBOL(b53_vlan_filtering);
1609
1610 static int b53_vlan_prepare(struct dsa_switch *ds, int port,
1611 const struct switchdev_obj_port_vlan *vlan)
1612 {
1613 struct b53_device *dev = ds->priv;
1614
1615 if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
1616 return -EOPNOTSUPP;
1617
1618 /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
1619 * receiving VLAN-tagged frames at all; we can still allow the port to
1620 * be configured for egress untagged.
1621 */
1622 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
1623 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1624 return -EINVAL;
1625
1626 if (vlan->vid >= dev->num_vlans)
1627 return -ERANGE;
1628
1629 b53_enable_vlan(dev, port, true, dev->vlan_filtering);
1630
1631 return 0;
1632 }
1633
1634 int b53_vlan_add(struct dsa_switch *ds, int port,
1635 const struct switchdev_obj_port_vlan *vlan,
1636 struct netlink_ext_ack *extack)
1637 {
1638 struct b53_device *dev = ds->priv;
1639 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1640 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1641 struct b53_vlan *vl;
1642 u16 old_pvid, new_pvid;
1643 int err;
1644
1645 err = b53_vlan_prepare(ds, port, vlan);
1646 if (err)
1647 return err;
1648
1649 if (vlan->vid == 0)
1650 return 0;
1651
1652 old_pvid = dev->ports[port].pvid;
1653 if (pvid)
1654 new_pvid = vlan->vid;
1655 else if (!pvid && vlan->vid == old_pvid)
1656 new_pvid = b53_default_pvid(dev);
1657 else
1658 new_pvid = old_pvid;
1659 dev->ports[port].pvid = new_pvid;
1660
1661 vl = &dev->vlans[vlan->vid];
1662
1663 if (dsa_is_cpu_port(ds, port))
1664 untagged = false;
1665
1666 vl->members |= BIT(port);
1667 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
1668 vl->untag |= BIT(port);
1669 else
1670 vl->untag &= ~BIT(port);
1671
1672 if (!dev->vlan_filtering)
1673 return 0;
1674
1675 b53_set_vlan_entry(dev, vlan->vid, vl);
1676 b53_fast_age_vlan(dev, vlan->vid);
1677
1678 if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
1679 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1680 new_pvid);
1681 b53_fast_age_vlan(dev, old_pvid);
1682 }
1683
1684 return 0;
1685 }
1686 EXPORT_SYMBOL(b53_vlan_add);
1687
1688 int b53_vlan_del(struct dsa_switch *ds, int port,
1689 const struct switchdev_obj_port_vlan *vlan)
1690 {
1691 struct b53_device *dev = ds->priv;
1692 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1693 struct b53_vlan *vl;
1694 u16 pvid;
1695
1696 if (vlan->vid == 0)
1697 return 0;
1698
1699 pvid = dev->ports[port].pvid;
1700
1701 vl = &dev->vlans[vlan->vid];
1702
1703 vl->members &= ~BIT(port);
1704
1705 if (pvid == vlan->vid)
1706 pvid = b53_default_pvid(dev);
1707 dev->ports[port].pvid = pvid;
1708
1709 if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
1710 vl->untag &= ~(BIT(port));
1711
1712 if (!dev->vlan_filtering)
1713 return 0;
1714
1715 b53_set_vlan_entry(dev, vlan->vid, vl);
1716 b53_fast_age_vlan(dev, vlan->vid);
1717
1718 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1719 b53_fast_age_vlan(dev, pvid);
1720
1721 return 0;
1722 }
1723 EXPORT_SYMBOL(b53_vlan_del);
1724
1725 /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */
1726 static int b53_arl_op_wait(struct b53_device *dev)
1727 {
1728 unsigned int timeout = 10;
1729 u8 reg;
1730
1731 do {
1732 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1733 if (!(reg & ARLTBL_START_DONE))
1734 return 0;
1735
1736 usleep_range(1000, 2000);
1737 } while (timeout--);
1738
1739 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1740
1741 return -ETIMEDOUT;
1742 }
1743
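/* Start an ARL table access (non-zero @op = read, 0 = write back), pick
 * the IVL/SVL lookup mode from dev->vlan_enabled, and wait for the
 * operation to complete.
 */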
1744 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
1745 {
1746 u8 reg;
1747
1748 if (op > ARLTBL_RW)
1749 return -EINVAL;
1750
1751 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1752 reg |= ARLTBL_START_DONE;
1753 if (op)
1754 reg |= ARLTBL_RW;
1755 else
1756 reg &= ~ARLTBL_RW;
1757 if (dev->vlan_enabled)
1758 reg &= ~ARLTBL_IVL_SVL_SELECT;
1759 else
1760 reg |= ARLTBL_IVL_SVL_SELECT;
1761 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1762
1763 return b53_arl_op_wait(dev);
1764 }
1765
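/* Scan the ARL bins returned by the last read operation for @mac/@vid.
 * Returns 0 with *idx set on a match, -ENOENT with *idx pointing at a
 * free bin when there is no match, or -ENOSPC if all bins are taken.
 */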
1766 static int b53_arl_read(struct b53_device *dev, u64 mac,
1767 u16 vid, struct b53_arl_entry *ent, u8 *idx)
1768 {
1769 DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
1770 unsigned int i;
1771 int ret;
1772
1773 ret = b53_arl_op_wait(dev);
1774 if (ret)
1775 return ret;
1776
1777 bitmap_zero(free_bins, dev->num_arl_bins);
1778
1779 /* Read the bins */
1780 for (i = 0; i < dev->num_arl_bins; i++) {
1781 u64 mac_vid;
1782 u32 fwd_entry;
1783
1784 b53_read64(dev, B53_ARLIO_PAGE,
1785 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
1786 b53_read32(dev, B53_ARLIO_PAGE,
1787 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
1788 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1789
1790 if (!(fwd_entry & ARLTBL_VALID)) {
1791 set_bit(i, free_bins);
1792 continue;
1793 }
1794 if ((mac_vid & ARLTBL_MAC_MASK) != mac)
1795 continue;
1796 if (dev->vlan_enabled &&
1797 ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
1798 continue;
1799 *idx = i;
1800 return 0;
1801 }
1802
1803 *idx = find_first_bit(free_bins, dev->num_arl_bins);
1804 return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
1805 }
1806
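/* Read-modify-write a single ARL entry for @addr/@vid. Unicast entries
 * track one port; multicast entries keep a port bitmask and stay valid
 * as long as at least one port remains. A non-zero @op performs only the
 * lookup.
 */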
1807 static int b53_arl_op(struct b53_device *dev, int op, int port,
1808 const unsigned char *addr, u16 vid, bool is_valid)
1809 {
1810 struct b53_arl_entry ent;
1811 u32 fwd_entry;
1812 u64 mac, mac_vid = 0;
1813 u8 idx = 0;
1814 int ret;
1815
1816 /* Convert the array into a 64-bit MAC */
1817 mac = ether_addr_to_u64(addr);
1818
1819 /* Perform a read for the given MAC and VID */
1820 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1821 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1822
1823 /* Issue a read operation for this MAC */
1824 ret = b53_arl_rw_op(dev, 1);
1825 if (ret)
1826 return ret;
1827
1828 ret = b53_arl_read(dev, mac, vid, &ent, &idx);
1829
1830 /* If this is a read, just finish now */
1831 if (op)
1832 return ret;
1833
1834 switch (ret) {
1835 case -ETIMEDOUT:
1836 return ret;
1837 case -ENOSPC:
1838 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
1839 addr, vid);
1840 return is_valid ? ret : 0;
1841 case -ENOENT:
1842 /* We could not find a matching MAC, so reset to a new entry */
1843 dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
1844 addr, vid, idx);
1845 fwd_entry = 0;
1846 break;
1847 default:
1848 dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
1849 addr, vid, idx);
1850 break;
1851 }
1852
1853 /* For a multicast address, the port field is a bitmask and the entry
1854  * remains valid as long as at least one port is still active
1855  */
1856 if (!is_multicast_ether_addr(addr)) {
1857 ent.port = port;
1858 ent.is_valid = is_valid;
1859 } else {
1860 if (is_valid)
1861 ent.port |= BIT(port);
1862 else
1863 ent.port &= ~BIT(port);
1864
1865 ent.is_valid = !!(ent.port);
1866 }
1867
1868 ent.vid = vid;
1869 ent.is_static = true;
1870 ent.is_age = false;
1871 memcpy(ent.mac, addr, ETH_ALEN);
1872 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
1873
1874 b53_write64(dev, B53_ARLIO_PAGE,
1875 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1876 b53_write32(dev, B53_ARLIO_PAGE,
1877 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1878
1879 return b53_arl_rw_op(dev, 0);
1880 }
1881
1882 int b53_fdb_add(struct dsa_switch *ds, int port,
1883 const unsigned char *addr, u16 vid,
1884 struct dsa_db db)
1885 {
1886 struct b53_device *priv = ds->priv;
1887 int ret;
1888
1889 /* 5325 and 5365 require some more massaging, but could
1890 * be supported eventually
1891 */
1892 if (is5325(priv) || is5365(priv))
1893 return -EOPNOTSUPP;
1894
1895 mutex_lock(&priv->arl_mutex);
1896 ret = b53_arl_op(priv, 0, port, addr, vid, true);
1897 mutex_unlock(&priv->arl_mutex);
1898
1899 return ret;
1900 }
1901 EXPORT_SYMBOL(b53_fdb_add);
1902
1903 int b53_fdb_del(struct dsa_switch *ds, int port,
1904 const unsigned char *addr, u16 vid,
1905 struct dsa_db db)
1906 {
1907 struct b53_device *priv = ds->priv;
1908 int ret;
1909
1910 mutex_lock(&priv->arl_mutex);
1911 ret = b53_arl_op(priv, 0, port, addr, vid, false);
1912 mutex_unlock(&priv->arl_mutex);
1913
1914 return ret;
1915 }
1916 EXPORT_SYMBOL(b53_fdb_del);
1917
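/* Wait for the ARL search engine to either publish a valid result
 * (ARL_SRCH_VLID set) or finish walking the table (ARL_SRCH_STDN cleared).
 */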
1918 static int b53_arl_search_wait(struct b53_device *dev)
1919 {
1920 unsigned int timeout = 1000;
1921 u8 reg;
1922
1923 do {
1924 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
1925 if (!(reg & ARL_SRCH_STDN))
1926 return 0;
1927
1928 if (reg & ARL_SRCH_VLID)
1929 return 0;
1930
1931 usleep_range(1000, 2000);
1932 } while (timeout--);
1933
1934 return -ETIMEDOUT;
1935 }
1936
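/* Copy one ARL search result slot (MAC/VID plus forwarding data) into ent. */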
1937 static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
1938 struct b53_arl_entry *ent)
1939 {
1940 u64 mac_vid;
1941 u32 fwd_entry;
1942
1943 b53_read64(dev, B53_ARLIO_PAGE,
1944 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
1945 b53_read32(dev, B53_ARLIO_PAGE,
1946 B53_ARL_SRCH_RSTL(idx), &fwd_entry);
1947 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1948 }
1949
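/* Report a single ARL entry to the FDB dump callback, skipping invalid
 * entries and entries that belong to other ports.
 */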
1950 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
1951 dsa_fdb_dump_cb_t *cb, void *data)
1952 {
1953 if (!ent->is_valid)
1954 return 0;
1955
1956 if (port != ent->port)
1957 return 0;
1958
1959 return cb(ent->mac, ent->vid, ent->is_static, data);
1960 }
1961
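/* Walk the whole ARL with the hardware search engine. Every completed
 * search step yields up to two result slots, so at most half of the
 * table size needs to be iterated.
 */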
1962 int b53_fdb_dump(struct dsa_switch *ds, int port,
1963 dsa_fdb_dump_cb_t *cb, void *data)
1964 {
1965 struct b53_device *priv = ds->priv;
1966 struct b53_arl_entry results[2];
1967 unsigned int count = 0;
1968 int ret;
1969 u8 reg;
1970
1971 mutex_lock(&priv->arl_mutex);
1972
1973 /* Start search operation */
1974 reg = ARL_SRCH_STDN;
1975 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
1976
1977 do {
1978 ret = b53_arl_search_wait(priv);
1979 if (ret)
1980 break;
1981
1982 b53_arl_search_rd(priv, 0, &results[0]);
1983 ret = b53_fdb_copy(port, &results[0], cb, data);
1984 if (ret)
1985 break;
1986
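/* Switches with more than two bins expose a second search result slot
 * per step.
 */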
1987 if (priv->num_arl_bins > 2) {
1988 b53_arl_search_rd(priv, 1, &results[1]);
1989 ret = b53_fdb_copy(port, &results[1], cb, data);
1990 if (ret)
1991 break;
1992
1993 if (!results[0].is_valid && !results[1].is_valid)
1994 break;
1995 }
1996
1997 } while (count++ < b53_max_arl_entries(priv) / 2);
1998
1999 mutex_unlock(&priv->arl_mutex);
2000
2001 return 0;
2002 }
2003 EXPORT_SYMBOL(b53_fdb_dump);
2004
2005 int b53_mdb_add(struct dsa_switch *ds, int port,
2006 const struct switchdev_obj_port_mdb *mdb,
2007 struct dsa_db db)
2008 {
2009 struct b53_device *priv = ds->priv;
2010 int ret;
2011
2012 /* 5325 and 5365 require some more massaging, but could
2013 * be supported eventually
2014 */
2015 if (is5325(priv) || is5365(priv))
2016 return -EOPNOTSUPP;
2017
2018 mutex_lock(&priv->arl_mutex);
2019 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
2020 mutex_unlock(&priv->arl_mutex);
2021
2022 return ret;
2023 }
2024 EXPORT_SYMBOL(b53_mdb_add);
2025
2026 int b53_mdb_del(struct dsa_switch *ds, int port,
2027 const struct switchdev_obj_port_mdb *mdb,
2028 struct dsa_db db)
2029 {
2030 struct b53_device *priv = ds->priv;
2031 int ret;
2032
2033 mutex_lock(&priv->arl_mutex);
2034 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
2035 mutex_unlock(&priv->arl_mutex);
2036 if (ret)
2037 dev_err(ds->dev, "failed to delete MDB entry\n");
2038
2039 return ret;
2040 }
2041 EXPORT_SYMBOL(b53_mdb_del);
2042
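/* Joining a bridge: add this port to the port-based VLAN masks of every
 * other port offloading the same bridge and, when VLAN filtering is
 * active, remove it from the "join all VLANs" group and the default PVID
 * entry.
 */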
2043 int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
2044 bool *tx_fwd_offload, struct netlink_ext_ack *extack)
2045 {
2046 struct b53_device *dev = ds->priv;
2047 struct b53_vlan *vl;
2048 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2049 u16 pvlan, reg, pvid;
2050 unsigned int i;
2051
2052 /* On 7278, port 7, which connects to the ASP, should only receive
2053  * traffic from matching CFP rules.
2054  */
2055 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
2056 return -EINVAL;
2057
2058 pvid = b53_default_pvid(dev);
2059 vl = &dev->vlans[pvid];
2060
2061 if (dev->vlan_filtering) {
2062 /* Make this port leave the "join all VLANs" group since we will have
2063  * proper VLAN entries from now on
2064  */
2065 if (is58xx(dev)) {
2066 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
2067 &reg);
2068 reg &= ~BIT(port);
2069 if ((reg & BIT(cpu_port)) == BIT(cpu_port))
2070 reg &= ~BIT(cpu_port);
2071 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
2072 reg);
2073 }
2074
2075 b53_get_vlan_entry(dev, pvid, vl);
2076 vl->members &= ~BIT(port);
2077 b53_set_vlan_entry(dev, pvid, vl);
2078 }
2079
2080 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
2081
2082 b53_for_each_port(dev, i) {
2083 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
2084 continue;
2085
2086 /* Add this local port to the remote port VLAN control
2087 * membership and update the remote port bitmask
2088 */
2089 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
2090 reg |= BIT(port);
2091 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
2092 dev->ports[i].vlan_ctl_mask = reg;
2093
2094 pvlan |= BIT(i);
2095 }
2096
2097 /* Disable redirection of unknown SA to the CPU port */
2098 b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
2099
2100 /* Configure the local port VLAN control membership to include
2101 * remote ports and update the local port bitmask
2102 */
2103 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
2104 dev->ports[port].vlan_ctl_mask = pvlan;
2105
2106 return 0;
2107 }
2108 EXPORT_SYMBOL(b53_br_join);
2109
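/* Leaving the bridge: remove the port from the other members' port-based
 * VLAN masks and, when VLAN filtering is active, put it back into the
 * "join all VLANs" group and the default PVID entry.
 */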
2110 void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
2111 {
2112 struct b53_device *dev = ds->priv;
2113 struct b53_vlan *vl;
2114 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2115 unsigned int i;
2116 u16 pvlan, reg, pvid;
2117
2118 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
2119
2120 b53_for_each_port(dev, i) {
2121 /* Don't touch the remaining ports */
2122 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
2123 continue;
2124
2125 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
2126 reg &= ~BIT(port);
2127 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
2128 dev->ports[port].vlan_ctl_mask = reg;
2129
2130 /* Prevent self removal to preserve isolation */
2131 if (port != i)
2132 pvlan &= ~BIT(i);
2133 }
2134
2135 /* Enable redirection of unknown SA to the CPU port */
2136 b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
2137
2138 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
2139 dev->ports[port].vlan_ctl_mask = pvlan;
2140
2141 pvid = b53_default_pvid(dev);
2142 vl = &dev->vlans[pvid];
2143
2144 if (dev->vlan_filtering) {
2145 /* Make this port join all VLANs without VLAN entries */
2146 if (is58xx(dev)) {
2147 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
2148 reg |= BIT(port);
2149 if (!(reg & BIT(cpu_port)))
2150 reg |= BIT(cpu_port);
2151 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
2152 }
2153
2154 b53_get_vlan_entry(dev, pvid, vl);
2155 vl->members |= BIT(port);
2156 b53_set_vlan_entry(dev, pvid, vl);
2157 }
2158 }
2159 EXPORT_SYMBOL(b53_br_leave);
2160
2161 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
2162 {
2163 struct b53_device *dev = ds->priv;
2164 u8 hw_state;
2165 u8 reg;
2166
2167 switch (state) {
2168 case BR_STATE_DISABLED:
2169 hw_state = PORT_CTRL_DIS_STATE;
2170 break;
2171 case BR_STATE_LISTENING:
2172 hw_state = PORT_CTRL_LISTEN_STATE;
2173 break;
2174 case BR_STATE_LEARNING:
2175 hw_state = PORT_CTRL_LEARN_STATE;
2176 break;
2177 case BR_STATE_FORWARDING:
2178 hw_state = PORT_CTRL_FWD_STATE;
2179 break;
2180 case BR_STATE_BLOCKING:
2181 hw_state = PORT_CTRL_BLOCK_STATE;
2182 break;
2183 default:
2184 dev_err(ds->dev, "invalid STP state: %d\n", state);
2185 return;
2186 }
2187
2188 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
2189 reg &= ~PORT_CTRL_STP_STATE_MASK;
2190 reg |= hw_state;
2191 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
2192 }
2193 EXPORT_SYMBOL(b53_br_set_stp_state);
2194
2195 void b53_br_fast_age(struct dsa_switch *ds, int port)
2196 {
2197 struct b53_device *dev = ds->priv;
2198
2199 if (b53_fast_age_port(dev, port))
2200 dev_err(ds->dev, "fast ageing failed\n");
2201 }
2202 EXPORT_SYMBOL(b53_br_fast_age);
2203
2204 int b53_br_flags_pre(struct dsa_switch *ds, int port,
2205 struct switchdev_brport_flags flags,
2206 struct netlink_ext_ack *extack)
2207 {
2208 struct b53_device *dev = ds->priv;
2209 unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD);
2210
2211 if (!is5325(dev))
2212 mask |= BR_LEARNING;
2213
2214 if (flags.mask & ~mask)
2215 return -EINVAL;
2216
2217 return 0;
2218 }
2219 EXPORT_SYMBOL(b53_br_flags_pre);
2220
2221 int b53_br_flags(struct dsa_switch *ds, int port,
2222 struct switchdev_brport_flags flags,
2223 struct netlink_ext_ack *extack)
2224 {
2225 if (flags.mask & BR_FLOOD)
2226 b53_port_set_ucast_flood(ds->priv, port,
2227 !!(flags.val & BR_FLOOD));
2228 if (flags.mask & BR_MCAST_FLOOD)
2229 b53_port_set_mcast_flood(ds->priv, port,
2230 !!(flags.val & BR_MCAST_FLOOD));
2231 if (flags.mask & BR_LEARNING)
2232 b53_port_set_learning(ds->priv, port,
2233 !!(flags.val & BR_LEARNING));
2234
2235 return 0;
2236 }
2237 EXPORT_SYMBOL(b53_br_flags);
2238
2239 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
2240 {
2241 /* Broadcom switches will accept enabling Broadcom tags on the
2242  * following ports: 5, 7 and 8; any other port is not supported
2243  */
2244 switch (port) {
2245 case B53_CPU_PORT_25:
2246 case 7:
2247 case B53_CPU_PORT:
2248 return true;
2249 }
2250
2251 return false;
2252 }
2253
2254 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
2255 enum dsa_tag_protocol tag_protocol)
2256 {
2257 bool ret = b53_possible_cpu_port(ds, port);
2258
2259 if (!ret) {
2260 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
2261 port);
2262 return ret;
2263 }
2264
2265 switch (tag_protocol) {
2266 case DSA_TAG_PROTO_BRCM:
2267 case DSA_TAG_PROTO_BRCM_PREPEND:
2268 dev_warn(ds->dev,
2269 "Port %d is stacked to Broadcom tag switch\n", port);
2270 ret = false;
2271 break;
2272 default:
2273 ret = true;
2274 break;
2275 }
2276
2277 return ret;
2278 }
2279
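/* Select the Broadcom tag flavor for the CPU port: the legacy 6-byte tag
 * for 5325/5365/63xx, the prepended tag for the BCM58xx flow accelerator
 * port, the regular Broadcom tag otherwise, and no tagging when the port
 * cannot support it.
 */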
2280 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
2281 enum dsa_tag_protocol mprot)
2282 {
2283 struct b53_device *dev = ds->priv;
2284
2285 if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
2286 dev->tag_protocol = DSA_TAG_PROTO_NONE;
2287 goto out;
2288 }
2289
2290 /* Older models require a different 6 byte tag */
2291 if (is5325(dev) || is5365(dev) || is63xx(dev)) {
2292 dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
2293 goto out;
2294 }
2295
2296 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
2297 * which requires us to use the prepended Broadcom tag type
2298 */
2299 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
2300 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
2301 goto out;
2302 }
2303
2304 dev->tag_protocol = DSA_TAG_PROTO_BRCM;
2305 out:
2306 return dev->tag_protocol;
2307 }
2308 EXPORT_SYMBOL(b53_get_tag_protocol);
2309
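/* Add the port to the ingress or egress mirror mask and point the capture
 * port at mirror->to_local_port, enabling mirroring globally.
 */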
2310 int b53_mirror_add(struct dsa_switch *ds, int port,
2311 struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
2312 struct netlink_ext_ack *extack)
2313 {
2314 struct b53_device *dev = ds->priv;
2315 u16 reg, loc;
2316
2317 if (ingress)
2318 loc = B53_IG_MIR_CTL;
2319 else
2320 loc = B53_EG_MIR_CTL;
2321
2322 b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
2323 reg |= BIT(port);
2324 b53_write16(dev, B53_MGMT_PAGE, loc, reg);
2325
2326 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
2327 reg &= ~CAP_PORT_MASK;
2328 reg |= mirror->to_local_port;
2329 reg |= MIRROR_EN;
2330 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
2331
2332 return 0;
2333 }
2334 EXPORT_SYMBOL(b53_mirror_add);
2335
2336 void b53_mirror_del(struct dsa_switch *ds, int port,
2337 struct dsa_mall_mirror_tc_entry *mirror)
2338 {
2339 struct b53_device *dev = ds->priv;
2340 bool loc_disable = false, other_loc_disable = false;
2341 u16 reg, loc;
2342
2343 if (mirror->ingress)
2344 loc = B53_IG_MIR_CTL;
2345 else
2346 loc = B53_EG_MIR_CTL;
2347
2348 /* Update the desired ingress/egress register */
2349 b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
2350 reg &= ~BIT(port);
2351 if (!(reg & MIRROR_MASK))
2352 loc_disable = true;
2353 b53_write16(dev, B53_MGMT_PAGE, loc, reg);
2354
2355 /* Now look at the other one to know if we can disable mirroring
2356 * entirely
2357 */
2358 if (mirror->ingress)
2359 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
2360 else
2361 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
2362 if (!(reg & MIRROR_MASK))
2363 other_loc_disable = true;
2364
2365 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
2366 /* Both no longer have ports, let's disable mirroring */
2367 if (loc_disable && other_loc_disable) {
2368 reg &= ~MIRROR_EN;
2369 reg &= ~mirror->to_local_port;
2370 }
2371 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
2372 }
2373 EXPORT_SYMBOL(b53_mirror_del);
2374
2375 /* Returns 0 if EEE was not enabled, or 1 otherwise
2376 */
2377 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
2378 {
2379 int ret;
2380
2381 if (!b53_support_eee(ds, port))
2382 return 0;
2383
2384 ret = phy_init_eee(phy, false);
2385 if (ret)
2386 return 0;
2387
2388 b53_eee_enable_set(ds, port, true);
2389
2390 return 1;
2391 }
2392 EXPORT_SYMBOL(b53_eee_init);
2393
2394 bool b53_support_eee(struct dsa_switch *ds, int port)
2395 {
2396 struct b53_device *dev = ds->priv;
2397
2398 return !is5325(dev) && !is5365(dev) && !is63xx(dev);
2399 }
2400 EXPORT_SYMBOL(b53_support_eee);
2401
2402 int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
2403 {
2404 return 0;
2405 }
2406 EXPORT_SYMBOL(b53_get_mac_eee);
2407
2408 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
2409 {
2410 struct b53_device *dev = ds->priv;
2411 struct ethtool_keee *p = &dev->ports[port].eee;
2412
2413 p->eee_enabled = e->eee_enabled;
2414 b53_eee_enable_set(ds, port, e->eee_enabled);
2415
2416 return 0;
2417 }
2418 EXPORT_SYMBOL(b53_set_mac_eee);
2419
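/* Jumbo frame support is only programmed through the CPU port and is not
 * available on 5325/5365.
 */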
2420 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
2421 {
2422 struct b53_device *dev = ds->priv;
2423 bool enable_jumbo;
2424 bool allow_10_100;
2425
2426 if (is5325(dev) || is5365(dev))
2427 return 0;
2428
2429 if (!dsa_is_cpu_port(ds, port))
2430 return 0;
2431
2432 enable_jumbo = (mtu > ETH_DATA_LEN);
2433 allow_10_100 = !is63xx(dev);
2434
2435 return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
2436 }
2437
2438 static int b53_get_max_mtu(struct dsa_switch *ds, int port)
2439 {
2440 struct b53_device *dev = ds->priv;
2441
2442 if (is5325(dev) || is5365(dev))
2443 return B53_MAX_MTU_25;
2444
2445 return B53_MAX_MTU;
2446 }
2447
2448 static const struct phylink_mac_ops b53_phylink_mac_ops = {
2449 .mac_select_pcs = b53_phylink_mac_select_pcs,
2450 .mac_config = b53_phylink_mac_config,
2451 .mac_link_down = b53_phylink_mac_link_down,
2452 .mac_link_up = b53_phylink_mac_link_up,
2453 };
2454
2455 static const struct dsa_switch_ops b53_switch_ops = {
2456 .get_tag_protocol = b53_get_tag_protocol,
2457 .setup = b53_setup,
2458 .teardown = b53_teardown,
2459 .get_strings = b53_get_strings,
2460 .get_ethtool_stats = b53_get_ethtool_stats,
2461 .get_sset_count = b53_get_sset_count,
2462 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
2463 .phy_read = b53_phy_read16,
2464 .phy_write = b53_phy_write16,
2465 .phylink_get_caps = b53_phylink_get_caps,
2466 .port_setup = b53_setup_port,
2467 .port_enable = b53_enable_port,
2468 .port_disable = b53_disable_port,
2469 .support_eee = b53_support_eee,
2470 .get_mac_eee = b53_get_mac_eee,
2471 .set_mac_eee = b53_set_mac_eee,
2472 .port_bridge_join = b53_br_join,
2473 .port_bridge_leave = b53_br_leave,
2474 .port_pre_bridge_flags = b53_br_flags_pre,
2475 .port_bridge_flags = b53_br_flags,
2476 .port_stp_state_set = b53_br_set_stp_state,
2477 .port_fast_age = b53_br_fast_age,
2478 .port_vlan_filtering = b53_vlan_filtering,
2479 .port_vlan_add = b53_vlan_add,
2480 .port_vlan_del = b53_vlan_del,
2481 .port_fdb_dump = b53_fdb_dump,
2482 .port_fdb_add = b53_fdb_add,
2483 .port_fdb_del = b53_fdb_del,
2484 .port_mirror_add = b53_mirror_add,
2485 .port_mirror_del = b53_mirror_del,
2486 .port_mdb_add = b53_mdb_add,
2487 .port_mdb_del = b53_mdb_del,
2488 .port_max_mtu = b53_get_max_mtu,
2489 .port_change_mtu = b53_change_mtu,
2490 };
2491
2492 struct b53_chip_data {
2493 u32 chip_id;
2494 const char *dev_name;
2495 u16 vlans;
2496 u16 enabled_ports;
2497 u8 imp_port;
2498 u8 cpu_port;
2499 u8 vta_regs[3];
2500 u8 arl_bins;
2501 u16 arl_buckets;
2502 u8 duplex_reg;
2503 u8 jumbo_pm_reg;
2504 u8 jumbo_size_reg;
2505 };
2506
2507 #define B53_VTA_REGS \
2508 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
2509 #define B53_VTA_REGS_9798 \
2510 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
2511 #define B53_VTA_REGS_63XX \
2512 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
2513
2514 static const struct b53_chip_data b53_switch_chips[] = {
2515 {
2516 .chip_id = BCM5325_DEVICE_ID,
2517 .dev_name = "BCM5325",
2518 .vlans = 16,
2519 .enabled_ports = 0x3f,
2520 .arl_bins = 2,
2521 .arl_buckets = 1024,
2522 .imp_port = 5,
2523 .duplex_reg = B53_DUPLEX_STAT_FE,
2524 },
2525 {
2526 .chip_id = BCM5365_DEVICE_ID,
2527 .dev_name = "BCM5365",
2528 .vlans = 256,
2529 .enabled_ports = 0x3f,
2530 .arl_bins = 2,
2531 .arl_buckets = 1024,
2532 .imp_port = 5,
2533 .duplex_reg = B53_DUPLEX_STAT_FE,
2534 },
2535 {
2536 .chip_id = BCM5389_DEVICE_ID,
2537 .dev_name = "BCM5389",
2538 .vlans = 4096,
2539 .enabled_ports = 0x11f,
2540 .arl_bins = 4,
2541 .arl_buckets = 1024,
2542 .imp_port = 8,
2543 .vta_regs = B53_VTA_REGS,
2544 .duplex_reg = B53_DUPLEX_STAT_GE,
2545 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2546 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2547 },
2548 {
2549 .chip_id = BCM5395_DEVICE_ID,
2550 .dev_name = "BCM5395",
2551 .vlans = 4096,
2552 .enabled_ports = 0x11f,
2553 .arl_bins = 4,
2554 .arl_buckets = 1024,
2555 .imp_port = 8,
2556 .vta_regs = B53_VTA_REGS,
2557 .duplex_reg = B53_DUPLEX_STAT_GE,
2558 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2559 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2560 },
2561 {
2562 .chip_id = BCM5397_DEVICE_ID,
2563 .dev_name = "BCM5397",
2564 .vlans = 4096,
2565 .enabled_ports = 0x11f,
2566 .arl_bins = 4,
2567 .arl_buckets = 1024,
2568 .imp_port = 8,
2569 .vta_regs = B53_VTA_REGS_9798,
2570 .duplex_reg = B53_DUPLEX_STAT_GE,
2571 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2572 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2573 },
2574 {
2575 .chip_id = BCM5398_DEVICE_ID,
2576 .dev_name = "BCM5398",
2577 .vlans = 4096,
2578 .enabled_ports = 0x17f,
2579 .arl_bins = 4,
2580 .arl_buckets = 1024,
2581 .imp_port = 8,
2582 .vta_regs = B53_VTA_REGS_9798,
2583 .duplex_reg = B53_DUPLEX_STAT_GE,
2584 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2585 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2586 },
2587 {
2588 .chip_id = BCM53115_DEVICE_ID,
2589 .dev_name = "BCM53115",
2590 .vlans = 4096,
2591 .enabled_ports = 0x11f,
2592 .arl_bins = 4,
2593 .arl_buckets = 1024,
2594 .vta_regs = B53_VTA_REGS,
2595 .imp_port = 8,
2596 .duplex_reg = B53_DUPLEX_STAT_GE,
2597 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2598 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2599 },
2600 {
2601 .chip_id = BCM53125_DEVICE_ID,
2602 .dev_name = "BCM53125",
2603 .vlans = 4096,
2604 .enabled_ports = 0x1ff,
2605 .arl_bins = 4,
2606 .arl_buckets = 1024,
2607 .imp_port = 8,
2608 .vta_regs = B53_VTA_REGS,
2609 .duplex_reg = B53_DUPLEX_STAT_GE,
2610 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2611 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2612 },
2613 {
2614 .chip_id = BCM53128_DEVICE_ID,
2615 .dev_name = "BCM53128",
2616 .vlans = 4096,
2617 .enabled_ports = 0x1ff,
2618 .arl_bins = 4,
2619 .arl_buckets = 1024,
2620 .imp_port = 8,
2621 .vta_regs = B53_VTA_REGS,
2622 .duplex_reg = B53_DUPLEX_STAT_GE,
2623 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2624 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2625 },
2626 {
2627 .chip_id = BCM63XX_DEVICE_ID,
2628 .dev_name = "BCM63xx",
2629 .vlans = 4096,
2630 .enabled_ports = 0, /* pdata must provide them */
2631 .arl_bins = 4,
2632 .arl_buckets = 1024,
2633 .imp_port = 8,
2634 .vta_regs = B53_VTA_REGS_63XX,
2635 .duplex_reg = B53_DUPLEX_STAT_63XX,
2636 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
2637 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
2638 },
2639 {
2640 .chip_id = BCM63268_DEVICE_ID,
2641 .dev_name = "BCM63268",
2642 .vlans = 4096,
2643 .enabled_ports = 0, /* pdata must provide them */
2644 .arl_bins = 4,
2645 .arl_buckets = 1024,
2646 .imp_port = 8,
2647 .vta_regs = B53_VTA_REGS_63XX,
2648 .duplex_reg = B53_DUPLEX_STAT_63XX,
2649 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
2650 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
2651 },
2652 {
2653 .chip_id = BCM53010_DEVICE_ID,
2654 .dev_name = "BCM53010",
2655 .vlans = 4096,
2656 .enabled_ports = 0x1bf,
2657 .arl_bins = 4,
2658 .arl_buckets = 1024,
2659 .imp_port = 8,
2660 .vta_regs = B53_VTA_REGS,
2661 .duplex_reg = B53_DUPLEX_STAT_GE,
2662 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2663 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2664 },
2665 {
2666 .chip_id = BCM53011_DEVICE_ID,
2667 .dev_name = "BCM53011",
2668 .vlans = 4096,
2669 .enabled_ports = 0x1bf,
2670 .arl_bins = 4,
2671 .arl_buckets = 1024,
2672 .imp_port = 8,
2673 .vta_regs = B53_VTA_REGS,
2674 .duplex_reg = B53_DUPLEX_STAT_GE,
2675 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2676 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2677 },
2678 {
2679 .chip_id = BCM53012_DEVICE_ID,
2680 .dev_name = "BCM53012",
2681 .vlans = 4096,
2682 .enabled_ports = 0x1bf,
2683 .arl_bins = 4,
2684 .arl_buckets = 1024,
2685 .imp_port = 8,
2686 .vta_regs = B53_VTA_REGS,
2687 .duplex_reg = B53_DUPLEX_STAT_GE,
2688 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2689 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2690 },
2691 {
2692 .chip_id = BCM53018_DEVICE_ID,
2693 .dev_name = "BCM53018",
2694 .vlans = 4096,
2695 .enabled_ports = 0x1bf,
2696 .arl_bins = 4,
2697 .arl_buckets = 1024,
2698 .imp_port = 8,
2699 .vta_regs = B53_VTA_REGS,
2700 .duplex_reg = B53_DUPLEX_STAT_GE,
2701 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2702 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2703 },
2704 {
2705 .chip_id = BCM53019_DEVICE_ID,
2706 .dev_name = "BCM53019",
2707 .vlans = 4096,
2708 .enabled_ports = 0x1bf,
2709 .arl_bins = 4,
2710 .arl_buckets = 1024,
2711 .imp_port = 8,
2712 .vta_regs = B53_VTA_REGS,
2713 .duplex_reg = B53_DUPLEX_STAT_GE,
2714 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2715 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2716 },
2717 {
2718 .chip_id = BCM58XX_DEVICE_ID,
2719 .dev_name = "BCM585xx/586xx/88312",
2720 .vlans = 4096,
2721 .enabled_ports = 0x1ff,
2722 .arl_bins = 4,
2723 .arl_buckets = 1024,
2724 .imp_port = 8,
2725 .vta_regs = B53_VTA_REGS,
2726 .duplex_reg = B53_DUPLEX_STAT_GE,
2727 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2728 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2729 },
2730 {
2731 .chip_id = BCM583XX_DEVICE_ID,
2732 .dev_name = "BCM583xx/11360",
2733 .vlans = 4096,
2734 .enabled_ports = 0x103,
2735 .arl_bins = 4,
2736 .arl_buckets = 1024,
2737 .imp_port = 8,
2738 .vta_regs = B53_VTA_REGS,
2739 .duplex_reg = B53_DUPLEX_STAT_GE,
2740 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2741 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2742 },
2743 /* Starfighter 2 */
2744 {
2745 .chip_id = BCM4908_DEVICE_ID,
2746 .dev_name = "BCM4908",
2747 .vlans = 4096,
2748 .enabled_ports = 0x1bf,
2749 .arl_bins = 4,
2750 .arl_buckets = 256,
2751 .imp_port = 8,
2752 .vta_regs = B53_VTA_REGS,
2753 .duplex_reg = B53_DUPLEX_STAT_GE,
2754 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2755 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2756 },
2757 {
2758 .chip_id = BCM7445_DEVICE_ID,
2759 .dev_name = "BCM7445",
2760 .vlans = 4096,
2761 .enabled_ports = 0x1ff,
2762 .arl_bins = 4,
2763 .arl_buckets = 1024,
2764 .imp_port = 8,
2765 .vta_regs = B53_VTA_REGS,
2766 .duplex_reg = B53_DUPLEX_STAT_GE,
2767 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2768 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2769 },
2770 {
2771 .chip_id = BCM7278_DEVICE_ID,
2772 .dev_name = "BCM7278",
2773 .vlans = 4096,
2774 .enabled_ports = 0x1ff,
2775 .arl_bins = 4,
2776 .arl_buckets = 256,
2777 .imp_port = 8,
2778 .vta_regs = B53_VTA_REGS,
2779 .duplex_reg = B53_DUPLEX_STAT_GE,
2780 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2781 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2782 },
2783 {
2784 .chip_id = BCM53134_DEVICE_ID,
2785 .dev_name = "BCM53134",
2786 .vlans = 4096,
2787 .enabled_ports = 0x12f,
2788 .imp_port = 8,
2789 .cpu_port = B53_CPU_PORT,
2790 .vta_regs = B53_VTA_REGS,
2791 .arl_bins = 4,
2792 .arl_buckets = 1024,
2793 .duplex_reg = B53_DUPLEX_STAT_GE,
2794 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2795 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2796 },
2797 };
2798
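/* Apply the static per-chip parameters matching dev->chip_id, detect the
 * BCM5325 variant, then size and allocate the per-port and per-VLAN state.
 */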
2799 static int b53_switch_init(struct b53_device *dev)
2800 {
2801 unsigned int i;
2802 int ret;
2803
2804 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
2805 const struct b53_chip_data *chip = &b53_switch_chips[i];
2806
2807 if (chip->chip_id == dev->chip_id) {
2808 if (!dev->enabled_ports)
2809 dev->enabled_ports = chip->enabled_ports;
2810 dev->name = chip->dev_name;
2811 dev->duplex_reg = chip->duplex_reg;
2812 dev->vta_regs[0] = chip->vta_regs[0];
2813 dev->vta_regs[1] = chip->vta_regs[1];
2814 dev->vta_regs[2] = chip->vta_regs[2];
2815 dev->jumbo_pm_reg = chip->jumbo_pm_reg;
2816 dev->imp_port = chip->imp_port;
2817 dev->num_vlans = chip->vlans;
2818 dev->num_arl_bins = chip->arl_bins;
2819 dev->num_arl_buckets = chip->arl_buckets;
2820 break;
2821 }
2822 }
2823
2824 /* check which BCM5325x version we have */
2825 if (is5325(dev)) {
2826 u8 vc4;
2827
2828 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
2829
2830 /* check reserved bits */
2831 switch (vc4 & 3) {
2832 case 1:
2833 /* BCM5325E */
2834 break;
2835 case 3:
2836 /* BCM5325F - do not use port 4 */
2837 dev->enabled_ports &= ~BIT(4);
2838 break;
2839 default:
2840 /* On the BCM47XX SoCs this is the supported internal switch. */
2841 #ifndef CONFIG_BCM47XX
2842 /* BCM5325M */
2843 return -EINVAL;
2844 #else
2845 break;
2846 #endif
2847 }
2848 }
2849
2850 dev->num_ports = fls(dev->enabled_ports);
2851
2852 dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
2853
2854 /* Include built-in PHYs on non-standard CPU ports so they get probed */
2855 if (is539x(dev) || is531x5(dev)) {
2856 for (i = 0; i < dev->num_ports; i++) {
2857 if (!(dev->ds->phys_mii_mask & BIT(i)) &&
2858 !b53_possible_cpu_port(dev->ds, i))
2859 dev->ds->phys_mii_mask |= BIT(i);
2860 }
2861 }
2862
2863 dev->ports = devm_kcalloc(dev->dev,
2864 dev->num_ports, sizeof(struct b53_port),
2865 GFP_KERNEL);
2866 if (!dev->ports)
2867 return -ENOMEM;
2868
2869 dev->vlans = devm_kcalloc(dev->dev,
2870 dev->num_vlans, sizeof(struct b53_vlan),
2871 GFP_KERNEL);
2872 if (!dev->vlans)
2873 return -ENOMEM;
2874
2875 dev->reset_gpio = b53_switch_get_reset_gpio(dev);
2876 if (dev->reset_gpio >= 0) {
2877 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
2878 GPIOF_OUT_INIT_HIGH, "robo_reset");
2879 if (ret)
2880 return ret;
2881 }
2882
2883 return 0;
2884 }
2885
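/* Allocate and minimally initialize a b53_device and its backing DSA
 * switch; the bus driver supplies the register access ops and its private
 * context.
 */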
2886 struct b53_device *b53_switch_alloc(struct device *base,
2887 const struct b53_io_ops *ops,
2888 void *priv)
2889 {
2890 struct dsa_switch *ds;
2891 struct b53_device *dev;
2892
2893 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
2894 if (!ds)
2895 return NULL;
2896
2897 ds->dev = base;
2898
2899 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
2900 if (!dev)
2901 return NULL;
2902
2903 ds->priv = dev;
2904 dev->dev = base;
2905
2906 dev->ds = ds;
2907 dev->priv = priv;
2908 dev->ops = ops;
2909 ds->ops = &b53_switch_ops;
2910 ds->phylink_mac_ops = &b53_phylink_mac_ops;
2911 dev->vlan_enabled = true;
2912 dev->vlan_filtering = false;
2913 /* Let DSA handle the case where multiple bridges span the same switch
2914  * device and different VLAN awareness settings are requested, which
2915  * would break filtering semantics for any of the other bridge
2916  * devices (not supported by the hardware).
2917 */
2918 ds->vlan_filtering_is_global = true;
2919
2920 mutex_init(&dev->reg_mutex);
2921 mutex_init(&dev->stats_mutex);
2922 mutex_init(&dev->arl_mutex);
2923
2924 return dev;
2925 }
2926 EXPORT_SYMBOL(b53_switch_alloc);
2927
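/* Identify the switch from its device ID registers: older parts report an
 * 8-bit ID (or none at all for 5325/5365), newer parts a 32-bit ID.
 */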
2928 int b53_switch_detect(struct b53_device *dev)
2929 {
2930 u32 id32;
2931 u16 tmp;
2932 u8 id8;
2933 int ret;
2934
2935 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
2936 if (ret)
2937 return ret;
2938
2939 switch (id8) {
2940 case 0:
2941 /* BCM5325 and BCM5365 do not have this register so reads
2942 * return 0. But the read operation did succeed, so assume this
2943 * is one of them.
2944 *
2945 * Next check if we can write to the 5325's VTA register; for
2946 * 5365 it is read only.
2947 */
2948 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
2949 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
2950
2951 if (tmp == 0xf)
2952 dev->chip_id = BCM5325_DEVICE_ID;
2953 else
2954 dev->chip_id = BCM5365_DEVICE_ID;
2955 break;
2956 case BCM5389_DEVICE_ID:
2957 case BCM5395_DEVICE_ID:
2958 case BCM5397_DEVICE_ID:
2959 case BCM5398_DEVICE_ID:
2960 dev->chip_id = id8;
2961 break;
2962 default:
2963 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
2964 if (ret)
2965 return ret;
2966
2967 switch (id32) {
2968 case BCM53115_DEVICE_ID:
2969 case BCM53125_DEVICE_ID:
2970 case BCM53128_DEVICE_ID:
2971 case BCM53010_DEVICE_ID:
2972 case BCM53011_DEVICE_ID:
2973 case BCM53012_DEVICE_ID:
2974 case BCM53018_DEVICE_ID:
2975 case BCM53019_DEVICE_ID:
2976 case BCM53134_DEVICE_ID:
2977 dev->chip_id = id32;
2978 break;
2979 default:
2980 dev_err(dev->dev,
2981 "unsupported switch detected (BCM53%02x/BCM%x)\n",
2982 id8, id32);
2983 return -ENODEV;
2984 }
2985 }
2986
2987 if (dev->chip_id == BCM5325_DEVICE_ID)
2988 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
2989 &dev->core_rev);
2990 else
2991 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
2992 &dev->core_rev);
2993 }
2994 EXPORT_SYMBOL(b53_switch_detect);
2995
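/* Detect the switch if the bus driver did not provide a chip ID, apply the
 * chip data and register the device with the DSA framework.
 */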
2996 int b53_switch_register(struct b53_device *dev)
2997 {
2998 int ret;
2999
3000 if (dev->pdata) {
3001 dev->chip_id = dev->pdata->chip_id;
3002 dev->enabled_ports = dev->pdata->enabled_ports;
3003 }
3004
3005 if (!dev->chip_id && b53_switch_detect(dev))
3006 return -EINVAL;
3007
3008 ret = b53_switch_init(dev);
3009 if (ret)
3010 return ret;
3011
3012 dev_info(dev->dev, "found switch: %s, rev %i\n",
3013 dev->name, dev->core_rev);
3014
3015 return dsa_register_switch(dev->ds);
3016 }
3017 EXPORT_SYMBOL(b53_switch_register);
3018
3019 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
3020 MODULE_DESCRIPTION("B53 switch library");
3021 MODULE_LICENSE("Dual BSD/GPL");
3022