1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
2 /*
3 * Copyright 2008 - 2015 Freescale Semiconductor Inc.
4 */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include "fman_dtsec.h"
9 #include "fman.h"
10 #include "mac.h"
11
12 #include <linux/slab.h>
13 #include <linux/bitrev.h>
14 #include <linux/io.h>
15 #include <linux/delay.h>
16 #include <linux/phy.h>
17 #include <linux/crc32.h>
18 #include <linux/of_mdio.h>
19 #include <linux/mii.h>
20
21 /* TBI register addresses */
22 #define MII_TBICON 0x11
23
24 /* TBICON register bit fields */
25 #define TBICON_SOFT_RESET 0x8000 /* Soft reset */
26 #define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
27 #define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
28 #define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
29 #define TBICON_CLK_SELECT 0x0020 /* Clock select */
30 #define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
31
32 #define TBIANA_SGMII 0x4001
33 #define TBIANA_1000X 0x01a0
34
35 /* Interrupt Mask Register (IMASK) */
36 #define DTSEC_IMASK_BREN 0x80000000
37 #define DTSEC_IMASK_RXCEN 0x40000000
38 #define DTSEC_IMASK_MSROEN 0x04000000
39 #define DTSEC_IMASK_GTSCEN 0x02000000
40 #define DTSEC_IMASK_BTEN 0x01000000
41 #define DTSEC_IMASK_TXCEN 0x00800000
42 #define DTSEC_IMASK_TXEEN 0x00400000
43 #define DTSEC_IMASK_LCEN 0x00040000
44 #define DTSEC_IMASK_CRLEN 0x00020000
45 #define DTSEC_IMASK_XFUNEN 0x00010000
46 #define DTSEC_IMASK_ABRTEN 0x00008000
47 #define DTSEC_IMASK_IFERREN 0x00004000
48 #define DTSEC_IMASK_MAGEN 0x00000800
49 #define DTSEC_IMASK_MMRDEN 0x00000400
50 #define DTSEC_IMASK_MMWREN 0x00000200
51 #define DTSEC_IMASK_GRSCEN 0x00000100
52 #define DTSEC_IMASK_TDPEEN 0x00000002
53 #define DTSEC_IMASK_RDPEEN 0x00000001
54
55 #define DTSEC_EVENTS_MASK \
56 ((u32)(DTSEC_IMASK_BREN | \
57 DTSEC_IMASK_RXCEN | \
58 DTSEC_IMASK_BTEN | \
59 DTSEC_IMASK_TXCEN | \
60 DTSEC_IMASK_TXEEN | \
61 DTSEC_IMASK_ABRTEN | \
62 DTSEC_IMASK_LCEN | \
63 DTSEC_IMASK_CRLEN | \
64 DTSEC_IMASK_XFUNEN | \
65 DTSEC_IMASK_IFERREN | \
66 DTSEC_IMASK_MAGEN | \
67 DTSEC_IMASK_TDPEEN | \
68 DTSEC_IMASK_RDPEEN))
69
70 /* dtsec timestamp event bits */
71 #define TMR_PEMASK_TSREEN 0x00010000
72 #define TMR_PEVENT_TSRE 0x00010000
73
74 /* Group address bit indication */
75 #define MAC_GROUP_ADDRESS 0x0000010000000000ULL
76
77 /* Defaults */
78 #define DEFAULT_HALFDUP_RETRANSMIT 0xf
79 #define DEFAULT_HALFDUP_COLL_WINDOW 0x37
80 #define DEFAULT_TX_PAUSE_TIME 0xf000
81 #define DEFAULT_RX_PREPEND 0
82 #define DEFAULT_PREAMBLE_LEN 7
83 #define DEFAULT_TX_PAUSE_TIME_EXTD 0
84 #define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
85 #define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
86 #define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
87 #define DEFAULT_BACK_TO_BACK_IPG 0x60
88 #define DEFAULT_MAXIMUM_FRAME 0x600
89
90 /* register related defines (bits, field offsets..) */
91 #define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
92
93 #define DTSEC_ECNTRL_GMIIM 0x00000040
94 #define DTSEC_ECNTRL_TBIM 0x00000020
95 #define DTSEC_ECNTRL_SGMIIM 0x00000002
96 #define DTSEC_ECNTRL_RPM 0x00000010
97 #define DTSEC_ECNTRL_R100M 0x00000008
98 #define DTSEC_ECNTRL_QSGMIIM 0x00000001
99
100 #define TCTRL_TTSE 0x00000040
101 #define TCTRL_GTS 0x00000020
102
103 #define RCTRL_PAL_MASK 0x001f0000
104 #define RCTRL_PAL_SHIFT 16
105 #define RCTRL_GHTX 0x00000400
106 #define RCTRL_RTSE 0x00000040
107 #define RCTRL_GRS 0x00000020
108 #define RCTRL_MPROM 0x00000008
109 #define RCTRL_RSF 0x00000004
110 #define RCTRL_UPROM 0x00000001
111
112 #define MACCFG1_SOFT_RESET 0x80000000
113 #define MACCFG1_RX_FLOW 0x00000020
114 #define MACCFG1_TX_FLOW 0x00000010
115 #define MACCFG1_TX_EN 0x00000001
116 #define MACCFG1_RX_EN 0x00000004
117
118 #define MACCFG2_NIBBLE_MODE 0x00000100
119 #define MACCFG2_BYTE_MODE 0x00000200
120 #define MACCFG2_PAD_CRC_EN 0x00000004
121 #define MACCFG2_FULL_DUPLEX 0x00000001
122 #define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
123 #define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
124
125 #define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
126 #define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
127 #define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
128
129 #define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
130 #define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
131 #define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
132 #define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
133
134 #define HAFDUP_EXCESS_DEFER 0x00010000
135 #define HAFDUP_COLLISION_WINDOW 0x000003ff
136 #define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
137 #define HAFDUP_RETRANSMISSION_MAX 0x0000f000
138
139 #define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
140
141 #define PTV_PTE_MASK 0xffff0000
142 #define PTV_PT_MASK 0x0000ffff
143 #define PTV_PTE_SHIFT 16
144
145 #define MAX_PACKET_ALIGNMENT 31
146 #define MAX_INTER_PACKET_GAP 0x7f
147 #define MAX_RETRANSMISSION 0x0f
148 #define MAX_COLLISION_WINDOW 0x03ff
149
150 /* Hash table size (32 bits*8 regs) */
151 #define DTSEC_HASH_TABLE_SIZE 256
152 /* Extended Hash table size (32 bits*16 regs) */
153 #define EXTENDED_HASH_TABLE_SIZE 512
154
155 /* dTSEC Memory Map registers */
/* dTSEC Memory Map registers
 *
 * All registers are 32 bits wide, big-endian, and must be accessed with
 * ioread32be()/iowrite32be().  The reservedXXXX arrays only pad the layout
 * so each field lands at its documented byte offset.
 */
struct dtsec_regs {
	/* dTSEC General Control and Status Registers */
	u32 tsec_id;		/* 0x000 ETSEC_ID register */
	u32 tsec_id2;		/* 0x004 ETSEC_ID2 register */
	u32 ievent;		/* 0x008 Interrupt event register */
	u32 imask;		/* 0x00C Interrupt mask register */
	u32 reserved0010[1];
	u32 ecntrl;		/* 0x014 E control register */
	u32 ptv;		/* 0x018 Pause time value register */
	u32 tbipa;		/* 0x01C TBI PHY address register */
	u32 tmr_ctrl;		/* 0x020 Time-stamp Control register */
	u32 tmr_pevent;		/* 0x024 Time-stamp event register */
	u32 tmr_pemask;		/* 0x028 Timer event mask register */
	u32 reserved002c[5];
	u32 tctrl;		/* 0x040 Transmit control register */
	u32 reserved0044[3];
	u32 rctrl;		/* 0x050 Receive control register */
	u32 reserved0054[11];
	u32 igaddr[8];		/* 0x080-0x09C Individual/group address */
	u32 gaddr[8];		/* 0x0A0-0x0BC Group address registers 0-7 */
	u32 reserved00c0[16];
	u32 maccfg1;		/* 0x100 MAC configuration #1 */
	u32 maccfg2;		/* 0x104 MAC configuration #2 */
	u32 ipgifg;		/* 0x108 IPG/IFG */
	u32 hafdup;		/* 0x10C Half-duplex */
	u32 maxfrm;		/* 0x110 Maximum frame */
	u32 reserved0114[10];
	u32 ifstat;		/* 0x13C Interface status */
	u32 macstnaddr1;	/* 0x140 Station Address,part 1 */
	u32 macstnaddr2;	/* 0x144 Station Address,part 2 */
	struct {
		u32 exact_match1;	/* octets 1-4 */
		u32 exact_match2;	/* octets 5-6 */
	} macaddr[15];		/* 0x148-0x1BC mac exact match addresses 1-15 */
	u32 reserved01c0[16];
	/* MIB statistics counters (RMON) */
	u32 tr64;	/* 0x200 Tx and Rx 64 byte frame counter */
	u32 tr127;	/* 0x204 Tx and Rx 65 to 127 byte frame counter */
	u32 tr255;	/* 0x208 Tx and Rx 128 to 255 byte frame counter */
	u32 tr511;	/* 0x20C Tx and Rx 256 to 511 byte frame counter */
	u32 tr1k;	/* 0x210 Tx and Rx 512 to 1023 byte frame counter */
	u32 trmax;	/* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
	u32 trmgv;
	/* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
	u32 rbyt;	/* 0x21C receive byte counter */
	u32 rpkt;	/* 0x220 receive packet counter */
	u32 rfcs;	/* 0x224 receive FCS error counter */
	u32 rmca;	/* 0x228 RMCA Rx multicast packet counter */
	u32 rbca;	/* 0x22C Rx broadcast packet counter */
	u32 rxcf;	/* 0x230 Rx control frame packet counter */
	u32 rxpf;	/* 0x234 Rx pause frame packet counter */
	u32 rxuo;	/* 0x238 Rx unknown OP code counter */
	u32 raln;	/* 0x23C Rx alignment error counter */
	u32 rflr;	/* 0x240 Rx frame length error counter */
	u32 rcde;	/* 0x244 Rx code error counter */
	u32 rcse;	/* 0x248 Rx carrier sense error counter */
	u32 rund;	/* 0x24C Rx undersize packet counter */
	u32 rovr;	/* 0x250 Rx oversize packet counter */
	u32 rfrg;	/* 0x254 Rx fragments counter */
	u32 rjbr;	/* 0x258 Rx jabber counter */
	u32 rdrp;	/* 0x25C Rx drop */
	u32 tbyt;	/* 0x260 Tx byte counter */
	u32 tpkt;	/* 0x264 Tx packet counter */
	u32 tmca;	/* 0x268 Tx multicast packet counter */
	u32 tbca;	/* 0x26C Tx broadcast packet counter */
	u32 txpf;	/* 0x270 Tx pause control frame counter */
	u32 tdfr;	/* 0x274 Tx deferral packet counter */
	u32 tedf;	/* 0x278 Tx excessive deferral packet counter */
	u32 tscl;	/* 0x27C Tx single collision packet counter */
	u32 tmcl;	/* 0x280 Tx multiple collision packet counter */
	u32 tlcl;	/* 0x284 Tx late collision packet counter */
	u32 txcl;	/* 0x288 Tx excessive collision packet counter */
	u32 tncl;	/* 0x28C Tx total collision counter */
	u32 reserved0290[1];
	u32 tdrp;	/* 0x294 Tx drop frame counter */
	u32 tjbr;	/* 0x298 Tx jabber frame counter */
	u32 tfcs;	/* 0x29C Tx FCS error counter */
	u32 txcf;	/* 0x2A0 Tx control frame counter */
	u32 tovr;	/* 0x2A4 Tx oversize frame counter */
	u32 tund;	/* 0x2A8 Tx undersize frame counter */
	u32 tfrg;	/* 0x2AC Tx fragments frame counter */
	u32 car1;	/* 0x2B0 carry register one register* */
	u32 car2;	/* 0x2B4 carry register two register* */
	u32 cam1;	/* 0x2B8 carry register one mask register */
	u32 cam2;	/* 0x2BC carry register two mask register */
	u32 reserved02c0[848];
};
242
/* struct dtsec_cfg - dTSEC configuration
 * Transmit half-duplex flow control, under software control for 10/100-Mbps
 * half-duplex media. If set, back pressure is applied to media by raising
 * carrier.
 * halfdup_retransmit:
 * Number of retransmission attempts following a collision.
 * If this is exceeded dTSEC aborts transmission due to excessive collisions.
 * The standard specifies the attempt limit to be 15.
 * halfdup_coll_window:
 * The number of bytes of the frame during which collisions may occur.
 * The default value of 55 corresponds to the frame byte at the end of the
 * standard 512-bit slot time window. If collisions are detected after this
 * byte, the late collision event is asserted and transmission of current
 * frame is aborted.
 * tx_pad_crc:
 * Pad and append CRC. If set, the MAC pads all transmitted short frames and
 * appends a CRC to every frame regardless of padding requirement.
 * tx_pause_time:
 * Transmit pause time value. This pause value is used as part of the pause
 * frame to be sent when a transmit pause frame is initiated.
 * If set to 0 this disables transmission of pause frames.
 * preamble_len:
 * Length, in bytes, of the preamble field preceding each Ethernet
 * start-of-frame delimiter byte. The default value of 0x7 should be used in
 * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
 * rx_prepend:
 * Packet alignment padding length. The specified number of bytes (1-31)
 * of zero padding are inserted before the start of each received frame.
 * For Ethernet, where optional preamble extraction is enabled, the padding
 * appears before the preamble, otherwise the padding precedes the
 * layer 2 header.
 *
 * This structure contains basic dTSEC configuration and must be passed to
 * init() function. A default set of configuration values can be
 * obtained by calling set_dflts().
 */
struct dtsec_cfg {
	u16 halfdup_retransmit;
	u16 halfdup_coll_window;
	bool tx_pad_crc;
	u16 tx_pause_time;
	bool ptp_tsu_en;		/* enable the 1588 time-stamping unit */
	bool ptp_exception_en;		/* unmask 1588 TS receive-error events */
	u32 preamble_len;
	u32 rx_prepend;
	u16 tx_pause_time_extd;		/* PTV extended pause time (upper half) */
	u16 maximum_frame;		/* programmed into MAXFRM at init */
	/* IPGIFG fields, each limited to MAX_INTER_PACKET_GAP (7 bits) */
	u32 non_back_to_back_ipg1;
	u32 non_back_to_back_ipg2;
	u32 min_ifg_enforcement;
	u32 back_to_back_ipg;
};
295
/* Per-instance dTSEC MAC driver state */
struct fman_mac {
	/* pointer to dTSEC memory mapped registers */
	struct dtsec_regs __iomem *regs;
	/* MAC address of device */
	u64 addr;
	/* Ethernet physical interface */
	phy_interface_t phy_if;
	u16 max_speed;			/* link speed cap; must be < SPEED_10000 */
	struct mac_device *dev_id;	/* device cookie used by the exception cbs */
	fman_mac_exception_cb *exception_cb;	/* error-event callback */
	fman_mac_exception_cb *event_cb;	/* normal-event callback */
	/* Number of individual addresses in registers for this station */
	u8 num_of_ind_addr_in_regs;
	/* pointer to driver's global address hash table */
	struct eth_hash_t *multicast_addr_hash;
	/* pointer to driver's individual address hash table */
	struct eth_hash_t *unicast_addr_hash;
	u8 mac_id;			/* index of this MAC within the FMan */
	u32 exceptions;			/* currently enabled exception mask */
	bool ptp_tsu_enabled;		/* 1588 time-stamping unit active */
	bool en_tsu_err_exception;	/* report 1588 TS receive errors */
	/* config params; freed after init, NULL thereafter (see is_init_done) */
	struct dtsec_cfg *dtsec_drv_param;
	void *fm;			/* opaque handle to the owning FMan */
	struct fman_rev_info fm_rev_info;	/* silicon revision, gates errata */
	bool basex_if;			/* 1000Base-X (vs SGMII) TBI mode */
	struct phy_device *tbiphy;	/* internal TBI PCS pseudo-PHY */
};
323
set_dflts(struct dtsec_cfg * cfg)324 static void set_dflts(struct dtsec_cfg *cfg)
325 {
326 cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
327 cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
328 cfg->tx_pad_crc = true;
329 cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
330 /* PHY address 0 is reserved (DPAA RM) */
331 cfg->rx_prepend = DEFAULT_RX_PREPEND;
332 cfg->ptp_tsu_en = true;
333 cfg->ptp_exception_en = true;
334 cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
335 cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
336 cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
337 cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
338 cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
339 cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
340 cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
341 }
342
/* Program the station MAC address into MACSTNADDR1/2.
 * The hardware stores the six octets in reversed order: MACSTNADDR1
 * holds octets 6..3 and the top half of MACSTNADDR2 holds octets 2..1.
 */
static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr)
{
	u32 lower, upper;

	lower = ((u32)adr[5] << 24) | ((u32)adr[4] << 16) |
		((u32)adr[3] << 8) | (u32)adr[2];
	upper = ((u32)adr[1] << 24) | ((u32)adr[0] << 16);

	iowrite32be(lower, &regs->macstnaddr1);
	iowrite32be(upper, &regs->macstnaddr2);
}
354
/* One-time hardware initialization of the dTSEC block.
 *
 * @regs:           mapped dTSEC register block
 * @cfg:            validated configuration (see check_init_parameters())
 * @iface:          PHY interface mode
 * @iface_speed:    interface speed (SPEED_* constant)
 * @addr:           station MAC address packed into a u64; 0 leaves the
 *                  station-address registers unprogrammed
 * @exception_mask: initial interrupt mask written to IMASK
 * @tbi_addr:       PHY address assigned to the internal TBI
 *
 * Returns 0 on success, or -EINVAL when TSEC_ID2 reports that this
 * instance does not support the requested interface mode.
 *
 * NOTE(review): the register writes below follow the reset/config
 * ordering of the reference flow - do not reorder them.
 */
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
		phy_interface_t iface, u16 iface_speed, u64 addr,
		u32 exception_mask, u8 tbi_addr)
{
	bool is_rgmii, is_sgmii, is_qsgmii;
	enet_addr_t eth_addr;
	u32 tmp;
	int i;

	/* Soft reset: assert then release MACCFG1[SOFT_RESET] */
	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
	iowrite32be(0, &regs->maccfg1);

	/* dtsec_id2 holds the per-instance capability bits */
	tmp = ioread32be(&regs->tsec_id2);

	/* check RGMII support */
	if (iface == PHY_INTERFACE_MODE_RGMII ||
	    iface == PHY_INTERFACE_MODE_RGMII_ID ||
	    iface == PHY_INTERFACE_MODE_RGMII_RXID ||
	    iface == PHY_INTERFACE_MODE_RGMII_TXID ||
	    iface == PHY_INTERFACE_MODE_RMII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	/* SGMII/MII also require the reduced interface */
	if (iface == PHY_INTERFACE_MODE_SGMII ||
	    iface == PHY_INTERFACE_MODE_MII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
		   iface == PHY_INTERFACE_MODE_RGMII_ID ||
		   iface == PHY_INTERFACE_MODE_RGMII_RXID ||
		   iface == PHY_INTERFACE_MODE_RGMII_TXID;
	is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
	is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;

	/* ECNTRL: select the MAC-PHY interface mode and speed */
	tmp = 0;
	if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
		tmp |= DTSEC_ECNTRL_GMIIM;
	if (is_sgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
	if (is_qsgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
			DTSEC_ECNTRL_QSGMIIM);
	if (is_rgmii)
		tmp |= DTSEC_ECNTRL_RPM;
	if (iface_speed == SPEED_100)
		tmp |= DTSEC_ECNTRL_R100M;

	iowrite32be(tmp, &regs->ecntrl);

	/* PTV: pause time value (and optional extended pause time) */
	tmp = 0;

	if (cfg->tx_pause_time)
		tmp |= cfg->tx_pause_time;
	if (cfg->tx_pause_time_extd)
		tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
	iowrite32be(tmp, &regs->ptv);

	/* RCTRL: rx prepend padding (5-bit PAL field) */
	tmp = 0;
	tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
	/* Accept short frames */
	tmp |= RCTRL_RSF;

	iowrite32be(tmp, &regs->rctrl);

	/* Assign a Phy Address to the TBI (TBIPA).
	 * Done also in cases where TBI is not selected to avoid conflict with
	 * the external PHY's Physical address
	 */
	iowrite32be(tbi_addr, &regs->tbipa);

	iowrite32be(0, &regs->tmr_ctrl);

	/* 1588 time-stamping: ack any stale TS event, then unmask if wanted */
	if (cfg->ptp_tsu_en) {
		tmp = 0;
		tmp |= TMR_PEVENT_TSRE;
		iowrite32be(tmp, &regs->tmr_pevent);

		if (cfg->ptp_exception_en) {
			tmp = 0;
			tmp |= TMR_PEMASK_TSREEN;
			iowrite32be(tmp, &regs->tmr_pemask);
		}
	}

	/* MACCFG1: honor rx/tx pause frames; rx/tx stay disabled until
	 * dtsec_enable()
	 */
	tmp = 0;
	tmp |= MACCFG1_RX_FLOW;
	tmp |= MACCFG1_TX_FLOW;
	iowrite32be(tmp, &regs->maccfg1);

	/* MACCFG2: interface width, preamble, padding, duplex */
	tmp = 0;

	if (iface_speed < SPEED_1000)
		tmp |= MACCFG2_NIBBLE_MODE;
	else if (iface_speed == SPEED_1000)
		tmp |= MACCFG2_BYTE_MODE;

	tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
		MACCFG2_PREAMBLE_LENGTH_MASK;
	if (cfg->tx_pad_crc)
		tmp |= MACCFG2_PAD_CRC_EN;
	/* Full Duplex */
	tmp |= MACCFG2_FULL_DUPLEX;
	iowrite32be(tmp, &regs->maccfg2);

	/* IPGIFG: the four inter-packet/inter-frame gap fields */
	tmp = (((cfg->non_back_to_back_ipg1 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_1)
	       | ((cfg->non_back_to_back_ipg2 <<
		   IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
		  & IPGIFG_NON_BACK_TO_BACK_IPG_2)
	       | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
		  & IPGIFG_MIN_IFG_ENFORCEMENT)
	       | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
	iowrite32be(tmp, &regs->ipgifg);

	/* HAFDUP: half-duplex collision parameters */
	tmp = 0;
	tmp |= HAFDUP_EXCESS_DEFER;
	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
		& HAFDUP_RETRANSMISSION_MAX);
	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);

	iowrite32be(tmp, &regs->hafdup);

	/* Initialize Maximum frame length */
	iowrite32be(cfg->maximum_frame, &regs->maxfrm);

	/* Mask all MIB carry events */
	iowrite32be(0xffffffff, &regs->cam1);
	iowrite32be(0xffffffff, &regs->cam2);

	iowrite32be(exception_mask, &regs->imask);

	/* Clear any pending interrupt events */
	iowrite32be(0xffffffff, &regs->ievent);

	if (addr) {
		MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
		set_mac_address(regs, (const u8 *)eth_addr);
	}

	/* HASH: clear the individual and group hash tables */
	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
		/* Initialize IADDRx */
		iowrite32be(0, &regs->igaddr[i]);
		/* Initialize GADDRx */
		iowrite32be(0, &regs->gaddr[i]);
	}

	return 0;
}
506
/* Set or clear one bit of the 512-bit address hash table.
 * Buckets 0-255 map onto igaddr[0..7] and 256-511 onto gaddr[0..7];
 * within a register, bucket bit 0 corresponds to the MSB.
 */
static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
		       bool enable)
{
	int word = (bucket >> 5) & 0xf;
	u32 mask = 0x80000000 >> (bucket & 0x1f);
	u32 __iomem *hash_reg;
	u32 val;

	hash_reg = (word > 7) ? &regs->gaddr[word - 8] : &regs->igaddr[word];

	val = ioread32be(hash_reg);
	if (enable)
		val |= mask;
	else
		val &= ~mask;
	iowrite32be(val, hash_reg);
}
525
/* Validate the user-supplied dtsec_cfg against the hardware field limits
 * before init() programs it into the registers.
 * Returns 0 when everything is in range, -EINVAL otherwise.
 */
static int check_init_parameters(struct fman_mac *dtsec)
{
	/* This driver only handles the 1G dTSEC block */
	if (dtsec->max_speed >= SPEED_10000) {
		pr_err("1G MAC driver supports 1G or lower speeds\n");
		return -EINVAL;
	}
	/* RCTRL[PAL] is a 5-bit field: at most 31 padding bytes */
	if ((dtsec->dtsec_drv_param)->rx_prepend >
	    MAX_PACKET_ALIGNMENT) {
		pr_err("packetAlignmentPadding can't be > than %d\n",
		       MAX_PACKET_ALIGNMENT);
		return -EINVAL;
	}
	/* Each IPGIFG gap field is 7 bits wide */
	if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
	     MAX_INTER_PACKET_GAP) ||
	    ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
	     MAX_INTER_PACKET_GAP) ||
	    ((dtsec->dtsec_drv_param)->back_to_back_ipg >
	     MAX_INTER_PACKET_GAP)) {
		pr_err("Inter packet gap can't be greater than %d\n",
		       MAX_INTER_PACKET_GAP);
		return -EINVAL;
	}
	/* HAFDUP[RETRANSMISSION_MAX] is a 4-bit field */
	if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
	    MAX_RETRANSMISSION) {
		pr_err("maxRetransmission can't be greater than %d\n",
		       MAX_RETRANSMISSION);
		return -EINVAL;
	}
	/* HAFDUP[COLLISION_WINDOW] is a 10-bit field */
	if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
	    MAX_COLLISION_WINDOW) {
		pr_err("collisionWindow can't be greater than %d\n",
		       MAX_COLLISION_WINDOW);
		return -EINVAL;
	}
	/* Both interrupt callbacks are mandatory */
	if (!dtsec->exception_cb) {
		pr_err("uninitialized exception_cb\n");
		return -EINVAL;
	}
	if (!dtsec->event_cb) {
		pr_err("uninitialized event_cb\n");
		return -EINVAL;
	}

	return 0;
}
574
get_exception_flag(enum fman_mac_exceptions exception)575 static int get_exception_flag(enum fman_mac_exceptions exception)
576 {
577 u32 bit_mask;
578
579 switch (exception) {
580 case FM_MAC_EX_1G_BAB_RX:
581 bit_mask = DTSEC_IMASK_BREN;
582 break;
583 case FM_MAC_EX_1G_RX_CTL:
584 bit_mask = DTSEC_IMASK_RXCEN;
585 break;
586 case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
587 bit_mask = DTSEC_IMASK_GTSCEN;
588 break;
589 case FM_MAC_EX_1G_BAB_TX:
590 bit_mask = DTSEC_IMASK_BTEN;
591 break;
592 case FM_MAC_EX_1G_TX_CTL:
593 bit_mask = DTSEC_IMASK_TXCEN;
594 break;
595 case FM_MAC_EX_1G_TX_ERR:
596 bit_mask = DTSEC_IMASK_TXEEN;
597 break;
598 case FM_MAC_EX_1G_LATE_COL:
599 bit_mask = DTSEC_IMASK_LCEN;
600 break;
601 case FM_MAC_EX_1G_COL_RET_LMT:
602 bit_mask = DTSEC_IMASK_CRLEN;
603 break;
604 case FM_MAC_EX_1G_TX_FIFO_UNDRN:
605 bit_mask = DTSEC_IMASK_XFUNEN;
606 break;
607 case FM_MAC_EX_1G_MAG_PCKT:
608 bit_mask = DTSEC_IMASK_MAGEN;
609 break;
610 case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
611 bit_mask = DTSEC_IMASK_MMRDEN;
612 break;
613 case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
614 bit_mask = DTSEC_IMASK_MMWREN;
615 break;
616 case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
617 bit_mask = DTSEC_IMASK_GRSCEN;
618 break;
619 case FM_MAC_EX_1G_DATA_ERR:
620 bit_mask = DTSEC_IMASK_TDPEEN;
621 break;
622 case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
623 bit_mask = DTSEC_IMASK_MSROEN;
624 break;
625 default:
626 bit_mask = 0;
627 break;
628 }
629
630 return bit_mask;
631 }
632
/* Checks if dTSEC driver parameters were initialized.
 * The config struct is released once init() completes, so a NULL
 * pointer means initialization is done.
 */
static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
{
	return !dtsec_drv_params;
}
641
dtsec_get_max_frame_length(struct fman_mac * dtsec)642 static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
643 {
644 struct dtsec_regs __iomem *regs = dtsec->regs;
645
646 if (is_init_done(dtsec->dtsec_drv_param))
647 return 0;
648
649 return (u16)ioread32be(®s->maxfrm);
650 }
651
/* dTSEC interrupt service routine.
 * Reads the pending (and unmasked) events, acknowledges them, dispatches
 * each to the exception callback, and - on FMan v2 silicon - applies the
 * FM_TX_LOCKUP_ERRATA_DTSEC6 detection/recovery sequence when a Tx FIFO
 * underrun is signalled.
 */
static void dtsec_isr(void *handle)
{
	struct fman_mac *dtsec = (struct fman_mac *)handle;
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 event;

	/* do not handle MDIO events */
	event = ioread32be(&regs->ievent) &
		(u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));

	/* only act on events that are currently unmasked */
	event &= ioread32be(&regs->imask);

	/* acknowledge (clear) the events we are about to handle */
	iowrite32be(event, &regs->ievent);

	if (event & DTSEC_IMASK_BREN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
	if (event & DTSEC_IMASK_RXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
	if (event & DTSEC_IMASK_GTSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
	if (event & DTSEC_IMASK_BTEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
	if (event & DTSEC_IMASK_TXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
	if (event & DTSEC_IMASK_TXEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
	if (event & DTSEC_IMASK_LCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
	if (event & DTSEC_IMASK_CRLEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
	if (event & DTSEC_IMASK_XFUNEN) {
		/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
		if (dtsec->fm_rev_info.major == 2) {
			u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
			/* a. Write 0x00E0_0C00 to DTSEC_ID
			 * This is a read only register
			 * b. Read and save the value of TPKT
			 */
			tpkt1 = ioread32be(&regs->tpkt);

			/* c. Read the register at dTSEC address offset 0x32C */
			tmp_reg1 = ioread32be(&regs->reserved02c0[27]);

			/* d. Compare bits [9:15] to bits [25:31] of the
			 * register at address offset 0x32C.
			 */
			if ((tmp_reg1 & 0x007F0000) !=
			    (tmp_reg1 & 0x0000007F)) {
				/* If they are not equal, save the value of
				 * this register and wait for at least
				 * MAXFRM*16 ns
				 */
				usleep_range((u32)(min
					     (dtsec_get_max_frame_length(dtsec) *
					      16 / 1000, 1)), (u32)
					     (min(dtsec_get_max_frame_length
					      (dtsec) * 16 / 1000, 1) + 1));
			}

			/* e. Read and save TPKT again and read the register
			 * at dTSEC address offset 0x32C again
			 */
			tpkt2 = ioread32be(&regs->tpkt);
			tmp_reg2 = ioread32be(&regs->reserved02c0[27]);

			/* f. Compare the value of TPKT saved in step b to
			 * value read in step e. Also compare bits [9:15] of
			 * the register at offset 0x32C saved in step d to the
			 * value of bits [9:15] saved in step e. If the two
			 * registers values are unchanged, then the transmit
			 * portion of the dTSEC controller is locked up and
			 * the user should proceed to the recover sequence.
			 */
			if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
			    (tmp_reg2 & 0x007F0000))) {
				/* recover sequence */

				/* a.Write a 1 to RCTRL[GRS] */

				iowrite32be(ioread32be(&regs->rctrl) |
					    RCTRL_GRS, &regs->rctrl);

				/* b.Wait until IEVENT[GRSC]=1, or at least
				 * 100 us has elapsed.
				 */
				for (i = 0; i < 100; i++) {
					if (ioread32be(&regs->ievent) &
					    DTSEC_IMASK_GRSCEN)
						break;
					udelay(1);
				}
				if (ioread32be(&regs->ievent) &
				    DTSEC_IMASK_GRSCEN)
					iowrite32be(DTSEC_IMASK_GRSCEN,
						    &regs->ievent);
				else
					pr_debug("Rx lockup due to Tx lockup\n");

				/* c.Write a 1 to bit n of FM_RSTC
				 * (offset 0x0CC of FPM)
				 */
				fman_reset_mac(dtsec->fm, dtsec->mac_id);

				/* d.Wait 4 Tx clocks (32 ns) */
				udelay(1);

				/* e.Write a 0 to bit n of FM_RSTC. */
				/* cleared by FMAN
				 */
			}
		}

		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
	}
	if (event & DTSEC_IMASK_MAGEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
	if (event & DTSEC_IMASK_GRSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
	if (event & DTSEC_IMASK_TDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
	if (event & DTSEC_IMASK_RDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);

	/* masked interrupts - these should never have fired */
	WARN_ON(event & DTSEC_IMASK_ABRTEN);
	WARN_ON(event & DTSEC_IMASK_IFERREN);
}
781
dtsec_1588_isr(void * handle)782 static void dtsec_1588_isr(void *handle)
783 {
784 struct fman_mac *dtsec = (struct fman_mac *)handle;
785 struct dtsec_regs __iomem *regs = dtsec->regs;
786 u32 event;
787
788 if (dtsec->ptp_tsu_enabled) {
789 event = ioread32be(®s->tmr_pevent);
790 event &= ioread32be(®s->tmr_pemask);
791
792 if (event) {
793 iowrite32be(event, ®s->tmr_pevent);
794 WARN_ON(event & TMR_PEVENT_TSRE);
795 dtsec->exception_cb(dtsec->dev_id,
796 FM_MAC_EX_1G_1588_TS_RX_ERR);
797 }
798 }
799 }
800
/* Release everything acquired during initialization: the FMan interrupt
 * registrations and both driver-side address hash tables.  The table
 * pointers are reset to NULL so a repeated call cannot double-free.
 */
static void free_init_resources(struct fman_mac *dtsec)
{
	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			     FMAN_INTR_TYPE_ERR);
	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			     FMAN_INTR_TYPE_NORMAL);

	/* release the driver's group hash table */
	free_hash_table(dtsec->multicast_addr_hash);
	dtsec->multicast_addr_hash = NULL;

	/* release the driver's individual hash table */
	free_hash_table(dtsec->unicast_addr_hash);
	dtsec->unicast_addr_hash = NULL;
}
816
graceful_start(struct fman_mac * dtsec)817 static void graceful_start(struct fman_mac *dtsec)
818 {
819 struct dtsec_regs __iomem *regs = dtsec->regs;
820
821 iowrite32be(ioread32be(®s->tctrl) & ~TCTRL_GTS, ®s->tctrl);
822 iowrite32be(ioread32be(®s->rctrl) & ~RCTRL_GRS, ®s->rctrl);
823 }
824
graceful_stop(struct fman_mac * dtsec)825 static void graceful_stop(struct fman_mac *dtsec)
826 {
827 struct dtsec_regs __iomem *regs = dtsec->regs;
828 u32 tmp;
829
830 /* Graceful stop - Assert the graceful Rx stop bit */
831 tmp = ioread32be(®s->rctrl) | RCTRL_GRS;
832 iowrite32be(tmp, ®s->rctrl);
833
834 if (dtsec->fm_rev_info.major == 2) {
835 /* Workaround for dTSEC Errata A002 */
836 usleep_range(100, 200);
837 } else {
838 /* Workaround for dTSEC Errata A004839 */
839 usleep_range(10, 50);
840 }
841
842 /* Graceful stop - Assert the graceful Tx stop bit */
843 if (dtsec->fm_rev_info.major == 2) {
844 /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
845 pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
846 } else {
847 tmp = ioread32be(®s->tctrl) | TCTRL_GTS;
848 iowrite32be(tmp, ®s->tctrl);
849
850 /* Workaround for dTSEC Errata A0012, A0014 */
851 usleep_range(10, 50);
852 }
853 }
854
dtsec_enable(struct fman_mac * dtsec)855 static int dtsec_enable(struct fman_mac *dtsec)
856 {
857 struct dtsec_regs __iomem *regs = dtsec->regs;
858 u32 tmp;
859
860 if (!is_init_done(dtsec->dtsec_drv_param))
861 return -EINVAL;
862
863 /* Enable */
864 tmp = ioread32be(®s->maccfg1);
865 tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
866 iowrite32be(tmp, ®s->maccfg1);
867
868 /* Graceful start - clear the graceful Rx/Tx stop bit */
869 graceful_start(dtsec);
870
871 return 0;
872 }
873
dtsec_disable(struct fman_mac * dtsec)874 static void dtsec_disable(struct fman_mac *dtsec)
875 {
876 struct dtsec_regs __iomem *regs = dtsec->regs;
877 u32 tmp;
878
879 WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param));
880
881 /* Graceful stop - Assert the graceful Rx/Tx stop bit */
882 graceful_stop(dtsec);
883
884 tmp = ioread32be(®s->maccfg1);
885 tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
886 iowrite32be(tmp, ®s->maccfg1);
887 }
888
/* Configure transmission of 802.3x pause frames.
 * @pause_time: PTV pause quanta; 0 disables pause-frame transmission.
 * The MAC is gracefully stopped around the register updates and resumed
 * afterwards.  Returns -EINVAL before init() completes or when the
 * pause time violates the A003 errata limit on FMan v2.
 *
 * Fix: the A003 errata validation previously ran AFTER graceful_stop(),
 * and its early return skipped graceful_start() - leaving the MAC
 * gracefully stopped on the error path.  Validate before stopping.
 */
static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
				     u8 __maybe_unused priority,
				     u16 pause_time,
				     u16 __maybe_unused thresh_time)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 ptv = 0;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
	if (pause_time && dtsec->fm_rev_info.major == 2 &&
	    pause_time <= 320) {
		pr_warn("pause-time: %d illegal.Should be > 320\n",
			pause_time);
		return -EINVAL;
	}

	graceful_stop(dtsec);

	if (pause_time) {
		/* keep the extended pause time, replace the base time */
		ptv = ioread32be(&regs->ptv);
		ptv &= PTV_PTE_MASK;
		ptv |= pause_time & PTV_PT_MASK;
		iowrite32be(ptv, &regs->ptv);

		/* trigger the transmission of a flow-control pause frame */
		iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
			    &regs->maccfg1);
	} else {
		iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
			    &regs->maccfg1);
	}

	graceful_start(dtsec);

	return 0;
}
926
/* Enable or disable honoring of received 802.3x pause frames by
 * toggling MACCFG1[RX_FLOW].  The MAC is gracefully stopped around the
 * update.  Returns -EINVAL before init() completes.
 */
static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 cfg;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	graceful_stop(dtsec);

	cfg = ioread32be(&regs->maccfg1);
	cfg = en ? (cfg | MACCFG1_RX_FLOW) : (cfg & ~MACCFG1_RX_FLOW);
	iowrite32be(cfg, &regs->maccfg1);

	graceful_start(dtsec);

	return 0;
}
948
/* Change the station MAC address at runtime.  Traffic is gracefully
 * stopped around the register update and resumed afterwards.
 * Returns -EINVAL if called before init() has completed.
 */
static int dtsec_modify_mac_address(struct fman_mac *dtsec,
				    const enet_addr_t *enet_addr)
{
	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	graceful_stop(dtsec);

	/* Initialize MAC Station Address registers (1 & 2).
	 * Station address octets have to be swapped (see set_mac_address()).
	 */
	dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
	set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));

	graceful_start(dtsec);

	return 0;
}
967
/* Add a unicast or multicast address to the hardware hash filter.
 * The bucket index is derived from the bit-reversed CRC-32 of the
 * address; a shadow entry is also kept in the driver's hash lists so
 * the filter bit can be cleared later when the last address hashing to
 * that bucket is removed (see dtsec_del_hash_mac_address()).
 * Returns 0 on success, -EINVAL on a bad state/address, -ENOMEM if the
 * shadow entry cannot be allocated.
 */
static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
				      enet_addr_t *eth_addr)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct eth_hash_entry *hash_entry;
	u64 addr;
	s32 bucket;
	u32 crc = 0xFFFFFFFF;
	bool mcast, ghtx;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	addr = ENET_ADDR_TO_UINT64(*eth_addr);

	/* GHTX: the 8 individual-address hash registers are chained with
	 * the 8 group registers into one extended (512-bit) group filter.
	 */
	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	/* considering the 9 highest order bits in crc H[8:0]:
	 *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
	 *and H[5:1] (next 5 bits) identify the hash bit
	 *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register
	 *and H[4:0] (next 5 bits) identify the hash bit.
	 *
	 *In bucket index output the low 5 bits identify the hash register
	 *bit, while the higher 4 bits identify the hash register
	 */

	if (ghtx) {
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set in gaddr instead of
		 *igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}

	set_bucket(dtsec->regs, bucket, true);

	/* Create element to be added to the driver hash table */
	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
	if (!hash_entry)
		return -ENOMEM;
	hash_entry->addr = addr;
	INIT_LIST_HEAD(&hash_entry->node);

	/* Track the address in the matching shadow list so removal can
	 * tell when a hardware bucket becomes unused.
	 */
	if (addr & MAC_GROUP_ADDRESS)
		/* Group Address */
		list_add_tail(&hash_entry->node,
			      &dtsec->multicast_addr_hash->lsts[bucket]);
	else
		list_add_tail(&hash_entry->node,
			      &dtsec->unicast_addr_hash->lsts[bucket]);

	return 0;
}
1034
/* Turn multicast-promiscuous (all-multi) reception on or off by
 * toggling the MPROM bit in the receive control register.
 */
static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 rctrl;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	rctrl = ioread32be(&regs->rctrl);
	if (enable)
		rctrl |= RCTRL_MPROM;
	else
		rctrl &= ~RCTRL_MPROM;

	iowrite32be(rctrl, &regs->rctrl);

	return 0;
}
1053
/* Enable or disable IEEE 1588 hardware timestamping on both the
 * receive (RCTRL_RTSE) and transmit (TCTRL_TTSE) paths.
 */
static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 rx_ctrl, tx_ctrl;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	rx_ctrl = ioread32be(&regs->rctrl);
	tx_ctrl = ioread32be(&regs->tctrl);

	if (enable) {
		rx_ctrl |= RCTRL_RTSE;
		tx_ctrl |= TCTRL_TTSE;
	} else {
		rx_ctrl &= ~RCTRL_RTSE;
		tx_ctrl &= ~TCTRL_TTSE;
	}

	iowrite32be(rx_ctrl, &regs->rctrl);
	iowrite32be(tx_ctrl, &regs->tctrl);

	return 0;
}
1078
/* Remove an address previously added with dtsec_add_hash_mac_address().
 * The bucket is recomputed with the same CRC scheme; the entry is
 * unlinked from the driver's shadow list, and the hardware filter bit is
 * cleared only when that bucket's list becomes empty, since multiple
 * addresses may hash to the same bucket.
 */
static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
				      enet_addr_t *eth_addr)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct list_head *pos;
	struct eth_hash_entry *hash_entry = NULL;
	u64 addr;
	s32 bucket;
	u32 crc = 0xFFFFFFFF;
	bool mcast, ghtx;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	addr = ENET_ADDR_TO_UINT64(*eth_addr);

	/* GHTX: extended 512-bit group hash table is in use */
	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	/* Same bucket derivation as in dtsec_add_hash_mac_address() */
	if (ghtx) {
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set
		 * in gaddr instead of igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}

	if (addr & MAC_GROUP_ADDRESS) {
		/* Group Address */
		list_for_each(pos,
			      &dtsec->multicast_addr_hash->lsts[bucket]) {
			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
			if (hash_entry && hash_entry->addr == addr) {
				list_del_init(&hash_entry->node);
				kfree(hash_entry);
				break;
			}
		}
		/* Last address in this bucket gone: clear the filter bit */
		if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
			set_bucket(dtsec->regs, bucket, false);
	} else {
		/* Individual Address */
		list_for_each(pos,
			      &dtsec->unicast_addr_hash->lsts[bucket]) {
			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
			if (hash_entry && hash_entry->addr == addr) {
				list_del_init(&hash_entry->node);
				kfree(hash_entry);
				break;
			}
		}
		if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
			set_bucket(dtsec->regs, bucket, false);
	}

	/* address does not exist */
	WARN_ON(!hash_entry);

	return 0;
}
1150
/* Enable or disable promiscuous reception.  Both the unicast (UPROM)
 * and multicast (MPROM) promiscuous bits in RCTRL follow new_val.
 */
static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 rctrl;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	/* Set unicast promiscuous */
	rctrl = ioread32be(&regs->rctrl);
	rctrl = new_val ? (rctrl | RCTRL_UPROM) : (rctrl & ~RCTRL_UPROM);
	iowrite32be(rctrl, &regs->rctrl);

	/* Set multicast promiscuous */
	rctrl = ioread32be(&regs->rctrl);
	rctrl = new_val ? (rctrl | RCTRL_MPROM) : (rctrl & ~RCTRL_MPROM);
	iowrite32be(rctrl, &regs->rctrl);

	return 0;
}
1179
/* Reprogram the MAC interface mode for a new link speed.
 * Selects byte mode (GMII, 1000 Mb/s) or nibble mode (MII, 10/100) in
 * MACCFG2, sets the R100M ecntrl bit for 100 Mb/s, and always forces
 * full duplex.  The MAC is gracefully stopped around the update.
 */
static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 cfg;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	graceful_stop(dtsec);

	cfg = ioread32be(&regs->maccfg2);

	/* Full Duplex */
	cfg |= MACCFG2_FULL_DUPLEX;

	cfg &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
	if (speed == SPEED_1000)
		cfg |= MACCFG2_BYTE_MODE;
	else if (speed < SPEED_1000)
		cfg |= MACCFG2_NIBBLE_MODE;
	iowrite32be(cfg, &regs->maccfg2);

	cfg = ioread32be(&regs->ecntrl);
	if (speed == SPEED_100)
		cfg |= DTSEC_ECNTRL_R100M;
	else
		cfg &= ~DTSEC_ECNTRL_R100M;
	iowrite32be(cfg, &regs->ecntrl);

	graceful_start(dtsec);

	return 0;
}
1213
dtsec_restart_autoneg(struct fman_mac * dtsec)1214 static int dtsec_restart_autoneg(struct fman_mac *dtsec)
1215 {
1216 u16 tmp_reg16;
1217
1218 if (!is_init_done(dtsec->dtsec_drv_param))
1219 return -EINVAL;
1220
1221 tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
1222
1223 tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1224 tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
1225 BMCR_FULLDPLX | BMCR_SPEED1000);
1226
1227 phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
1228
1229 return 0;
1230 }
1231
adjust_link_dtsec(struct mac_device * mac_dev)1232 static void adjust_link_dtsec(struct mac_device *mac_dev)
1233 {
1234 struct phy_device *phy_dev = mac_dev->phy_dev;
1235 struct fman_mac *fman_mac;
1236 bool rx_pause, tx_pause;
1237 int err;
1238
1239 fman_mac = mac_dev->fman_mac;
1240 if (!phy_dev->link) {
1241 dtsec_restart_autoneg(fman_mac);
1242
1243 return;
1244 }
1245
1246 dtsec_adjust_link(fman_mac, phy_dev->speed);
1247 mac_dev->update_speed(mac_dev, phy_dev->speed);
1248 fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
1249 err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
1250 if (err < 0)
1251 dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
1252 err);
1253 }
1254
/* Enable or disable reporting of a single MAC exception.
 * Regular exceptions map to bits in the IMASK interrupt mask register
 * (via get_exception_flag()); the 1588 timestamp-receive error is a
 * special case routed through TMR_PEMASK and only valid when the PTP
 * timestamping unit is enabled.
 * Returns 0 on success, -EINVAL for unknown/invalid exceptions or an
 * uninitialized MAC.
 */
static int dtsec_set_exception(struct fman_mac *dtsec,
			       enum fman_mac_exceptions exception, bool enable)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 bit_mask = 0;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
		bit_mask = get_exception_flag(exception);
		if (bit_mask) {
			/* Keep the software exception mask in sync with
			 * what is programmed into IMASK below.
			 */
			if (enable)
				dtsec->exceptions |= bit_mask;
			else
				dtsec->exceptions &= ~bit_mask;
		} else {
			pr_err("Undefined exception\n");
			return -EINVAL;
		}
		if (enable)
			iowrite32be(ioread32be(&regs->imask) | bit_mask,
				    &regs->imask);
		else
			iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
				    &regs->imask);
	} else {
		if (!dtsec->ptp_tsu_enabled) {
			pr_err("Exception valid for 1588 only\n");
			return -EINVAL;
		}
		switch (exception) {
		case FM_MAC_EX_1G_1588_TS_RX_ERR:
			if (enable) {
				dtsec->en_tsu_err_exception = true;
				iowrite32be(ioread32be(&regs->tmr_pemask) |
					    TMR_PEMASK_TSREEN,
					    &regs->tmr_pemask);
			} else {
				dtsec->en_tsu_err_exception = false;
				iowrite32be(ioread32be(&regs->tmr_pemask) &
					    ~TMR_PEMASK_TSREEN,
					    &regs->tmr_pemask);
			}
			break;
		default:
			pr_err("Undefined exception\n");
			return -EINVAL;
		}
	}

	return 0;
}
1308
/* Bring up the dTSEC hardware from the cached configuration.
 * Performs (in order): optional MAC reset, parameter validation,
 * register initialization, TBI PHY setup for SGMII, max-frame-length
 * propagation to the FMan, hash-table allocation, and interrupt handler
 * registration.  On success the config area is released, which marks
 * the MAC as initialized (see is_init_done()).
 * Returns 0 on success or a negative errno; any partially acquired
 * resources are released on failure.
 */
static int dtsec_init(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct dtsec_cfg *dtsec_drv_param;
	u16 max_frm_ln;
	int err;

	/* Double-init is an error: drv_param is freed on first success */
	if (is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (DEFAULT_RESET_ON_INIT &&
	    (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
		pr_err("Can't reset MAC!\n");
		return -EINVAL;
	}

	err = check_init_parameters(dtsec);
	if (err)
		return err;

	dtsec_drv_param = dtsec->dtsec_drv_param;

	err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
		   dtsec->max_speed, dtsec->addr, dtsec->exceptions,
		   dtsec->tbiphy->mdio.addr);
	if (err) {
		free_init_resources(dtsec);
		pr_err("DTSEC version doesn't support this i/f mode\n");
		return err;
	}

	if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
		u16 tmp_reg16;

		/* Configure the TBI PHY Control Register */
		tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		/* Clear the soft-reset bit, keep the clock selection */
		tmp_reg16 = TBICON_CLK_SELECT;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
			     BMCR_FULLDPLX | BMCR_SPEED1000);
		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

		/* Advertise 1000BASE-X or SGMII ability as configured */
		if (dtsec->basex_if)
			tmp_reg16 = TBIANA_1000X;
		else
			tmp_reg16 = TBIANA_SGMII;
		phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);

		/* Restart auto-negotiation with the new advertisement */
		tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
	}

	/* Max Frame Length */
	max_frm_ln = (u16)ioread32be(&regs->maxfrm);
	err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
	if (err) {
		pr_err("Setting max frame length failed\n");
		free_init_resources(dtsec);
		return -EINVAL;
	}

	dtsec->multicast_addr_hash =
	alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
	if (!dtsec->multicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("MC hash table is failed\n");
		return -ENOMEM;
	}

	dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
	if (!dtsec->unicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("UC hash table is failed\n");
		return -ENOMEM;
	}

	/* register err intr handler for dtsec to FPM (err) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
	/* register 1588 intr handler for TMR to FPM (normal) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);

	/* Releasing the config area marks the MAC as initialized */
	kfree(dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;

	return 0;
}
1402
/* Tear down a dTSEC instance: release init-time resources, the (possibly
 * still present) config area, and the MAC structure itself.
 * Always returns 0.
 */
static int dtsec_free(struct fman_mac *dtsec)
{
	free_init_resources(dtsec);

	kfree(dtsec->dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;
	kfree(dtsec);

	return 0;
}
1413
/* Allocate and pre-configure a dTSEC instance from mac_dev/params.
 * Only software state is touched here; the hardware itself is
 * programmed later by dtsec_init().
 * Returns the new instance, or NULL on allocation failure.
 */
static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
				     struct fman_mac_params *params)
{
	struct fman_mac *dtsec;
	struct dtsec_cfg *dtsec_drv_param;

	/* allocate memory for the dTSEC data structure. */
	dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
	if (!dtsec)
		return NULL;

	/* allocate memory for the d_tsec driver parameters data structure. */
	dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
	if (!dtsec_drv_param)
		goto err_dtsec;

	/* Plant parameter structure pointer */
	dtsec->dtsec_drv_param = dtsec_drv_param;

	set_dflts(dtsec_drv_param);

	dtsec->regs = mac_dev->vaddr;
	dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
	dtsec->max_speed = params->max_speed;
	dtsec->phy_if = mac_dev->phy_if;
	dtsec->mac_id = params->mac_id;
	/* Default set of exceptions to report via interrupts */
	dtsec->exceptions = (DTSEC_IMASK_BREN |
			     DTSEC_IMASK_RXCEN |
			     DTSEC_IMASK_BTEN |
			     DTSEC_IMASK_TXCEN |
			     DTSEC_IMASK_TXEEN |
			     DTSEC_IMASK_ABRTEN |
			     DTSEC_IMASK_LCEN |
			     DTSEC_IMASK_CRLEN |
			     DTSEC_IMASK_XFUNEN |
			     DTSEC_IMASK_IFERREN |
			     DTSEC_IMASK_MAGEN |
			     DTSEC_IMASK_TDPEEN |
			     DTSEC_IMASK_RDPEEN);
	dtsec->exception_cb = params->exception_cb;
	dtsec->event_cb = params->event_cb;
	dtsec->dev_id = mac_dev;
	dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
	dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;

	dtsec->fm = params->fm;
	dtsec->basex_if = params->basex_if;

	/* Save FMan revision */
	fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);

	return dtsec;

err_dtsec:
	kfree(dtsec);
	return NULL;
}
1471
dtsec_initialization(struct mac_device * mac_dev,struct device_node * mac_node,struct fman_mac_params * params)1472 int dtsec_initialization(struct mac_device *mac_dev,
1473 struct device_node *mac_node,
1474 struct fman_mac_params *params)
1475 {
1476 int err;
1477 struct fman_mac *dtsec;
1478 struct device_node *phy_node;
1479
1480 mac_dev->set_promisc = dtsec_set_promiscuous;
1481 mac_dev->change_addr = dtsec_modify_mac_address;
1482 mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
1483 mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
1484 mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
1485 mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
1486 mac_dev->set_exception = dtsec_set_exception;
1487 mac_dev->set_allmulti = dtsec_set_allmulti;
1488 mac_dev->set_tstamp = dtsec_set_tstamp;
1489 mac_dev->set_multi = fman_set_multi;
1490 mac_dev->adjust_link = adjust_link_dtsec;
1491 mac_dev->enable = dtsec_enable;
1492 mac_dev->disable = dtsec_disable;
1493
1494 mac_dev->fman_mac = dtsec_config(mac_dev, params);
1495 if (!mac_dev->fman_mac) {
1496 err = -EINVAL;
1497 goto _return;
1498 }
1499
1500 dtsec = mac_dev->fman_mac;
1501 dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
1502 dtsec->dtsec_drv_param->tx_pad_crc = true;
1503
1504 phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
1505 if (!phy_node) {
1506 pr_err("TBI PHY node is not available\n");
1507 err = -EINVAL;
1508 goto _return_fm_mac_free;
1509 }
1510
1511 dtsec->tbiphy = of_phy_find_device(phy_node);
1512 if (!dtsec->tbiphy) {
1513 pr_err("of_phy_find_device (TBI PHY) failed\n");
1514 err = -EINVAL;
1515 goto _return_fm_mac_free;
1516 }
1517 put_device(&dtsec->tbiphy->mdio.dev);
1518
1519 err = dtsec_init(dtsec);
1520 if (err < 0)
1521 goto _return_fm_mac_free;
1522
1523 /* For 1G MAC, disable by default the MIB counters overflow interrupt */
1524 err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
1525 if (err < 0)
1526 goto _return_fm_mac_free;
1527
1528 dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
1529 ioread32be(&dtsec->regs->tsec_id));
1530
1531 goto _return;
1532
1533 _return_fm_mac_free:
1534 dtsec_free(dtsec);
1535
1536 _return:
1537 return err;
1538 }
1539