/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUEUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT	100 /* ms */

static struct platform_device_id fec_devtype[] = {
        {
                /* keep it for coldfire */
                .name = DRIVER_NAME,
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
                .driver_data = FEC_QUIRK_USE_GASKET,
        }, {
                .name = "imx27-fec",
                .driver_data = 0,
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
                               FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
                               FEC_QUIRK_HAS_RACC,
        }, {
                .name = "mvf600-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
        }, {
                .name = "imx6sx-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
                               FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
        }, {
                .name = "imx6ul-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
        }, {
                /* sentinel */
        }
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
        IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
        IMX27_FEC,	/* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
        MVF600_FEC,
        IMX6SX_FEC,
        IMX6UL_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
        { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
        { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
        { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
        { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
        { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
        { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
        { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE	(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE	64

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT	(2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

#define COPYBREAK_DEFAULT	256

#define TSO_HEADER_SIZE	128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_dma) && \
         (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))

static int mii_cnt;

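/* Buffer descriptor ring helpers. The Tx/Rx rings are contiguous arrays
 * of (possibly extended) descriptors, so stepping through them is plain
 * pointer arithmetic by bd->dsize, wrapping at the ring boundaries.
 */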
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
                                             struct bufdesc_prop *bd)
{
        return (bdp >= bd->last) ? bd->base
                        : (struct bufdesc *)(((unsigned)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
                                             struct bufdesc_prop *bd)
{
        return (bdp <= bd->base) ? bd->last
                        : (struct bufdesc *)(((unsigned)bdp) - bd->dsize);
}

static int fec_enet_get_bd_index(struct bufdesc *bdp,
                                 struct bufdesc_prop *bd)
{
        return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
        int entries;

        entries = (((const char *)txq->dirty_tx -
                        (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

        return entries >= 0 ? entries : entries + txq->bd.ring_size;
}

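/* On FEC_QUIRK_SWAP_FRAME hardware (e.g. i.MX28, see fec_devtype above)
 * the DMA engine byte-swaps each 32-bit word of a frame, so buffers are
 * swapped in software before transmit and after receive.
 */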
static void swap_buffer(void *bufaddr, int len)
{
        int i;
        unsigned int *buf = bufaddr;

        for (i = 0; i < len; i += 4, buf++)
                swab32s(buf);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
        int i;
        unsigned int *src = src_buf;
        unsigned int *dst = dst_buf;

        for (i = 0; i < len; i += 4, src++, dst++)
                *dst = swab32p(src);
}

static void fec_dump(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *bdp;
        struct fec_enet_priv_tx_q *txq;
        int index = 0;

        netdev_info(ndev, "TX ring dump\n");
        pr_info("Nr     SC     addr       len  SKB\n");

        txq = fep->tx_queue[0];
        bdp = txq->bd.base;

        do {
                pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
                        index,
                        bdp == txq->bd.cur ? 'S' : ' ',
                        bdp == txq->dirty_tx ? 'H' : ' ',
                        fec16_to_cpu(bdp->cbd_sc),
                        fec32_to_cpu(bdp->cbd_bufaddr),
                        fec16_to_cpu(bdp->cbd_datlen),
                        txq->tx_skbuff[index]);
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                index++;
        } while (bdp != txq->bd.base);
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
        return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
        /* Only run for packets requiring a checksum. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (unlikely(skb_cow_head(skb, 0)))
                return -1;

        if (is_ipv4_pkt(skb))
                ip_hdr(skb)->check = 0;
        *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

        return 0;
}

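/* Map and queue one buffer descriptor per page fragment of a
 * scatter-gather skb. Returns the last descriptor used on success; on a
 * DMA mapping failure, all fragments mapped so far are unmapped and
 * ERR_PTR(-ENOMEM) is returned.
 */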
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                             struct sk_buff *skb,
                             struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *bdp = txq->bd.cur;
        struct bufdesc_ex *ebdp;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int frag, frag_len;
        unsigned short status;
        unsigned int estatus = 0;
        skb_frag_t *this_frag;
        unsigned int index;
        void *bufaddr;
        dma_addr_t addr;
        int i;

        for (frag = 0; frag < nr_frags; frag++) {
                this_frag = &skb_shinfo(skb)->frags[frag];
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                ebdp = (struct bufdesc_ex *)bdp;

                status = fec16_to_cpu(bdp->cbd_sc);
                status &= ~BD_ENET_TX_STATS;
                status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
                frag_len = skb_shinfo(skb)->frags[frag].size;

                /* Handle the last BD specially */
                if (frag == nr_frags - 1) {
                        status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
                        if (fep->bufdesc_ex) {
                                estatus |= BD_ENET_TX_INT;
                                if (unlikely(skb_shinfo(skb)->tx_flags &
                                        SKBTX_HW_TSTAMP && fep->hwts_tx_en))
                                        estatus |= BD_ENET_TX_TS;
                        }
                }

                if (fep->bufdesc_ex) {
                        if (fep->quirks & FEC_QUIRK_HAS_AVB)
                                estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                        ebdp->cbd_bdu = 0;
                        ebdp->cbd_esc = cpu_to_fec32(estatus);
                }

                bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

                index = fec_enet_get_bd_index(bdp, &txq->bd);
                if (((unsigned long) bufaddr) & fep->tx_align ||
                        fep->quirks & FEC_QUIRK_SWAP_FRAME) {
                        memcpy(txq->tx_bounce[index], bufaddr, frag_len);
                        bufaddr = txq->tx_bounce[index];

                        if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                                swap_buffer(bufaddr, frag_len);
                }

                addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
                                      DMA_TO_DEVICE);
                if (dma_mapping_error(&fep->pdev->dev, addr)) {
                        if (net_ratelimit())
                                netdev_err(ndev, "Tx DMA memory map failed\n");
                        goto dma_mapping_error;
                }

                bdp->cbd_bufaddr = cpu_to_fec32(addr);
                bdp->cbd_datlen = cpu_to_fec16(frag_len);
                /* Make sure the updates to rest of the descriptor are
                 * performed before transferring ownership.
                 */
                wmb();
                bdp->cbd_sc = cpu_to_fec16(status);
        }

        return bdp;
dma_mapping_error:
        bdp = txq->bd.cur;
        for (i = 0; i < frag; i++) {
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
                                 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
        }
        return ERR_PTR(-ENOMEM);
}

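/* Queue a regular (non-TSO) skb: the linear part goes into the first
 * descriptor and any page fragments into the following ones. Ownership
 * of the first descriptor is handed to the hardware last, so it never
 * sees a partially built frame.
 */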
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
                                   struct sk_buff *skb, struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        struct bufdesc *bdp, *last_bdp;
        void *bufaddr;
        dma_addr_t addr;
        unsigned short status;
        unsigned short buflen;
        unsigned int estatus = 0;
        unsigned int index;
        int entries_free;

        entries_free = fec_enet_get_free_txdesc_num(txq);
        if (entries_free < MAX_SKB_FRAGS + 1) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "NOT enough BD for SG!\n");
                return NETDEV_TX_OK;
        }

        /* Protocol checksum off-load for TCP and UDP. */
        if (fec_enet_clear_csum(skb, ndev)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Fill in a Tx ring entry */
        bdp = txq->bd.cur;
        last_bdp = bdp;
        status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;

        /* Set buffer length and buffer pointer */
        bufaddr = skb->data;
        buflen = skb_headlen(skb);

        index = fec_enet_get_bd_index(bdp, &txq->bd);
        if (((unsigned long) bufaddr) & fep->tx_align ||
                fep->quirks & FEC_QUIRK_SWAP_FRAME) {
                memcpy(txq->tx_bounce[index], skb->data, buflen);
                bufaddr = txq->tx_bounce[index];

                if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(bufaddr, buflen);
        }

        /* Push the data cache so the CPM does not get stale memory data. */
        addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_OK;
        }

        if (nr_frags) {
                last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
                if (IS_ERR(last_bdp)) {
                        dma_unmap_single(&fep->pdev->dev, addr,
                                         buflen, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
        } else {
                status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
                if (fep->bufdesc_ex) {
                        estatus = BD_ENET_TX_INT;
                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                SKBTX_HW_TSTAMP && fep->hwts_tx_en))
                                estatus |= BD_ENET_TX_TS;
                }
        }
        bdp->cbd_bufaddr = cpu_to_fec32(addr);
        bdp->cbd_datlen = cpu_to_fec16(buflen);

        if (fep->bufdesc_ex) {
                struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
                        fep->hwts_tx_en))
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

                if (fep->quirks & FEC_QUIRK_HAS_AVB)
                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

                ebdp->cbd_bdu = 0;
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }

        index = fec_enet_get_bd_index(last_bdp, &txq->bd);
        /* Save skb pointer */
        txq->tx_skbuff[index] = skb;

        /* Make sure the updates to rest of the descriptor are performed before
         * transferring ownership.
         */
        wmb();

        /* Send it on its way. Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
        status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
        bdp->cbd_sc = cpu_to_fec16(status);

        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

        skb_tx_timestamp(skb);

        /* Make sure the update to bdp and tx_skbuff are performed before
         * txq->bd.cur.
         */
        wmb();
        txq->bd.cur = bdp;

        /* Trigger transmission start */
        writel(0, txq->bd.reg_desc_active);

        return 0;
}

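/* Fill one descriptor with a chunk of TSO payload. last_tcp marks the
 * final chunk of the current segment (sets BD_ENET_TX_LAST); is_last
 * marks the final chunk of the whole skb and requests a transmit
 * interrupt.
 */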
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
                          struct net_device *ndev,
                          struct bufdesc *bdp, int index, char *data,
                          int size, bool last_tcp, bool is_last)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
        unsigned short status;
        unsigned int estatus = 0;
        dma_addr_t addr;

        status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;

        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

        if (((unsigned long) data) & fep->tx_align ||
                fep->quirks & FEC_QUIRK_SWAP_FRAME) {
                memcpy(txq->tx_bounce[index], data, size);
                data = txq->tx_bounce[index];

                if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(data, size);
        }

        addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_BUSY;
        }

        bdp->cbd_datlen = cpu_to_fec16(size);
        bdp->cbd_bufaddr = cpu_to_fec32(addr);

        if (fep->bufdesc_ex) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB)
                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                ebdp->cbd_bdu = 0;
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }

        /* Handle the last BD specially */
        if (last_tcp)
                status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
        if (is_last) {
                status |= BD_ENET_TX_INTR;
                if (fep->bufdesc_ex)
                        ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
        }

        bdp->cbd_sc = cpu_to_fec16(status);

        return 0;
}

static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
                         struct sk_buff *skb, struct net_device *ndev,
                         struct bufdesc *bdp, int index)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
        void *bufaddr;
        unsigned long dmabuf;
        unsigned short status;
        unsigned int estatus = 0;

        status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

        bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
        dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
        if (((unsigned long)bufaddr) & fep->tx_align ||
                fep->quirks & FEC_QUIRK_SWAP_FRAME) {
                memcpy(txq->tx_bounce[index], skb->data, hdr_len);
                bufaddr = txq->tx_bounce[index];

                if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(bufaddr, hdr_len);

                dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
                                        hdr_len, DMA_TO_DEVICE);
                if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
                        dev_kfree_skb_any(skb);
                        if (net_ratelimit())
                                netdev_err(ndev, "Tx DMA memory map failed\n");
                        return NETDEV_TX_BUSY;
                }
        }

        bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
        bdp->cbd_datlen = cpu_to_fec16(hdr_len);

        if (fep->bufdesc_ex) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB)
                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                ebdp->cbd_bdu = 0;
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }

        bdp->cbd_sc = cpu_to_fec16(status);

        return 0;
}

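/* Software TSO: chop the skb into gso_size'd segments, build a fresh
 * MAC/IP/TCP header in txq->tso_hdrs for each segment and queue the
 * header and payload chunks as separate descriptors.
 */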
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
                                   struct sk_buff *skb,
                                   struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int total_len, data_left;
        struct bufdesc *bdp = txq->bd.cur;
        struct tso_t tso;
        unsigned int index = 0;
        int ret;

        if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "NOT enough BD for TSO!\n");
                return NETDEV_TX_OK;
        }

        /* Protocol checksum off-load for TCP and UDP. */
        if (fec_enet_clear_csum(skb, ndev)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Initialize the TSO handler, and prepare the first payload */
        tso_start(skb, &tso);

        total_len = skb->len - hdr_len;
        while (total_len > 0) {
                char *hdr;

                index = fec_enet_get_bd_index(bdp, &txq->bd);
                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
                total_len -= data_left;

                /* prepare packet headers: MAC + IP + TCP */
                hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
                ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
                if (ret)
                        goto err_release;

                while (data_left > 0) {
                        int size;

                        size = min_t(int, tso.size, data_left);
                        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                        index = fec_enet_get_bd_index(bdp, &txq->bd);
                        ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
                                                        bdp, index,
                                                        tso.data, size,
                                                        size == data_left,
                                                        total_len == 0);
                        if (ret)
                                goto err_release;

                        data_left -= size;
                        tso_build_data(skb, &tso, size);
                }

                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
        }

        /* Save skb pointer */
        txq->tx_skbuff[index] = skb;

        skb_tx_timestamp(skb);
        txq->bd.cur = bdp;

        /* Trigger transmission start */
        if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
            !readl(txq->bd.reg_desc_active) ||
            !readl(txq->bd.reg_desc_active) ||
            !readl(txq->bd.reg_desc_active) ||
            !readl(txq->bd.reg_desc_active))
                writel(0, txq->bd.reg_desc_active);

        return 0;

err_release:
        /* TODO: Release all used data descriptors for TSO */
        return ret;
}

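/* ndo_start_xmit handler: dispatch the skb to the TSO or regular submit
 * path for its queue, then stop the queue if the descriptor ring is
 * about to fill up.
 */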
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int entries_free;
        unsigned short queue;
        struct fec_enet_priv_tx_q *txq;
        struct netdev_queue *nq;
        int ret;

        queue = skb_get_queue_mapping(skb);
        txq = fep->tx_queue[queue];
        nq = netdev_get_tx_queue(ndev, queue);

        if (skb_is_gso(skb))
                ret = fec_enet_txq_submit_tso(txq, skb, ndev);
        else
                ret = fec_enet_txq_submit_skb(txq, skb, ndev);
        if (ret)
                return ret;

        entries_free = fec_enet_get_free_txdesc_num(txq);
        if (entries_free <= txq->tx_stop_threshold)
                netif_tx_stop_queue(nq);

        return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors */
static void fec_enet_bd_init(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        struct fec_enet_priv_tx_q *txq;
        struct fec_enet_priv_rx_q *rxq;
        struct bufdesc *bdp;
        unsigned int i;
        unsigned int q;

        for (q = 0; q < fep->num_rx_queues; q++) {
                /* Initialize the receive buffer descriptors. */
                rxq = fep->rx_queue[q];
                bdp = rxq->bd.base;

                for (i = 0; i < rxq->bd.ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
                        if (bdp->cbd_bufaddr)
                                bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
                        else
                                bdp->cbd_sc = cpu_to_fec16(0);
                        bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
                }

                /* Set the last buffer to wrap */
                bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
                bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

                rxq->bd.cur = rxq->bd.base;
        }

        for (q = 0; q < fep->num_tx_queues; q++) {
                /* ...and the same for transmit */
                txq = fep->tx_queue[q];
                bdp = txq->bd.base;
                txq->bd.cur = bdp;

                for (i = 0; i < txq->bd.ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
                        bdp->cbd_sc = cpu_to_fec16(0);
                        if (bdp->cbd_bufaddr &&
                            !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
                                dma_unmap_single(&fep->pdev->dev,
                                                 fec32_to_cpu(bdp->cbd_bufaddr),
                                                 fec16_to_cpu(bdp->cbd_datlen),
                                                 DMA_TO_DEVICE);
                        if (txq->tx_skbuff[i]) {
                                dev_kfree_skb_any(txq->tx_skbuff[i]);
                                txq->tx_skbuff[i] = NULL;
                        }
                        bdp->cbd_bufaddr = cpu_to_fec32(0);
                        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                }

                /* Set the last buffer to wrap */
                bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
                bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
                txq->dirty_tx = bdp;
        }
}

static void fec_enet_active_rxring(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int i;

        for (i = 0; i < fep->num_rx_queues; i++)
                writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_enet_priv_tx_q *txq;
        struct fec_enet_priv_rx_q *rxq;
        int i;

        for (i = 0; i < fep->num_rx_queues; i++) {
                rxq = fep->rx_queue[i];
                writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
                writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

                /* enable DMA1/2 */
                if (i)
                        writel(RCMR_MATCHEN | RCMR_CMP(i),
                               fep->hwp + FEC_RCMR(i));
        }

        for (i = 0; i < fep->num_tx_queues; i++) {
                txq = fep->tx_queue[i];
                writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

                /* enable DMA1/2 */
                if (i)
                        writel(DMA_CLASS_EN | IDLE_SLOPE(i),
                               fep->hwp + FEC_DMA_CFG(i));
        }
}

static void fec_enet_reset_skb(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_enet_priv_tx_q *txq;
        int i, j;

        for (i = 0; i < fep->num_tx_queues; i++) {
                txq = fep->tx_queue[i];

                for (j = 0; j < txq->bd.ring_size; j++) {
                        if (txq->tx_skbuff[j]) {
                                dev_kfree_skb_any(txq->tx_skbuff[j]);
                                txq->tx_skbuff[j] = NULL;
                        }
                }
        }
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        u32 val;
        u32 temp_mac[2];
        u32 rcntl = OPT_FRAME_SIZE | 0x04;
        u32 ecntl = 0x2; /* ETHEREN */

        /* Whack a reset. We should wait for this.
         * On the i.MX6SX SOC the ENET block sits on the AXI bus, so we
         * disable the MAC instead of resetting it.
         */
        if (fep->quirks & FEC_QUIRK_HAS_AVB) {
                writel(0, fep->hwp + FEC_ECNTRL);
        } else {
                writel(1, fep->hwp + FEC_ECNTRL);
                udelay(10);
        }

        /*
         * An enet-mac reset also resets the MAC address registers,
         * so we need to reconfigure them.
         */
        memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
        writel((__force u32)cpu_to_be32(temp_mac[0]),
               fep->hwp + FEC_ADDR_LOW);
        writel((__force u32)cpu_to_be32(temp_mac[1]),
               fep->hwp + FEC_ADDR_HIGH);

        /* Clear any outstanding interrupt. */
        writel(0xffffffff, fep->hwp + FEC_IEVENT);

        fec_enet_bd_init(ndev);

        fec_enet_enable_ring(ndev);

        /* Reset tx SKB buffers. */
        fec_enet_reset_skb(ndev);

        /* Enable MII mode */
        if (fep->full_duplex == DUPLEX_FULL) {
                /* FD enable */
                writel(0x04, fep->hwp + FEC_X_CNTRL);
        } else {
                /* No Rcv on Xmit */
                rcntl |= 0x02;
                writel(0x0, fep->hwp + FEC_X_CNTRL);
        }

        /* Set MII speed */
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
        if (fep->quirks & FEC_QUIRK_HAS_RACC) {
                val = readl(fep->hwp + FEC_RACC);
                /* align IP header */
                val |= FEC_RACC_SHIFT16;
                if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
                        /* set RX checksum */
                        val |= FEC_RACC_OPTIONS;
                else
                        val &= ~FEC_RACC_OPTIONS;
                writel(val, fep->hwp + FEC_RACC);
                writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
        }
#endif

        /*
         * The phy interface and speed need to be configured
         * differently on enet-mac.
         */
        if (fep->quirks & FEC_QUIRK_ENET_MAC) {
                /* Enable flow control and length check */
                rcntl |= 0x40000000 | 0x00000020;

                /* RGMII, RMII or MII */
                if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
                    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
                    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
                    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
                        rcntl |= (1 << 6);
                else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
                        rcntl |= (1 << 8);
                else
                        rcntl &= ~(1 << 8);

                /* 1G, 100M or 10M */
                if (ndev->phydev) {
                        if (ndev->phydev->speed == SPEED_1000)
                                ecntl |= (1 << 5);
                        else if (ndev->phydev->speed == SPEED_100)
                                rcntl &= ~(1 << 9);
                        else
                                rcntl |= (1 << 9);
                }
        } else {
#ifdef FEC_MIIGSK_ENR
                if (fep->quirks & FEC_QUIRK_USE_GASKET) {
                        u32 cfgr;
                        /* disable the gasket and wait */
                        writel(0, fep->hwp + FEC_MIIGSK_ENR);
                        while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
                                udelay(1);

                        /*
                         * configure the gasket:
                         *   RMII, 50 MHz, no loopback, no echo
                         *   MII, 25 MHz, no loopback, no echo
                         */
                        cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
                                ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
                        if (ndev->phydev && ndev->phydev->speed == SPEED_10)
                                cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
                        writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

                        /* re-enable the gasket */
                        writel(2, fep->hwp + FEC_MIIGSK_ENR);
                }
#endif
        }

#if !defined(CONFIG_M5272)
        /* enable pause frame */
        if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
            ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
             ndev->phydev && ndev->phydev->pause)) {
                rcntl |= FEC_ENET_FCE;

                /* set FIFO threshold parameter to reduce overrun */
                writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
                writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
                writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
                writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

                /* OPD */
                writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
        } else {
                rcntl &= ~FEC_ENET_FCE;
        }
#endif /* !defined(CONFIG_M5272) */

        writel(rcntl, fep->hwp + FEC_R_CNTRL);

        /* Setup multicast filter. */
        set_multicast_list(ndev);
#ifndef CONFIG_M5272
        writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
        writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

        if (fep->quirks & FEC_QUIRK_ENET_MAC) {
                /* enable ENET endian swap */
                ecntl |= (1 << 8);
                /* enable ENET store and forward mode */
                writel(1 << 8, fep->hwp + FEC_X_WMRK);
        }

        if (fep->bufdesc_ex)
                ecntl |= (1 << 4);

#ifndef CONFIG_M5272
        /* Enable the MIB statistic event counters */
        writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

        /* And last, enable the transmit and receive processing */
        writel(ecntl, fep->hwp + FEC_ECNTRL);
        fec_enet_active_rxring(ndev);

        if (fep->bufdesc_ex)
                fec_ptp_start_cyclecounter(ndev);

        /* Enable interrupts we wish to service */
        if (fep->link)
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        else
                writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

        /* Init the interrupt coalescing */
        fec_enet_itr_coal_init(ndev);
}

static void
fec_stop(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
        u32 val;

        /* We cannot expect a graceful transmit stop without link! */
        if (fep->link) {
                writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
                udelay(10);
                if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
                        netdev_err(ndev, "Graceful transmit stop did not complete!\n");
        }

        /* Whack a reset. We should wait for this.
         * On the i.MX6SX SOC the ENET block sits on the AXI bus, so we
         * disable the MAC instead of resetting it.
         */
        if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB) {
                        writel(0, fep->hwp + FEC_ECNTRL);
                } else {
                        writel(1, fep->hwp + FEC_ECNTRL);
                        udelay(10);
                }
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        } else {
                writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
                val = readl(fep->hwp + FEC_ECNTRL);
                val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
                writel(val, fep->hwp + FEC_ECNTRL);

                if (pdata && pdata->sleep_mode_enable)
                        pdata->sleep_mode_enable(true);
        }
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

        /* We have to keep ENET enabled to have MII interrupt stay working */
        if (fep->quirks & FEC_QUIRK_ENET_MAC &&
            !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
                writel(2, fep->hwp + FEC_ECNTRL);
                writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
        }
}

static void
fec_timeout(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);

        fec_dump(ndev);

        ndev->stats.tx_errors++;

        schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
        struct fec_enet_private *fep =
                container_of(work, struct fec_enet_private, tx_timeout_work);
        struct net_device *ndev = fep->netdev;

        rtnl_lock();
        if (netif_device_present(ndev) || netif_running(ndev)) {
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
                netif_wake_queue(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
        rtnl_unlock();
}

static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
                  struct skb_shared_hwtstamps *hwtstamps)
{
        unsigned long flags;
        u64 ns;

        spin_lock_irqsave(&fep->tmreg_lock, flags);
        ns = timecounter_cyc2time(&fep->tc, ts);
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);

        memset(hwtstamps, 0, sizeof(*hwtstamps));
        hwtstamps->hwtstamp = ns_to_ktime(ns);
}

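/* Reclaim completed transmit descriptors for one queue: walk from
 * dirty_tx towards bd.cur, unmap buffers, account errors and hardware
 * timestamps, free the skbs and wake the queue once enough descriptors
 * are free again.
 */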
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
        struct fec_enet_private *fep;
        struct bufdesc *bdp;
        unsigned short status;
        struct sk_buff *skb;
        struct fec_enet_priv_tx_q *txq;
        struct netdev_queue *nq;
        int index = 0;
        int entries_free;

        fep = netdev_priv(ndev);

        queue_id = FEC_ENET_GET_QUEUE(queue_id);

        txq = fep->tx_queue[queue_id];
        nq = netdev_get_tx_queue(ndev, queue_id);

        /* get next bdp of dirty_tx */
        bdp = txq->dirty_tx;
        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

        while (bdp != READ_ONCE(txq->bd.cur)) {
                /* Order the load of bd.cur and cbd_sc */
                rmb();
                status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
                if (status & BD_ENET_TX_READY)
                        break;

                index = fec_enet_get_bd_index(bdp, &txq->bd);

                skb = txq->tx_skbuff[index];
                txq->tx_skbuff[index] = NULL;
                if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
                        dma_unmap_single(&fep->pdev->dev,
                                         fec32_to_cpu(bdp->cbd_bufaddr),
                                         fec16_to_cpu(bdp->cbd_datlen),
                                         DMA_TO_DEVICE);
                bdp->cbd_bufaddr = cpu_to_fec32(0);
                if (!skb)
                        goto skb_done;

                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                              BD_ENET_TX_RL | BD_ENET_TX_UN |
                              BD_ENET_TX_CSL)) {
                        ndev->stats.tx_errors++;
                        if (status & BD_ENET_TX_HB)  /* No heartbeat */
                                ndev->stats.tx_heartbeat_errors++;
                        if (status & BD_ENET_TX_LC)  /* Late collision */
                                ndev->stats.tx_window_errors++;
                        if (status & BD_ENET_TX_RL)  /* Retrans limit */
                                ndev->stats.tx_aborted_errors++;
                        if (status & BD_ENET_TX_UN)  /* Underrun */
                                ndev->stats.tx_fifo_errors++;
                        if (status & BD_ENET_TX_CSL) /* Carrier lost */
                                ndev->stats.tx_carrier_errors++;
                } else {
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += skb->len;
                }

                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
                        fep->bufdesc_ex) {
                        struct skb_shared_hwtstamps shhwtstamps;
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                        fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
                        skb_tstamp_tx(skb, &shhwtstamps);
                }

                /* Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (status & BD_ENET_TX_DEF)
                        ndev->stats.collisions++;

                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
skb_done:
                /* Make sure the update to bdp and tx_skbuff are performed
                 * before dirty_tx
                 */
                wmb();
                txq->dirty_tx = bdp;

                /* Update pointer to next buffer descriptor to be transmitted */
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

                /* Since we have freed up a buffer, the ring is no longer full */
                if (netif_queue_stopped(ndev)) {
                        entries_free = fec_enet_get_free_txdesc_num(txq);
                        if (entries_free >= txq->tx_wake_threshold)
                                netif_tx_wake_queue(nq);
                }
        }

        /* ERR006538: Keep the transmitter going */
        if (bdp != txq->bd.cur &&
            readl(txq->bd.reg_desc_active) == 0)
                writel(0, txq->bd.reg_desc_active);
}

static void
fec_enet_tx(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        u16 queue_id;

        /* First process class A queue, then Class B and Best Effort queue */
        for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
                clear_bit(queue_id, &fep->work_tx);
                fec_enet_tx_queue(ndev, queue_id);
        }
}

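/* Attach a fresh receive buffer to a descriptor, aligning skb->data as
 * the controller requires before DMA-mapping it.
 */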
static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int off;

        off = ((unsigned long)skb->data) & fep->rx_align;
        if (off)
                skb_reserve(skb, fep->rx_align + 1 - off);

        bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data,
                                                       FEC_ENET_RX_FRSIZE - fep->rx_align,
                                                       DMA_FROM_DEVICE));
        if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
                if (net_ratelimit())
                        netdev_err(ndev, "Rx DMA memory map failed\n");
                return -ENOMEM;
        }

        return 0;
}

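/* For frames shorter than rx_copybreak it is cheaper to copy the data
 * into a freshly allocated skb and hand the original DMA buffer straight
 * back to the hardware than to unmap and replace it.
 */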
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
                               struct bufdesc *bdp, u32 length, bool swap)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct sk_buff *new_skb;

        if (length > fep->rx_copybreak)
                return false;

        new_skb = netdev_alloc_skb(ndev, length);
        if (!new_skb)
                return false;

        dma_sync_single_for_cpu(&fep->pdev->dev,
                                fec32_to_cpu(bdp->cbd_bufaddr),
                                FEC_ENET_RX_FRSIZE - fep->rx_align,
                                DMA_FROM_DEVICE);
        if (!swap)
                memcpy(new_skb->data, (*skb)->data, length);
        else
                swap_buffer2(new_skb->data, (*skb)->data, length);
        *skb = new_skb;

        return true;
}

/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_enet_priv_rx_q *rxq;
        struct bufdesc *bdp;
        unsigned short status;
        struct sk_buff *skb_new = NULL;
        struct sk_buff *skb;
        ushort pkt_len;
        __u8 *data;
        int pkt_received = 0;
        struct bufdesc_ex *ebdp = NULL;
        bool vlan_packet_rcvd = false;
        u16 vlan_tag;
        int index = 0;
        bool is_copybreak;
        bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
        flush_cache_all();
#endif
        queue_id = FEC_ENET_GET_QUEUE(queue_id);
        rxq = fep->rx_queue[queue_id];

        /* First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = rxq->bd.cur;

        while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

                if (pkt_received >= budget)
                        break;
                pkt_received++;

                writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

                /* Check for errors. */
                status ^= BD_ENET_RX_LAST;
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                              BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
                              BD_ENET_RX_CL)) {
                        ndev->stats.rx_errors++;
                        if (status & BD_ENET_RX_OV) {
                                /* FIFO overrun */
                                ndev->stats.rx_fifo_errors++;
                                goto rx_processing_done;
                        }
                        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
                                                | BD_ENET_RX_LAST)) {
                                /* Frame too long or too short. */
                                ndev->stats.rx_length_errors++;
                                if (status & BD_ENET_RX_LAST)
                                        netdev_err(ndev, "rcv is not +last\n");
                        }
                        if (status & BD_ENET_RX_CR)	/* CRC Error */
                                ndev->stats.rx_crc_errors++;
                        /* Report late collisions as a frame error. */
                        if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                ndev->stats.rx_frame_errors++;
                        goto rx_processing_done;
                }

                /* Process the incoming frame. */
                ndev->stats.rx_packets++;
                pkt_len = fec16_to_cpu(bdp->cbd_datlen);
                ndev->stats.rx_bytes += pkt_len;

                index = fec_enet_get_bd_index(bdp, &rxq->bd);
                skb = rxq->rx_skbuff[index];

                /* The packet length includes FCS, but we don't want to
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
                is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
                                                  need_swap);
                if (!is_copybreak) {
                        skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
                        if (unlikely(!skb_new)) {
                                ndev->stats.rx_dropped++;
                                goto rx_processing_done;
                        }
                        dma_unmap_single(&fep->pdev->dev,
                                         fec32_to_cpu(bdp->cbd_bufaddr),
                                         FEC_ENET_RX_FRSIZE - fep->rx_align,
                                         DMA_FROM_DEVICE);
                }

                prefetch(skb->data - NET_IP_ALIGN);
                skb_put(skb, pkt_len - 4);
                data = skb->data;

                if (!is_copybreak && need_swap)
                        swap_buffer(data, pkt_len);

#if !defined(CONFIG_M5272)
                if (fep->quirks & FEC_QUIRK_HAS_RACC)
                        data = skb_pull_inline(skb, 2);
#endif

                /* Extract the enhanced buffer descriptor */
                ebdp = NULL;
                if (fep->bufdesc_ex)
                        ebdp = (struct bufdesc_ex *)bdp;

                /* If this is a VLAN packet remove the VLAN Tag */
                vlan_packet_rcvd = false;
                if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                    fep->bufdesc_ex &&
                    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
                        /* Push and remove the vlan tag */
                        struct vlan_hdr *vlan_header =
                                        (struct vlan_hdr *) (data + ETH_HLEN);
                        vlan_tag = ntohs(vlan_header->h_vlan_TCI);

                        vlan_packet_rcvd = true;

                        memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
                        skb_pull(skb, VLAN_HLEN);
                }

                skb->protocol = eth_type_trans(skb, ndev);

                /* Get receive timestamp from the skb */
                if (fep->hwts_rx_en && fep->bufdesc_ex)
                        fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
                                          skb_hwtstamps(skb));

                if (fep->bufdesc_ex &&
                    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
                        if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
                                /* don't check it */
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                        } else {
                                skb_checksum_none_assert(skb);
                        }
                }

                /* Handle received VLAN packets */
                if (vlan_packet_rcvd)
                        __vlan_hwaccel_put_tag(skb,
                                               htons(ETH_P_8021Q),
                                               vlan_tag);

                napi_gro_receive(&fep->napi, skb);

                if (is_copybreak) {
                        dma_sync_single_for_device(&fep->pdev->dev,
                                                   fec32_to_cpu(bdp->cbd_bufaddr),
                                                   FEC_ENET_RX_FRSIZE - fep->rx_align,
                                                   DMA_FROM_DEVICE);
                } else {
                        rxq->rx_skbuff[index] = skb_new;
                        fec_enet_new_rxbdp(ndev, bdp, skb_new);
                }

rx_processing_done:
                /* Clear the status flags for this buffer */
                status &= ~BD_ENET_RX_STATS;

                /* Mark the buffer empty */
                status |= BD_ENET_RX_EMPTY;

                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                        ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
                        ebdp->cbd_prot = 0;
                        ebdp->cbd_bdu = 0;
                }
                /* Make sure the updates to rest of the descriptor are
                 * performed before transferring ownership.
                 */
                wmb();
                bdp->cbd_sc = cpu_to_fec16(status);

                /* Update BD pointer to next entry */
                bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

                /* Doing this here will keep the FEC running while we process
                 * incoming frames. On a heavily loaded network, we should be
                 * able to keep up at the expense of system resources.
                 */
                writel(0, rxq->bd.reg_desc_active);
        }
        rxq->bd.cur = bdp;
        return pkt_received;
}

static int
fec_enet_rx(struct net_device *ndev, int budget)
{
        int pkt_received = 0;
        u16 queue_id;
        struct fec_enet_private *fep = netdev_priv(ndev);

        for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
                int ret;

                ret = fec_enet_rx_queue(ndev,
                                        budget - pkt_received, queue_id);

                if (ret < budget - pkt_received)
                        clear_bit(queue_id, &fep->work_rx);

                pkt_received += ret;
        }
        return pkt_received;
}

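/* Translate the interrupt event bits into the work_rx/work_tx bitmaps.
 * Queue 0 is recorded in bit 2 and queues 1/2 in bits 0/1, so that
 * FEC_ENET_GET_QUEUE() services the AVB class A and B queues before the
 * best-effort queue.
 */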
static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
        if (int_events == 0)
                return false;

        if (int_events & FEC_ENET_RXF)
                fep->work_rx |= (1 << 2);
        if (int_events & FEC_ENET_RXF_1)
                fep->work_rx |= (1 << 0);
        if (int_events & FEC_ENET_RXF_2)
                fep->work_rx |= (1 << 1);

        if (int_events & FEC_ENET_TXF)
                fep->work_tx |= (1 << 2);
        if (int_events & FEC_ENET_TXF_1)
                fep->work_tx |= (1 << 0);
        if (int_events & FEC_ENET_TXF_2)
                fep->work_tx |= (1 << 1);

        return true;
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
        uint int_events;
        irqreturn_t ret = IRQ_NONE;

        int_events = readl(fep->hwp + FEC_IEVENT);
        writel(int_events, fep->hwp + FEC_IEVENT);
        fec_enet_collect_events(fep, int_events);

        if ((fep->work_tx || fep->work_rx) && fep->link) {
                ret = IRQ_HANDLED;

                if (napi_schedule_prep(&fep->napi)) {
                        /* Disable the NAPI interrupts */
                        writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
                        __napi_schedule(&fep->napi);
                }
        }

        if (int_events & FEC_ENET_MII) {
                ret = IRQ_HANDLED;
                complete(&fep->mdio_done);
        }

        if (fep->ptp_clock)
                fec_ptp_check_pps_event(fep);

        return ret;
}

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
        struct net_device *ndev = napi->dev;
        struct fec_enet_private *fep = netdev_priv(ndev);
        int pkts;

        pkts = fec_enet_rx(ndev, budget);

        fec_enet_tx(ndev);

        if (pkts < budget) {
                napi_complete(napi);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        }
        return pkts;
}

/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
        unsigned char *iap, tmpaddr[ETH_ALEN];

        /*
         * try to get mac address in following order:
         *
         * 1) module parameter via kernel command line in form
         *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
         */
        iap = macaddr;

        /*
         * 2) from device tree data
         */
        if (!is_valid_ether_addr(iap)) {
                struct device_node *np = fep->pdev->dev.of_node;
                if (np) {
                        const char *mac = of_get_mac_address(np);
                        if (mac)
                                iap = (unsigned char *) mac;
                }
        }

        /*
         * 3) from flash or fuse (via platform data)
         */
        if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
                if (FEC_FLASHMAC)
                        iap = (unsigned char *)FEC_FLASHMAC;
#else
                if (pdata)
                        iap = (unsigned char *)&pdata->mac;
#endif
        }

        /*
         * 4) FEC mac registers set by bootloader
         */
        if (!is_valid_ether_addr(iap)) {
                *((__be32 *) &tmpaddr[0]) =
                        cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
                *((__be16 *) &tmpaddr[4]) =
                        cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
                iap = &tmpaddr[0];
        }

        /*
         * 5) random mac address
         */
        if (!is_valid_ether_addr(iap)) {
                /* Report it and use a random ethernet address instead */
                netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
                eth_hw_addr_random(ndev);
                netdev_info(ndev, "Using random MAC address: %pM\n",
                            ndev->dev_addr);
                return;
        }

        memcpy(ndev->dev_addr, iap, ETH_ALEN);

        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
                ndev->dev_addr[ETH_ALEN - 1] = macaddr[ETH_ALEN - 1] + fep->dev_id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
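
/* phylib adjust_link callback: restart the FEC whenever link, speed or
 * duplex changes, and stop it when the link goes away.
 */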
static void fec_enet_adjust_link(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phy_dev = ndev->phydev;
        int status_change = 0;

        /* Prevent a state halted on mii error */
        if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
                phy_dev->state = PHY_RESUMING;
                return;
        }

        /*
         * If the netdev is down, or is going down, we're not interested
         * in link state events, so just mark our idea of the link as down
         * and ignore the event.
         */
        if (!netif_running(ndev) || !netif_device_present(ndev)) {
                fep->link = 0;
        } else if (phy_dev->link) {
                if (!fep->link) {
                        fep->link = phy_dev->link;
                        status_change = 1;
                }

                if (fep->full_duplex != phy_dev->duplex) {
                        fep->full_duplex = phy_dev->duplex;
                        status_change = 1;
                }

                if (phy_dev->speed != fep->speed) {
                        fep->speed = phy_dev->speed;
                        status_change = 1;
                }

                /* if any of the above changed restart the FEC */
                if (status_change) {
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_restart(ndev);
                        netif_wake_queue(ndev);
                        netif_tx_unlock_bh(ndev);
                        napi_enable(&fep->napi);
                }
        } else {
                if (fep->link) {
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_stop(ndev);
                        netif_tx_unlock_bh(ndev);
                        napi_enable(&fep->napi);
                        fep->link = phy_dev->link;
                        status_change = 1;
                }
        }

        if (status_change)
                phy_print_status(phy_dev);
}

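/* Start an MDIO read through the MII management frame register and wait
 * for the MII interrupt to signal completion.
 */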
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
        struct fec_enet_private *fep = bus->priv;
        struct device *dev = &fep->pdev->dev;
        unsigned long time_left;
        int ret = 0;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                return ret;

        fep->mii_timeout = 0;
        reinit_completion(&fep->mdio_done);

        /* start a read op */
        writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
               FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
               FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

        /* wait for end of transfer */
        time_left = wait_for_completion_timeout(&fep->mdio_done,
                                                usecs_to_jiffies(FEC_MII_TIMEOUT));
        if (time_left == 0) {
                fep->mii_timeout = 1;
                netdev_err(fep->netdev, "MDIO read timeout\n");
                ret = -ETIMEDOUT;
                goto out;
        }

        ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return ret;
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                               u16 value)
{
        struct fec_enet_private *fep = bus->priv;
        struct device *dev = &fep->pdev->dev;
        unsigned long time_left;
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                return ret;
        else
                ret = 0;

        fep->mii_timeout = 0;
        reinit_completion(&fep->mdio_done);

        /* start a write op */
        writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
               FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
               FEC_MMFR_TA | FEC_MMFR_DATA(value),
               fep->hwp + FEC_MII_DATA);

        /* wait for end of transfer */
        time_left = wait_for_completion_timeout(&fep->mdio_done,
                                                usecs_to_jiffies(FEC_MII_TIMEOUT));
        if (time_left == 0) {
                fep->mii_timeout = 1;
                netdev_err(fep->netdev, "MDIO write timeout\n");
                ret = -ETIMEDOUT;
        }

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return ret;
}

static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int ret;

        if (enable) {
                ret = clk_prepare_enable(fep->clk_ahb);
                if (ret)
                        return ret;
                if (fep->clk_enet_out) {
                        ret = clk_prepare_enable(fep->clk_enet_out);
                        if (ret)
                                goto failed_clk_enet_out;
                }
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
                        ret = clk_prepare_enable(fep->clk_ptp);
                        if (ret) {
                                mutex_unlock(&fep->ptp_clk_mutex);
                                goto failed_clk_ptp;
                        } else {
                                fep->ptp_clk_on = true;
                        }
                        mutex_unlock(&fep->ptp_clk_mutex);
                }
                if (fep->clk_ref) {
                        ret = clk_prepare_enable(fep->clk_ref);
                        if (ret)
                                goto failed_clk_ref;
                }
        } else {
                clk_disable_unprepare(fep->clk_ahb);
                if (fep->clk_enet_out)
                        clk_disable_unprepare(fep->clk_enet_out);
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
                        clk_disable_unprepare(fep->clk_ptp);
                        fep->ptp_clk_on = false;
                        mutex_unlock(&fep->ptp_clk_mutex);
                }
                if (fep->clk_ref)
                        clk_disable_unprepare(fep->clk_ref);
        }

        return 0;

failed_clk_ref:
        if (fep->clk_ref)
                clk_disable_unprepare(fep->clk_ref);
failed_clk_ptp:
        if (fep->clk_enet_out)
                clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
        clk_disable_unprepare(fep->clk_ahb);

        return ret;
}

1901 static int fec_enet_mii_probe(struct net_device *ndev)
1902 {
1903 struct fec_enet_private *fep = netdev_priv(ndev);
1904 struct phy_device *phy_dev = NULL;
1905 char mdio_bus_id[MII_BUS_ID_SIZE];
1906 char phy_name[MII_BUS_ID_SIZE + 3];
1907 int phy_id;
1908 int dev_id = fep->dev_id;
1909
1910 if (fep->phy_node) {
1911 phy_dev = of_phy_connect(ndev, fep->phy_node,
1912 &fec_enet_adjust_link, 0,
1913 fep->phy_interface);
1914 if (!phy_dev)
1915 return -ENODEV;
1916 } else {
1917 /* check for attached phy */
1918 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
1919 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
1920 continue;
1921 if (dev_id--)
1922 continue;
1923 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
1924 break;
1925 }
1926
1927 if (phy_id >= PHY_MAX_ADDR) {
1928 netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
1929 strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
1930 phy_id = 0;
1931 }
1932
1933 snprintf(phy_name, sizeof(phy_name),
1934 PHY_ID_FMT, mdio_bus_id, phy_id);
1935 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
1936 fep->phy_interface);
1937 }
1938
1939 if (IS_ERR(phy_dev)) {
1940 netdev_err(ndev, "could not attach to PHY\n");
1941 return PTR_ERR(phy_dev);
1942 }
1943
1944 /* mask with MAC supported features */
1945 if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
1946 phy_dev->supported &= PHY_GBIT_FEATURES;
1947 phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
1948 #if !defined(CONFIG_M5272)
1949 phy_dev->supported |= SUPPORTED_Pause;
1950 #endif
1951 } else {
1952 phy_dev->supported &= PHY_BASIC_FEATURES;
1953 }
1954
1955 phy_dev->advertising = phy_dev->supported;
1956
1957 fep->link = 0;
1958 fep->full_duplex = 0;
1959
1960 phy_attached_info(phy_dev);
1961
1962 return 0;
1963 }
1964
1965 static int fec_enet_mii_init(struct platform_device *pdev)
1966 {
1967 static struct mii_bus *fec0_mii_bus;
1968 struct net_device *ndev = platform_get_drvdata(pdev);
1969 struct fec_enet_private *fep = netdev_priv(ndev);
1970 struct device_node *node;
1971 int err = -ENXIO;
1972 u32 mii_speed, holdtime;
1973
1974 /*
1975 * The i.MX28 dual fec interfaces are not equal.
1976 * Here are the differences:
1977 *
1978 * - fec0 supports MII & RMII modes while fec1 only supports RMII
1979 * - fec0 acts as the 1588 time master while fec1 is slave
1980 * - external phys can only be configured by fec0
1981 *
1982 * That is to say fec1 cannot work independently. It only works
1983 * when fec0 is working. The reason behind this design is that the
1984 * second interface is added primarily for Switch mode.
1985 *
1986 * Because of the last point above, both phys are attached on fec0
1987 * mdio interface in board design, and need to be configured by
1988 * fec0 mii_bus.
1989 */
1990 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1991 /* fec1 uses fec0 mii_bus */
1992 if (mii_cnt && fec0_mii_bus) {
1993 fep->mii_bus = fec0_mii_bus;
1994 mii_cnt++;
1995 return 0;
1996 }
1997 return -ENOENT;
1998 }
1999
2000 fep->mii_timeout = 0;
2001
2002 /*
2003 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
2004 *
2005 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)', while
2006 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
2007 * Reference Manual has an error here, which is corrected in the
2008 * i.MX6Q documentation.
2009 */
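/*
 * Worked example (illustrative, not from a datasheet): with a
 * 66 MHz ipg clock on an ENET-MAC type controller,
 * DIV_ROUND_UP(66000000, 5000000) = 14, minus one gives
 * MII_SPEED = 13, so MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz,
 * just under the 2.5 MHz target.
 */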
2010 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
2011 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2012 mii_speed--;
2013 if (mii_speed > 63) {
2014 dev_err(&pdev->dev,
2015 "fec clock (%lu) to fast to get right mii speed\n",
2016 clk_get_rate(fep->clk_ipg));
2017 err = -EINVAL;
2018 goto err_out;
2019 }
2020
2021 /*
2022 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
2023 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2024 * versions are RAZ there, so just ignore the difference and write the
2025 * register always.
2026 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2027 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2028 * output.
2029 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2030 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2031 * holdtime cannot result in a value greater than 3.
2032 */
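/*
 * Worked example (illustrative): at the same 66 MHz,
 * DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. HOLDTIME = 0 and
 * the output is held for one ~15 ns clock cycle, which satisfies the
 * 10 ns minimum. The result is packed below as
 * phy_speed = (mii_speed << 1) | (holdtime << 8).
 */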
2033 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2034
2035 fep->phy_speed = mii_speed << 1 | holdtime << 8;
2036
2037 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2038
2039 fep->mii_bus = mdiobus_alloc();
2040 if (fep->mii_bus == NULL) {
2041 err = -ENOMEM;
2042 goto err_out;
2043 }
2044
2045 fep->mii_bus->name = "fec_enet_mii_bus";
2046 fep->mii_bus->read = fec_enet_mdio_read;
2047 fep->mii_bus->write = fec_enet_mdio_write;
2048 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2049 pdev->name, fep->dev_id + 1);
2050 fep->mii_bus->priv = fep;
2051 fep->mii_bus->parent = &pdev->dev;
2052
2053 node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2054 if (node) {
2055 err = of_mdiobus_register(fep->mii_bus, node);
2056 of_node_put(node);
2057 } else {
2058 err = mdiobus_register(fep->mii_bus);
2059 }
2060
2061 if (err)
2062 goto err_out_free_mdiobus;
2063
2064 mii_cnt++;
2065
2066 /* save fec0 mii_bus */
2067 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2068 fec0_mii_bus = fep->mii_bus;
2069
2070 return 0;
2071
2072 err_out_free_mdiobus:
2073 mdiobus_free(fep->mii_bus);
2074 err_out:
2075 return err;
2076 }
2077
2078 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2079 {
2080 if (--mii_cnt == 0) {
2081 mdiobus_unregister(fep->mii_bus);
2082 mdiobus_free(fep->mii_bus);
2083 }
2084 }
2085
2086 static void fec_enet_get_drvinfo(struct net_device *ndev,
2087 struct ethtool_drvinfo *info)
2088 {
2089 struct fec_enet_private *fep = netdev_priv(ndev);
2090
2091 strlcpy(info->driver, fep->pdev->dev.driver->name,
2092 sizeof(info->driver));
2093 strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
2094 strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2095 }
2096
2097 static int fec_enet_get_regs_len(struct net_device *ndev)
2098 {
2099 struct fec_enet_private *fep = netdev_priv(ndev);
2100 struct resource *r;
2101 int s = 0;
2102
2103 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2104 if (r)
2105 s = resource_size(r);
2106
2107 return s;
2108 }
2109
2110 /* List of registers that can safely be read to dump them with ethtool */
2111 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2112 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
2113 static u32 fec_enet_register_offset[] = {
2114 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2115 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2116 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2117 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2118 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2119 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2120 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2121 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2122 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2123 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2124 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2125 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2126 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2127 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2128 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2129 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2130 RMON_T_P_GTE2048, RMON_T_OCTETS,
2131 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2132 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2133 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2134 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2135 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2136 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2137 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2138 RMON_R_P_GTE2048, RMON_R_OCTETS,
2139 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2140 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2141 };
2142 #else
2143 static u32 fec_enet_register_offset[] = {
2144 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2145 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2146 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2147 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2148 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2149 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2150 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2151 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2152 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2153 };
2154 #endif
2155
2156 static void fec_enet_get_regs(struct net_device *ndev,
2157 struct ethtool_regs *regs, void *regbuf)
2158 {
2159 struct fec_enet_private *fep = netdev_priv(ndev);
2160 u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2161 u32 *buf = (u32 *)regbuf;
2162 u32 i, off;
2163
2164 memset(buf, 0, regs->len);
2165
2166 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
2167 off = fec_enet_register_offset[i] / 4;
2168 buf[off] = readl(&theregs[off]);
2169 }
2170 }
2171
2172 static int fec_enet_get_ts_info(struct net_device *ndev,
2173 struct ethtool_ts_info *info)
2174 {
2175 struct fec_enet_private *fep = netdev_priv(ndev);
2176
2177 if (fep->bufdesc_ex) {
2178
2179 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2180 SOF_TIMESTAMPING_RX_SOFTWARE |
2181 SOF_TIMESTAMPING_SOFTWARE |
2182 SOF_TIMESTAMPING_TX_HARDWARE |
2183 SOF_TIMESTAMPING_RX_HARDWARE |
2184 SOF_TIMESTAMPING_RAW_HARDWARE;
2185 if (fep->ptp_clock)
2186 info->phc_index = ptp_clock_index(fep->ptp_clock);
2187 else
2188 info->phc_index = -1;
2189
2190 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2191 (1 << HWTSTAMP_TX_ON);
2192
2193 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2194 (1 << HWTSTAMP_FILTER_ALL);
2195 return 0;
2196 } else {
2197 return ethtool_op_get_ts_info(ndev, info);
2198 }
2199 }
2200
2201 #if !defined(CONFIG_M5272)
2202
2203 static void fec_enet_get_pauseparam(struct net_device *ndev,
2204 struct ethtool_pauseparam *pause)
2205 {
2206 struct fec_enet_private *fep = netdev_priv(ndev);
2207
2208 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2209 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2210 pause->rx_pause = pause->tx_pause;
2211 }
2212
2213 static int fec_enet_set_pauseparam(struct net_device *ndev,
2214 struct ethtool_pauseparam *pause)
2215 {
2216 struct fec_enet_private *fep = netdev_priv(ndev);
2217
2218 if (!ndev->phydev)
2219 return -ENODEV;
2220
2221 if (pause->tx_pause != pause->rx_pause) {
2222 netdev_info(ndev,
2223 "hardware only support enable/disable both tx and rx");
2224 return -EINVAL;
2225 }
2226
2227 fep->pause_flag = 0;
2228
2229 /* tx pause must be same as rx pause */
2230 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2231 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2232
2233 if (pause->rx_pause || pause->autoneg) {
2234 ndev->phydev->supported |= ADVERTISED_Pause;
2235 ndev->phydev->advertising |= ADVERTISED_Pause;
2236 } else {
2237 ndev->phydev->supported &= ~ADVERTISED_Pause;
2238 ndev->phydev->advertising &= ~ADVERTISED_Pause;
2239 }
2240
2241 if (pause->autoneg) {
2242 if (netif_running(ndev))
2243 fec_stop(ndev);
2244 phy_start_aneg(ndev->phydev);
2245 }
2246 if (netif_running(ndev)) {
2247 napi_disable(&fep->napi);
2248 netif_tx_lock_bh(ndev);
2249 fec_restart(ndev);
2250 netif_wake_queue(ndev);
2251 netif_tx_unlock_bh(ndev);
2252 napi_enable(&fep->napi);
2253 }
2254
2255 return 0;
2256 }
2257
2258 static const struct fec_stat {
2259 char name[ETH_GSTRING_LEN];
2260 u16 offset;
2261 } fec_stats[] = {
2262 /* RMON TX */
2263 { "tx_dropped", RMON_T_DROP },
2264 { "tx_packets", RMON_T_PACKETS },
2265 { "tx_broadcast", RMON_T_BC_PKT },
2266 { "tx_multicast", RMON_T_MC_PKT },
2267 { "tx_crc_errors", RMON_T_CRC_ALIGN },
2268 { "tx_undersize", RMON_T_UNDERSIZE },
2269 { "tx_oversize", RMON_T_OVERSIZE },
2270 { "tx_fragment", RMON_T_FRAG },
2271 { "tx_jabber", RMON_T_JAB },
2272 { "tx_collision", RMON_T_COL },
2273 { "tx_64byte", RMON_T_P64 },
2274 { "tx_65to127byte", RMON_T_P65TO127 },
2275 { "tx_128to255byte", RMON_T_P128TO255 },
2276 { "tx_256to511byte", RMON_T_P256TO511 },
2277 { "tx_512to1023byte", RMON_T_P512TO1023 },
2278 { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2279 { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2280 { "tx_octets", RMON_T_OCTETS },
2281
2282 /* IEEE TX */
2283 { "IEEE_tx_drop", IEEE_T_DROP },
2284 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2285 { "IEEE_tx_1col", IEEE_T_1COL },
2286 { "IEEE_tx_mcol", IEEE_T_MCOL },
2287 { "IEEE_tx_def", IEEE_T_DEF },
2288 { "IEEE_tx_lcol", IEEE_T_LCOL },
2289 { "IEEE_tx_excol", IEEE_T_EXCOL },
2290 { "IEEE_tx_macerr", IEEE_T_MACERR },
2291 { "IEEE_tx_cserr", IEEE_T_CSERR },
2292 { "IEEE_tx_sqe", IEEE_T_SQE },
2293 { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2294 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2295
2296 /* RMON RX */
2297 { "rx_packets", RMON_R_PACKETS },
2298 { "rx_broadcast", RMON_R_BC_PKT },
2299 { "rx_multicast", RMON_R_MC_PKT },
2300 { "rx_crc_errors", RMON_R_CRC_ALIGN },
2301 { "rx_undersize", RMON_R_UNDERSIZE },
2302 { "rx_oversize", RMON_R_OVERSIZE },
2303 { "rx_fragment", RMON_R_FRAG },
2304 { "rx_jabber", RMON_R_JAB },
2305 { "rx_64byte", RMON_R_P64 },
2306 { "rx_65to127byte", RMON_R_P65TO127 },
2307 { "rx_128to255byte", RMON_R_P128TO255 },
2308 { "rx_256to511byte", RMON_R_P256TO511 },
2309 { "rx_512to1023byte", RMON_R_P512TO1023 },
2310 { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2311 { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2312 { "rx_octets", RMON_R_OCTETS },
2313
2314 /* IEEE RX */
2315 { "IEEE_rx_drop", IEEE_R_DROP },
2316 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2317 { "IEEE_rx_crc", IEEE_R_CRC },
2318 { "IEEE_rx_align", IEEE_R_ALIGN },
2319 { "IEEE_rx_macerr", IEEE_R_MACERR },
2320 { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2321 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2322 };
2323
2324 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
2325
2326 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2327 {
2328 struct fec_enet_private *fep = netdev_priv(dev);
2329 int i;
2330
2331 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2332 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2333 }
2334
2335 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2336 struct ethtool_stats *stats, u64 *data)
2337 {
2338 struct fec_enet_private *fep = netdev_priv(dev);
2339
2340 if (netif_running(dev))
2341 fec_enet_update_ethtool_stats(dev);
2342
2343 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2344 }
2345
2346 static void fec_enet_get_strings(struct net_device *netdev,
2347 u32 stringset, u8 *data)
2348 {
2349 int i;
2350 switch (stringset) {
2351 case ETH_SS_STATS:
2352 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2353 memcpy(data + i * ETH_GSTRING_LEN,
2354 fec_stats[i].name, ETH_GSTRING_LEN);
2355 break;
2356 }
2357 }
2358
2359 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
2360 {
2361 switch (sset) {
2362 case ETH_SS_STATS:
2363 return ARRAY_SIZE(fec_stats);
2364 default:
2365 return -EOPNOTSUPP;
2366 }
2367 }
2368
2369 #else /* !defined(CONFIG_M5272) */
2370 #define FEC_STATS_SIZE 0
2371 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
2372 {
2373 }
2374
2375 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
2376 {
2377 }
2378 #endif /* !defined(CONFIG_M5272) */
2379
2380 static int fec_enet_nway_reset(struct net_device *dev)
2381 {
2382 struct phy_device *phydev = dev->phydev;
2383
2384 if (!phydev)
2385 return -ENODEV;
2386
2387 return genphy_restart_aneg(phydev);
2388 }
2389
2390 /* ITR clock source is enet system clock (clk_ahb).
2391 * One TCTT unit is 64 clock cycles, i.e. cycle_ns * 64 ns.
2392 * So, the ICTT value = X us / (cycle_ns * 64)
2393 */
2394 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
2395 {
2396 struct fec_enet_private *fep = netdev_priv(ndev);
2397
2398 return us * (fep->itr_clk_rate / 64000) / 1000;
2399 }
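/*
 * Illustrative numbers: with itr_clk_rate = 66 MHz, one ITR tick is
 * 64 / 66 MHz ~= 0.97 us, so fec_enet_us_to_itr_clock(ndev, 1000)
 * returns 1000 * (66000000 / 64000) / 1000 = 1031 ticks.
 */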
2400
2401 /* Set threshold for interrupt coalescing */
2402 static void fec_enet_itr_coal_set(struct net_device *ndev)
2403 {
2404 struct fec_enet_private *fep = netdev_priv(ndev);
2405 int rx_itr, tx_itr;
2406
2407 /* Must be greater than zero to avoid unpredictable behavior */
2408 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
2409 !fep->tx_time_itr || !fep->tx_pkts_itr)
2410 return;
2411
2412 /* Select enet system clock as Interrupt Coalescing
2413 * timer Clock Source
2414 */
2415 rx_itr = FEC_ITR_CLK_SEL;
2416 tx_itr = FEC_ITR_CLK_SEL;
2417
2418 /* set ICFT and ICTT */
2419 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
2420 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
2421 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
2422 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
2423
2424 rx_itr |= FEC_ITR_EN;
2425 tx_itr |= FEC_ITR_EN;
2426
2427 writel(tx_itr, fep->hwp + FEC_TXIC0);
2428 writel(rx_itr, fep->hwp + FEC_RXIC0);
2429 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
2430 writel(tx_itr, fep->hwp + FEC_TXIC1);
2431 writel(rx_itr, fep->hwp + FEC_RXIC1);
2432 writel(tx_itr, fep->hwp + FEC_TXIC2);
2433 writel(rx_itr, fep->hwp + FEC_RXIC2);
2434 }
2435 }
2436
2437 static int
2438 fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
2439 {
2440 struct fec_enet_private *fep = netdev_priv(ndev);
2441
2442 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2443 return -EOPNOTSUPP;
2444
2445 ec->rx_coalesce_usecs = fep->rx_time_itr;
2446 ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
2447
2448 ec->tx_coalesce_usecs = fep->tx_time_itr;
2449 ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
2450
2451 return 0;
2452 }
2453
2454 static int
2455 fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
2456 {
2457 struct fec_enet_private *fep = netdev_priv(ndev);
2458 unsigned int cycle;
2459
2460 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2461 return -EOPNOTSUPP;
2462
2463 if (ec->rx_max_coalesced_frames > 255) {
2464 pr_err("Rx coalesced frames exceed hardware limitation\n");
2465 return -EINVAL;
2466 }
2467
2468 if (ec->tx_max_coalesced_frames > 255) {
2469 pr_err("Tx coalesced frame exceed hardware limitation\n");
2470 return -EINVAL;
2471 }
2472
2473 cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
2474 if (cycle > 0xFFFF) {
2475 pr_err("Rx coalesced usec exceed hardware limitation\n");
2476 return -EINVAL;
2477 }
2478
2479 cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
2480 if (cycle > 0xFFFF) {
2481 pr_err("Rx coalesced usec exceed hardware limitation\n");
2482 return -EINVAL;
2483 }
2484
2485 fep->rx_time_itr = ec->rx_coalesce_usecs;
2486 fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
2487
2488 fep->tx_time_itr = ec->tx_coalesce_usecs;
2489 fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
2490
2491 fec_enet_itr_coal_set(ndev);
2492
2493 return 0;
2494 }
2495
2496 static void fec_enet_itr_coal_init(struct net_device *ndev)
2497 {
2498 struct ethtool_coalesce ec;
2499
2500 ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
2501 ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
2502
2503 ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
2504 ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
2505
2506 fec_enet_set_coalesce(ndev, &ec);
2507 }
2508
2509 static int fec_enet_get_tunable(struct net_device *netdev,
2510 const struct ethtool_tunable *tuna,
2511 void *data)
2512 {
2513 struct fec_enet_private *fep = netdev_priv(netdev);
2514 int ret = 0;
2515
2516 switch (tuna->id) {
2517 case ETHTOOL_RX_COPYBREAK:
2518 *(u32 *)data = fep->rx_copybreak;
2519 break;
2520 default:
2521 ret = -EINVAL;
2522 break;
2523 }
2524
2525 return ret;
2526 }
2527
2528 static int fec_enet_set_tunable(struct net_device *netdev,
2529 const struct ethtool_tunable *tuna,
2530 const void *data)
2531 {
2532 struct fec_enet_private *fep = netdev_priv(netdev);
2533 int ret = 0;
2534
2535 switch (tuna->id) {
2536 case ETHTOOL_RX_COPYBREAK:
2537 fep->rx_copybreak = *(u32 *)data;
2538 break;
2539 default:
2540 ret = -EINVAL;
2541 break;
2542 }
2543
2544 return ret;
2545 }
2546
2547 static void
2548 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2549 {
2550 struct fec_enet_private *fep = netdev_priv(ndev);
2551
2552 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
2553 wol->supported = WAKE_MAGIC;
2554 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
2555 } else {
2556 wol->supported = wol->wolopts = 0;
2557 }
2558 }
2559
2560 static int
2561 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2562 {
2563 struct fec_enet_private *fep = netdev_priv(ndev);
2564
2565 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
2566 return -EINVAL;
2567
2568 if (wol->wolopts & ~WAKE_MAGIC)
2569 return -EINVAL;
2570
2571 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2572 if (device_may_wakeup(&ndev->dev)) {
2573 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
2574 if (fep->irq[0] > 0)
2575 enable_irq_wake(fep->irq[0]);
2576 } else {
2577 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
2578 if (fep->irq[0] > 0)
2579 disable_irq_wake(fep->irq[0]);
2580 }
2581
2582 return 0;
2583 }
2584
2585 static const struct ethtool_ops fec_enet_ethtool_ops = {
2586 .get_drvinfo = fec_enet_get_drvinfo,
2587 .get_regs_len = fec_enet_get_regs_len,
2588 .get_regs = fec_enet_get_regs,
2589 .nway_reset = fec_enet_nway_reset,
2590 .get_link = ethtool_op_get_link,
2591 .get_coalesce = fec_enet_get_coalesce,
2592 .set_coalesce = fec_enet_set_coalesce,
2593 #ifndef CONFIG_M5272
2594 .get_pauseparam = fec_enet_get_pauseparam,
2595 .set_pauseparam = fec_enet_set_pauseparam,
2596 .get_strings = fec_enet_get_strings,
2597 .get_ethtool_stats = fec_enet_get_ethtool_stats,
2598 .get_sset_count = fec_enet_get_sset_count,
2599 #endif
2600 .get_ts_info = fec_enet_get_ts_info,
2601 .get_tunable = fec_enet_get_tunable,
2602 .set_tunable = fec_enet_set_tunable,
2603 .get_wol = fec_enet_get_wol,
2604 .set_wol = fec_enet_set_wol,
2605 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2606 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2607 };
2608
2609 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2610 {
2611 struct fec_enet_private *fep = netdev_priv(ndev);
2612 struct phy_device *phydev = ndev->phydev;
2613
2614 if (!netif_running(ndev))
2615 return -EINVAL;
2616
2617 if (!phydev)
2618 return -ENODEV;
2619
2620 if (fep->bufdesc_ex) {
2621 if (cmd == SIOCSHWTSTAMP)
2622 return fec_ptp_set(ndev, rq);
2623 if (cmd == SIOCGHWTSTAMP)
2624 return fec_ptp_get(ndev, rq);
2625 }
2626
2627 return phy_mii_ioctl(phydev, rq, cmd);
2628 }
2629
2630 static void fec_enet_free_buffers(struct net_device *ndev)
2631 {
2632 struct fec_enet_private *fep = netdev_priv(ndev);
2633 unsigned int i;
2634 struct sk_buff *skb;
2635 struct bufdesc *bdp;
2636 struct fec_enet_priv_tx_q *txq;
2637 struct fec_enet_priv_rx_q *rxq;
2638 unsigned int q;
2639
2640 for (q = 0; q < fep->num_rx_queues; q++) {
2641 rxq = fep->rx_queue[q];
2642 bdp = rxq->bd.base;
2643 for (i = 0; i < rxq->bd.ring_size; i++) {
2644 skb = rxq->rx_skbuff[i];
2645 rxq->rx_skbuff[i] = NULL;
2646 if (skb) {
2647 dma_unmap_single(&fep->pdev->dev,
2648 fec32_to_cpu(bdp->cbd_bufaddr),
2649 FEC_ENET_RX_FRSIZE - fep->rx_align,
2650 DMA_FROM_DEVICE);
2651 dev_kfree_skb(skb);
2652 }
2653 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2654 }
2655 }
2656
2657 for (q = 0; q < fep->num_tx_queues; q++) {
2658 txq = fep->tx_queue[q];
2659 bdp = txq->bd.base;
2660 for (i = 0; i < txq->bd.ring_size; i++) {
2661 kfree(txq->tx_bounce[i]);
2662 txq->tx_bounce[i] = NULL;
2663 skb = txq->tx_skbuff[i];
2664 txq->tx_skbuff[i] = NULL;
2665 dev_kfree_skb(skb);
2666 }
2667 }
2668 }
2669
2670 static void fec_enet_free_queue(struct net_device *ndev)
2671 {
2672 struct fec_enet_private *fep = netdev_priv(ndev);
2673 int i;
2674 struct fec_enet_priv_tx_q *txq;
2675
2676 for (i = 0; i < fep->num_tx_queues; i++)
2677 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
2678 txq = fep->tx_queue[i];
2679 dma_free_coherent(NULL,
2680 txq->bd.ring_size * TSO_HEADER_SIZE,
2681 txq->tso_hdrs,
2682 txq->tso_hdrs_dma);
2683 }
2684
2685 for (i = 0; i < fep->num_rx_queues; i++)
2686 kfree(fep->rx_queue[i]);
2687 for (i = 0; i < fep->num_tx_queues; i++)
2688 kfree(fep->tx_queue[i]);
2689 }
2690
2691 static int fec_enet_alloc_queue(struct net_device *ndev)
2692 {
2693 struct fec_enet_private *fep = netdev_priv(ndev);
2694 int i;
2695 int ret = 0;
2696 struct fec_enet_priv_tx_q *txq;
2697
2698 for (i = 0; i < fep->num_tx_queues; i++) {
2699 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
2700 if (!txq) {
2701 ret = -ENOMEM;
2702 goto alloc_failed;
2703 }
2704
2705 fep->tx_queue[i] = txq;
2706 txq->bd.ring_size = TX_RING_SIZE;
2707 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
2708
2709 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2710 txq->tx_wake_threshold =
2711 (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
2712
2713 txq->tso_hdrs = dma_alloc_coherent(NULL,
2714 txq->bd.ring_size * TSO_HEADER_SIZE,
2715 &txq->tso_hdrs_dma,
2716 GFP_KERNEL);
2717 if (!txq->tso_hdrs) {
2718 ret = -ENOMEM;
2719 goto alloc_failed;
2720 }
2721 }
2722
2723 for (i = 0; i < fep->num_rx_queues; i++) {
2724 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
2725 GFP_KERNEL);
2726 if (!fep->rx_queue[i]) {
2727 ret = -ENOMEM;
2728 goto alloc_failed;
2729 }
2730
2731 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
2732 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
2733 }
2734 return ret;
2735
2736 alloc_failed:
2737 fec_enet_free_queue(ndev);
2738 return ret;
2739 }
2740
2741 static int
2742 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
2743 {
2744 struct fec_enet_private *fep = netdev_priv(ndev);
2745 unsigned int i;
2746 struct sk_buff *skb;
2747 struct bufdesc *bdp;
2748 struct fec_enet_priv_rx_q *rxq;
2749
2750 rxq = fep->rx_queue[queue];
2751 bdp = rxq->bd.base;
2752 for (i = 0; i < rxq->bd.ring_size; i++) {
2753 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
2754 if (!skb)
2755 goto err_alloc;
2756
2757 if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
2758 dev_kfree_skb(skb);
2759 goto err_alloc;
2760 }
2761
2762 rxq->rx_skbuff[i] = skb;
2763 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
2764
2765 if (fep->bufdesc_ex) {
2766 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2767 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
2768 }
2769
2770 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2771 }
2772
2773 /* Set the last buffer to wrap. */
2774 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
2775 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2776 return 0;
2777
2778 err_alloc:
2779 fec_enet_free_buffers(ndev);
2780 return -ENOMEM;
2781 }
2782
2783 static int
2784 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
2785 {
2786 struct fec_enet_private *fep = netdev_priv(ndev);
2787 unsigned int i;
2788 struct bufdesc *bdp;
2789 struct fec_enet_priv_tx_q *txq;
2790
2791 txq = fep->tx_queue[queue];
2792 bdp = txq->bd.base;
2793 for (i = 0; i < txq->bd.ring_size; i++) {
2794 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
2795 if (!txq->tx_bounce[i])
2796 goto err_alloc;
2797
2798 bdp->cbd_sc = cpu_to_fec16(0);
2799 bdp->cbd_bufaddr = cpu_to_fec32(0);
2800
2801 if (fep->bufdesc_ex) {
2802 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2803 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
2804 }
2805
2806 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
2807 }
2808
2809 /* Set the last buffer to wrap. */
2810 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
2811 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2812
2813 return 0;
2814
2815 err_alloc:
2816 fec_enet_free_buffers(ndev);
2817 return -ENOMEM;
2818 }
2819
2820 static int fec_enet_alloc_buffers(struct net_device *ndev)
2821 {
2822 struct fec_enet_private *fep = netdev_priv(ndev);
2823 unsigned int i;
2824
2825 for (i = 0; i < fep->num_rx_queues; i++)
2826 if (fec_enet_alloc_rxq_buffers(ndev, i))
2827 return -ENOMEM;
2828
2829 for (i = 0; i < fep->num_tx_queues; i++)
2830 if (fec_enet_alloc_txq_buffers(ndev, i))
2831 return -ENOMEM;
2832 return 0;
2833 }
2834
2835 static int
2836 fec_enet_open(struct net_device *ndev)
2837 {
2838 struct fec_enet_private *fep = netdev_priv(ndev);
2839 int ret;
2840
2841 ret = pm_runtime_get_sync(&fep->pdev->dev);
2842 if (ret < 0)
2843 return ret;
2844
2845 pinctrl_pm_select_default_state(&fep->pdev->dev);
2846 ret = fec_enet_clk_enable(ndev, true);
2847 if (ret)
2848 goto clk_enable;
2849
2850 /* I should reset the ring buffers here, but I don't yet know
2851 * a simple way to do that.
2852 */
2853
2854 ret = fec_enet_alloc_buffers(ndev);
2855 if (ret)
2856 goto err_enet_alloc;
2857
2858 /* Init MAC prior to mii bus probe */
2859 fec_restart(ndev);
2860
2861 /* Probe and connect to the PHY when opening the interface */
2862 ret = fec_enet_mii_probe(ndev);
2863 if (ret)
2864 goto err_enet_mii_probe;
2865
2866 if (fep->quirks & FEC_QUIRK_ERR006687)
2867 imx6q_cpuidle_fec_irqs_used();
2868
2869 napi_enable(&fep->napi);
2870 phy_start(ndev->phydev);
2871 netif_tx_start_all_queues(ndev);
2872
2873 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
2874 FEC_WOL_FLAG_ENABLE);
2875
2876 return 0;
2877
2878 err_enet_mii_probe:
2879 fec_enet_free_buffers(ndev);
2880 err_enet_alloc:
2881 fec_enet_clk_enable(ndev, false);
2882 clk_enable:
2883 pm_runtime_mark_last_busy(&fep->pdev->dev);
2884 pm_runtime_put_autosuspend(&fep->pdev->dev);
2885 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2886 return ret;
2887 }
2888
2889 static int
2890 fec_enet_close(struct net_device *ndev)
2891 {
2892 struct fec_enet_private *fep = netdev_priv(ndev);
2893
2894 phy_stop(ndev->phydev);
2895
2896 if (netif_device_present(ndev)) {
2897 napi_disable(&fep->napi);
2898 netif_tx_disable(ndev);
2899 fec_stop(ndev);
2900 }
2901
2902 phy_disconnect(ndev->phydev);
2903
2904 if (fep->quirks & FEC_QUIRK_ERR006687)
2905 imx6q_cpuidle_fec_irqs_unused();
2906
2907 fec_enet_update_ethtool_stats(ndev);
2908
2909 fec_enet_clk_enable(ndev, false);
2910 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2911 pm_runtime_mark_last_busy(&fep->pdev->dev);
2912 pm_runtime_put_autosuspend(&fep->pdev->dev);
2913
2914 fec_enet_free_buffers(ndev);
2915
2916 return 0;
2917 }
2918
2919 /* Set or clear the multicast filter for this adaptor.
2920 * Skeleton taken from sunlance driver.
2921 * The CPM Ethernet implementation allows Multicast as well as individual
2922 * MAC address filtering. Some of the drivers check to make sure it is
2923 * a group multicast address, and discard those that are not. I guess I
2924 * will do the same for now, but just remove the test if you want
2925 * individual filtering as well (do the upper net layers want or support
2926 * this kind of feature?).
2927 */
2928
2929 #define FEC_HASH_BITS 6 /* #bits in hash */
2930 #define CRC32_POLY 0xEDB88320
2931
2932 static void set_multicast_list(struct net_device *ndev)
2933 {
2934 struct fec_enet_private *fep = netdev_priv(ndev);
2935 struct netdev_hw_addr *ha;
2936 unsigned int i, bit, data, crc, tmp;
2937 unsigned char hash;
2938 unsigned int hash_high = 0, hash_low = 0;
2939
2940 if (ndev->flags & IFF_PROMISC) {
2941 tmp = readl(fep->hwp + FEC_R_CNTRL);
2942 tmp |= 0x8;
2943 writel(tmp, fep->hwp + FEC_R_CNTRL);
2944 return;
2945 }
2946
2947 tmp = readl(fep->hwp + FEC_R_CNTRL);
2948 tmp &= ~0x8;
2949 writel(tmp, fep->hwp + FEC_R_CNTRL);
2950
2951 if (ndev->flags & IFF_ALLMULTI) {
2952 /* Catch all multicast addresses, so set the
2953 * filter to all 1's
2954 */
2955 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2956 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2957
2958 return;
2959 }
2960
2961 /* Add the addresses in hash register */
2962 netdev_for_each_mc_addr(ha, ndev) {
2963 /* calculate crc32 value of mac address */
2964 crc = 0xffffffff;
2965
2966 for (i = 0; i < ndev->addr_len; i++) {
2967 data = ha->addr[i];
2968 for (bit = 0; bit < 8; bit++, data >>= 1) {
2969 crc = (crc >> 1) ^
2970 (((crc ^ data) & 1) ? CRC32_POLY : 0);
2971 }
2972 }
2973
2974 /* only the upper 6 bits (FEC_HASH_BITS) are used,
2975 * which point to a specific bit in the hash registers
2976 */
2977 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2978
2979 if (hash > 31)
2980 hash_high |= 1 << (hash - 32);
2981 else
2982 hash_low |= 1 << hash;
2983 }
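/* Hypothetical example: a CRC of 0xabcd1234 gives
 * hash = 0xabcd1234 >> 26 = 42; since 42 > 31 this sets bit
 * (42 - 32) = 10 in hash_high, i.e. in GRP_HASH_TABLE_HIGH.
 */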
2984
2985 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2986 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2987 }
2988
2989 /* Set a MAC change in hardware. */
2990 static int
2991 fec_set_mac_address(struct net_device *ndev, void *p)
2992 {
2993 struct fec_enet_private *fep = netdev_priv(ndev);
2994 struct sockaddr *addr = p;
2995
2996 if (addr) {
2997 if (!is_valid_ether_addr(addr->sa_data))
2998 return -EADDRNOTAVAIL;
2999 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3000 }
3001
3002 /* Add netif status check here to avoid system hang in the following case:
3003 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
3004 * After ethx is down, all FEC clocks are gated off and any register
3005 * access would then hang the system.
3006 */
3007 if (!netif_running(ndev))
3008 return 0;
3009
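/* The address is packed big-endian into the two registers; e.g. a
 * hypothetical MAC of 00:04:9f:01:02:03 becomes
 * FEC_ADDR_LOW = 0x00049f01 and FEC_ADDR_HIGH = 0x02030000.
 */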
3010 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3011 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3012 fep->hwp + FEC_ADDR_LOW);
3013 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3014 fep->hwp + FEC_ADDR_HIGH);
3015 return 0;
3016 }
3017
3018 #ifdef CONFIG_NET_POLL_CONTROLLER
3019 /**
3020 * fec_poll_controller - FEC Poll controller function
3021 * @dev: The FEC network adapter
3022 *
3023 * Polled functionality used by netconsole and others in non-interrupt mode
3024 *
3025 */
3026 static void fec_poll_controller(struct net_device *dev)
3027 {
3028 int i;
3029 struct fec_enet_private *fep = netdev_priv(dev);
3030
3031 for (i = 0; i < FEC_IRQ_NUM; i++) {
3032 if (fep->irq[i] > 0) {
3033 disable_irq(fep->irq[i]);
3034 fec_enet_interrupt(fep->irq[i], dev);
3035 enable_irq(fep->irq[i]);
3036 }
3037 }
3038 }
3039 #endif
3040
3041 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3042 netdev_features_t features)
3043 {
3044 struct fec_enet_private *fep = netdev_priv(netdev);
3045 netdev_features_t changed = features ^ netdev->features;
3046
3047 netdev->features = features;
3048
3049 /* Receive checksum has been changed */
3050 if (changed & NETIF_F_RXCSUM) {
3051 if (features & NETIF_F_RXCSUM)
3052 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3053 else
3054 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3055 }
3056 }
3057
3058 static int fec_set_features(struct net_device *netdev,
3059 netdev_features_t features)
3060 {
3061 struct fec_enet_private *fep = netdev_priv(netdev);
3062 netdev_features_t changed = features ^ netdev->features;
3063
3064 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3065 napi_disable(&fep->napi);
3066 netif_tx_lock_bh(netdev);
3067 fec_stop(netdev);
3068 fec_enet_set_netdev_features(netdev, features);
3069 fec_restart(netdev);
3070 netif_tx_wake_all_queues(netdev);
3071 netif_tx_unlock_bh(netdev);
3072 napi_enable(&fep->napi);
3073 } else {
3074 fec_enet_set_netdev_features(netdev, features);
3075 }
3076
3077 return 0;
3078 }
3079
3080 static const struct net_device_ops fec_netdev_ops = {
3081 .ndo_open = fec_enet_open,
3082 .ndo_stop = fec_enet_close,
3083 .ndo_start_xmit = fec_enet_start_xmit,
3084 .ndo_set_rx_mode = set_multicast_list,
3085 .ndo_change_mtu = eth_change_mtu,
3086 .ndo_validate_addr = eth_validate_addr,
3087 .ndo_tx_timeout = fec_timeout,
3088 .ndo_set_mac_address = fec_set_mac_address,
3089 .ndo_do_ioctl = fec_enet_ioctl,
3090 #ifdef CONFIG_NET_POLL_CONTROLLER
3091 .ndo_poll_controller = fec_poll_controller,
3092 #endif
3093 .ndo_set_features = fec_set_features,
3094 };
3095
3096 static const unsigned short offset_des_active_rxq[] = {
3097 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
3098 };
3099
3100 static const unsigned short offset_des_active_txq[] = {
3101 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
3102 };
3103
3104 /*
3105 * XXX: We need to clean up on failure exits here.
3106 *
3107 */
3108 static int fec_enet_init(struct net_device *ndev)
3109 {
3110 struct fec_enet_private *fep = netdev_priv(ndev);
3111 struct bufdesc *cbd_base;
3112 dma_addr_t bd_dma;
3113 int bd_size;
3114 unsigned int i;
3115 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
3116 sizeof(struct bufdesc);
3117 unsigned dsize_log2 = __fls(dsize);
3118
3119 WARN_ON(dsize != (1 << dsize_log2));
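/* Illustrative sizes, assuming the usual fec.h layouts: struct bufdesc
 * is 8 bytes (dsize_log2 = 3) and struct bufdesc_ex is 32 bytes
 * (dsize_log2 = 5), so descriptor indexing can use shifts instead of
 * divides.
 */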
3120 #if defined(CONFIG_ARM)
3121 fep->rx_align = 0xf;
3122 fep->tx_align = 0xf;
3123 #else
3124 fep->rx_align = 0x3;
3125 fep->tx_align = 0x3;
3126 #endif
3127
3128 fec_enet_alloc_queue(ndev);
3129
3130 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
3131
3132 /* Allocate memory for buffer descriptors. */
3133 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3134 GFP_KERNEL);
3135 if (!cbd_base)
3136 return -ENOMEM;
3137
3138
3139 memset(cbd_base, 0, bd_size);
3140
3141 /* Get the Ethernet address */
3142 fec_get_mac(ndev);
3143 /* make sure MAC we just acquired is programmed into the hw */
3144 fec_set_mac_address(ndev, NULL);
3145
3146 /* Set receive and transmit descriptor base. */
3147 for (i = 0; i < fep->num_rx_queues; i++) {
3148 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
3149 unsigned size = dsize * rxq->bd.ring_size;
3150
3151 rxq->bd.qid = i;
3152 rxq->bd.base = cbd_base;
3153 rxq->bd.cur = cbd_base;
3154 rxq->bd.dma = bd_dma;
3155 rxq->bd.dsize = dsize;
3156 rxq->bd.dsize_log2 = dsize_log2;
3157 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
3158 bd_dma += size;
3159 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
3160 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3161 }
3162
3163 for (i = 0; i < fep->num_tx_queues; i++) {
3164 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
3165 unsigned size = dsize * txq->bd.ring_size;
3166
3167 txq->bd.qid = i;
3168 txq->bd.base = cbd_base;
3169 txq->bd.cur = cbd_base;
3170 txq->bd.dma = bd_dma;
3171 txq->bd.dsize = dsize;
3172 txq->bd.dsize_log2 = dsize_log2;
3173 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
3174 bd_dma += size;
3175 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
3176 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3177 }
3178
3179
3180 /* The FEC Ethernet specific entries in the device structure */
3181 ndev->watchdog_timeo = TX_TIMEOUT;
3182 ndev->netdev_ops = &fec_netdev_ops;
3183 ndev->ethtool_ops = &fec_enet_ethtool_ops;
3184
3185 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
3186 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
3187
3188 if (fep->quirks & FEC_QUIRK_HAS_VLAN)
3189 /* enable hw VLAN support */
3190 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3191
3192 if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
3193 ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
3194
3195 /* enable hw accelerator */
3196 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
3197 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
3198 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3199 }
3200
3201 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
3202 fep->tx_align = 0;
3203 fep->rx_align = 0x3f;
3204 }
3205
3206 ndev->hw_features = ndev->features;
3207
3208 fec_restart(ndev);
3209
3210 fec_enet_update_ethtool_stats(ndev);
3211
3212 return 0;
3213 }
3214
3215 #ifdef CONFIG_OF
3216 static int fec_reset_phy(struct platform_device *pdev)
3217 {
3218 int err, phy_reset;
3219 bool active_high = false;
3220 int msec = 1;
3221 struct device_node *np = pdev->dev.of_node;
3222
3223 if (!np)
3224 return 0;
3225
3226 of_property_read_u32(np, "phy-reset-duration", &msec);
3227 /* A sane reset duration should not be longer than 1s */
3228 if (msec > 1000)
3229 msec = 1;
3230
3231 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
3232 if (phy_reset == -EPROBE_DEFER)
3233 return phy_reset;
3234 else if (!gpio_is_valid(phy_reset))
3235 return 0;
3236
3237 active_high = of_property_read_bool(np, "phy-reset-active-high");
3238
3239 err = devm_gpio_request_one(&pdev->dev, phy_reset,
3240 active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
3241 "phy-reset");
3242 if (err) {
3243 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
3244 return err;
3245 }
3246
3247 if (msec > 20)
3248 msleep(msec);
3249 else
3250 usleep_range(msec * 1000, msec * 1000 + 1000);
3251
3252 gpio_set_value_cansleep(phy_reset, !active_high);
3253
3254 return 0;
3255 }
3256 #else /* CONFIG_OF */
3257 static int fec_reset_phy(struct platform_device *pdev)
3258 {
3259 /*
3260 * In case of platform probe, the reset has been done
3261 * by machine code.
3262 */
3263 return 0;
3264 }
3265 #endif /* CONFIG_OF */
3266
3267 static void
3268 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
3269 {
3270 struct device_node *np = pdev->dev.of_node;
3271
3272 *num_tx = *num_rx = 1;
3273
3274 if (!np || !of_device_is_available(np))
3275 return;
3276
3277 /* parse the num of tx and rx queues */
3278 of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
3279
3280 of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
3281
3282 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
3283 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
3284 *num_tx);
3285 *num_tx = 1;
3286 return;
3287 }
3288
3289 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
3290 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
3291 *num_rx);
3292 *num_rx = 1;
3293 return;
3294 }
3295
3296 }
3297
3298 static int
3299 fec_probe(struct platform_device *pdev)
3300 {
3301 struct fec_enet_private *fep;
3302 struct fec_platform_data *pdata;
3303 struct net_device *ndev;
3304 int i, irq, ret = 0;
3305 struct resource *r;
3306 const struct of_device_id *of_id;
3307 static int dev_id;
3308 struct device_node *np = pdev->dev.of_node, *phy_node;
3309 int num_tx_qs;
3310 int num_rx_qs;
3311
3312 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
3313
3314 /* Init network device */
3315 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
3316 FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
3317 if (!ndev)
3318 return -ENOMEM;
3319
3320 SET_NETDEV_DEV(ndev, &pdev->dev);
3321
3322 /* setup board info structure */
3323 fep = netdev_priv(ndev);
3324
3325 of_id = of_match_device(fec_dt_ids, &pdev->dev);
3326 if (of_id)
3327 pdev->id_entry = of_id->data;
3328 fep->quirks = pdev->id_entry->driver_data;
3329
3330 fep->netdev = ndev;
3331 fep->num_rx_queues = num_rx_qs;
3332 fep->num_tx_queues = num_tx_qs;
3333
3334 #if !defined(CONFIG_M5272)
3335 /* default enable pause frame auto negotiation */
3336 if (fep->quirks & FEC_QUIRK_HAS_GBIT)
3337 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
3338 #endif
3339
3340 /* Select default pin state */
3341 pinctrl_pm_select_default_state(&pdev->dev);
3342
3343 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3344 fep->hwp = devm_ioremap_resource(&pdev->dev, r);
3345 if (IS_ERR(fep->hwp)) {
3346 ret = PTR_ERR(fep->hwp);
3347 goto failed_ioremap;
3348 }
3349
3350 fep->pdev = pdev;
3351 fep->dev_id = dev_id++;
3352
3353 platform_set_drvdata(pdev, ndev);
3354
3355 if ((of_machine_is_compatible("fsl,imx6q") ||
3356 of_machine_is_compatible("fsl,imx6dl")) &&
3357 !of_property_read_bool(np, "fsl,err006687-workaround-present"))
3358 fep->quirks |= FEC_QUIRK_ERR006687;
3359
3360 if (of_get_property(np, "fsl,magic-packet", NULL))
3361 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
3362
3363 phy_node = of_parse_phandle(np, "phy-handle", 0);
3364 if (!phy_node && of_phy_is_fixed_link(np)) {
3365 ret = of_phy_register_fixed_link(np);
3366 if (ret < 0) {
3367 dev_err(&pdev->dev,
3368 "broken fixed-link specification\n");
3369 goto failed_phy;
3370 }
3371 phy_node = of_node_get(np);
3372 }
3373 fep->phy_node = phy_node;
3374
3375 ret = of_get_phy_mode(pdev->dev.of_node);
3376 if (ret < 0) {
3377 pdata = dev_get_platdata(&pdev->dev);
3378 if (pdata)
3379 fep->phy_interface = pdata->phy;
3380 else
3381 fep->phy_interface = PHY_INTERFACE_MODE_MII;
3382 } else {
3383 fep->phy_interface = ret;
3384 }
3385
3386 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
3387 if (IS_ERR(fep->clk_ipg)) {
3388 ret = PTR_ERR(fep->clk_ipg);
3389 goto failed_clk;
3390 }
3391
3392 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
3393 if (IS_ERR(fep->clk_ahb)) {
3394 ret = PTR_ERR(fep->clk_ahb);
3395 goto failed_clk;
3396 }
3397
3398 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
3399
3400 /* enet_out is optional, depends on board */
3401 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
3402 if (IS_ERR(fep->clk_enet_out))
3403 fep->clk_enet_out = NULL;
3404
3405 fep->ptp_clk_on = false;
3406 mutex_init(&fep->ptp_clk_mutex);
3407
3408 /* clk_ref is optional, depends on board */
3409 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
3410 if (IS_ERR(fep->clk_ref))
3411 fep->clk_ref = NULL;
3412
3413 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
3414 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
3415 if (IS_ERR(fep->clk_ptp)) {
3416 fep->clk_ptp = NULL;
3417 fep->bufdesc_ex = false;
3418 }
3419
3420 ret = fec_enet_clk_enable(ndev, true);
3421 if (ret)
3422 goto failed_clk;
3423
3424 ret = clk_prepare_enable(fep->clk_ipg);
3425 if (ret)
3426 goto failed_clk_ipg;
3427
3428 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
3429 if (!IS_ERR(fep->reg_phy)) {
3430 ret = regulator_enable(fep->reg_phy);
3431 if (ret) {
3432 dev_err(&pdev->dev,
3433 "Failed to enable phy regulator: %d\n", ret);
3434 clk_disable_unprepare(fep->clk_ipg);
3435 goto failed_regulator;
3436 }
3437 } else {
3438 fep->reg_phy = NULL;
3439 }
3440
3441 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3442 pm_runtime_use_autosuspend(&pdev->dev);
3443 pm_runtime_get_noresume(&pdev->dev);
3444 pm_runtime_set_active(&pdev->dev);
3445 pm_runtime_enable(&pdev->dev);
3446
3447 ret = fec_reset_phy(pdev);
3448 if (ret)
3449 goto failed_reset;
3450
3451 if (fep->bufdesc_ex)
3452 fec_ptp_init(pdev);
3453
3454 ret = fec_enet_init(ndev);
3455 if (ret)
3456 goto failed_init;
3457
3458 for (i = 0; i < FEC_IRQ_NUM; i++) {
3459 irq = platform_get_irq(pdev, i);
3460 if (irq < 0) {
3461 if (i)
3462 break;
3463 ret = irq;
3464 goto failed_irq;
3465 }
3466 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
3467 0, pdev->name, ndev);
3468 if (ret)
3469 goto failed_irq;
3470
3471 fep->irq[i] = irq;
3472 }
3473
3474 init_completion(&fep->mdio_done);
3475 ret = fec_enet_mii_init(pdev);
3476 if (ret)
3477 goto failed_mii_init;
3478
3479 /* Carrier starts down, phylib will bring it up */
3480 netif_carrier_off(ndev);
3481 fec_enet_clk_enable(ndev, false);
3482 pinctrl_pm_select_sleep_state(&pdev->dev);
3483
3484 ret = register_netdev(ndev);
3485 if (ret)
3486 goto failed_register;
3487
3488 device_init_wakeup(&ndev->dev, fep->wol_flag &
3489 FEC_WOL_HAS_MAGIC_PACKET);
3490
3491 if (fep->bufdesc_ex && fep->ptp_clock)
3492 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
3493
3494 fep->rx_copybreak = COPYBREAK_DEFAULT;
3495 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3496
3497 pm_runtime_mark_last_busy(&pdev->dev);
3498 pm_runtime_put_autosuspend(&pdev->dev);
3499
3500 return 0;
3501
3502 failed_register:
3503 fec_enet_mii_remove(fep);
3504 failed_mii_init:
3505 failed_irq:
3506 failed_init:
3507 fec_ptp_stop(pdev);
3508 if (fep->reg_phy)
3509 regulator_disable(fep->reg_phy);
3510 failed_reset:
3511 pm_runtime_put(&pdev->dev);
3512 pm_runtime_disable(&pdev->dev);
3513 failed_regulator:
3514 failed_clk_ipg:
3515 fec_enet_clk_enable(ndev, false);
3516 failed_clk:
3517 if (of_phy_is_fixed_link(np))
3518 of_phy_deregister_fixed_link(np);
3519 failed_phy:
3520 of_node_put(phy_node);
3521 failed_ioremap:
3522 free_netdev(ndev);
3523
3524 return ret;
3525 }
3526
3527 static int
3528 fec_drv_remove(struct platform_device *pdev)
3529 {
3530 struct net_device *ndev = platform_get_drvdata(pdev);
3531 struct fec_enet_private *fep = netdev_priv(ndev);
3532 struct device_node *np = pdev->dev.of_node;
3533
3534 cancel_work_sync(&fep->tx_timeout_work);
3535 fec_ptp_stop(pdev);
3536 unregister_netdev(ndev);
3537 fec_enet_mii_remove(fep);
3538 if (fep->reg_phy)
3539 regulator_disable(fep->reg_phy);
3540 pm_runtime_put(&pdev->dev);
3541 pm_runtime_disable(&pdev->dev);
3542 if (of_phy_is_fixed_link(np))
3543 of_phy_deregister_fixed_link(np);
3544 of_node_put(fep->phy_node);
3545 free_netdev(ndev);
3546
3547 return 0;
3548 }
3549
3550 static int __maybe_unused fec_suspend(struct device *dev)
3551 {
3552 struct net_device *ndev = dev_get_drvdata(dev);
3553 struct fec_enet_private *fep = netdev_priv(ndev);
3554
3555 rtnl_lock();
3556 if (netif_running(ndev)) {
3557 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
3558 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
3559 phy_stop(ndev->phydev);
3560 napi_disable(&fep->napi);
3561 netif_tx_lock_bh(ndev);
3562 netif_device_detach(ndev);
3563 netif_tx_unlock_bh(ndev);
3564 fec_stop(ndev);
3565 fec_enet_clk_enable(ndev, false);
3566 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3567 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3568 }
3569 rtnl_unlock();
3570
3571 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3572 regulator_disable(fep->reg_phy);
3573
3574 /* The SoC supplies the clock to the PHY; disabling it drops the link.
3575 * Likewise, disabling an SoC-controlled PHY regulator drops the link.
3576 */
3577 if (fep->clk_enet_out || fep->reg_phy)
3578 fep->link = 0;
3579
3580 return 0;
3581 }
3582
3583 static int __maybe_unused fec_resume(struct device *dev)
3584 {
3585 struct net_device *ndev = dev_get_drvdata(dev);
3586 struct fec_enet_private *fep = netdev_priv(ndev);
3587 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
3588 int ret;
3589 int val;
3590
3591 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
3592 ret = regulator_enable(fep->reg_phy);
3593 if (ret)
3594 return ret;
3595 }
3596
3597 rtnl_lock();
3598 if (netif_running(ndev)) {
3599 ret = fec_enet_clk_enable(ndev, true);
3600 if (ret) {
3601 rtnl_unlock();
3602 goto failed_clk;
3603 }
3604 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
3605 if (pdata && pdata->sleep_mode_enable)
3606 pdata->sleep_mode_enable(false);
3607 val = readl(fep->hwp + FEC_ECNTRL);
3608 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
3609 writel(val, fep->hwp + FEC_ECNTRL);
3610 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
3611 } else {
3612 pinctrl_pm_select_default_state(&fep->pdev->dev);
3613 }
3614 fec_restart(ndev);
3615 netif_tx_lock_bh(ndev);
3616 netif_device_attach(ndev);
3617 netif_tx_unlock_bh(ndev);
3618 napi_enable(&fep->napi);
3619 phy_start(ndev->phydev);
3620 }
3621 rtnl_unlock();
3622
3623 return 0;
3624
3625 failed_clk:
3626 if (fep->reg_phy)
3627 regulator_disable(fep->reg_phy);
3628 return ret;
3629 }
3630
3631 static int __maybe_unused fec_runtime_suspend(struct device *dev)
3632 {
3633 struct net_device *ndev = dev_get_drvdata(dev);
3634 struct fec_enet_private *fep = netdev_priv(ndev);
3635
3636 clk_disable_unprepare(fep->clk_ipg);
3637
3638 return 0;
3639 }
3640
3641 static int __maybe_unused fec_runtime_resume(struct device *dev)
3642 {
3643 struct net_device *ndev = dev_get_drvdata(dev);
3644 struct fec_enet_private *fep = netdev_priv(ndev);
3645
3646 return clk_prepare_enable(fep->clk_ipg);
3647 }
3648
3649 static const struct dev_pm_ops fec_pm_ops = {
3650 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
3651 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
3652 };
3653
3654 static struct platform_driver fec_driver = {
3655 .driver = {
3656 .name = DRIVER_NAME,
3657 .pm = &fec_pm_ops,
3658 .of_match_table = fec_dt_ids,
3659 },
3660 .id_table = fec_devtype,
3661 .probe = fec_probe,
3662 .remove = fec_drv_remove,
3663 };
3664
3665 module_platform_driver(fec_driver);
3666
3667 MODULE_ALIAS("platform:"DRIVER_NAME);
3668 MODULE_LICENSE("GPL");
3669