// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16

#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */

#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */
#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

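/* Hardware scatter-gather buffer descriptor. The layout mirrors what the
 * AXI DMA engine expects in memory; sw_id_offset is used by the driver to
 * stash the sk_buff pointer that belongs to an RX descriptor.
 */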
struct nixge_hw_dma_bd {
	u32 next;
	u32 reserved1;
	u32 phys;
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

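/* Per-descriptor TX bookkeeping: the skb (set only on the last descriptor
 * of a frame), its DMA mapping, and how that mapping was created.
 */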
struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

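/* Driver private state, stored in the net_device private area */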
struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus;	/* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

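/* MMIO accessors for the DMA channel registers and the NIXGE control
 * register window.
 */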
static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

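/* Free the RX/TX descriptor rings and any skbs still attached to RX
 * descriptors. Also used on the nixge_hw_dma_bd_init() error path, so it
 * must tolerate partially initialized rings.
 */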
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	int i;

	if (priv->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			dma_unmap_single(ndev->dev.parent,
					 priv->rx_bd_v[i].phys,
					 NIXGE_MAX_JUMBO_FRAME_SIZE,
					 DMA_FROM_DEVICE);
			dev_kfree_skb((struct sk_buff *)
				      (priv->rx_bd_v[i].sw_id_offset));
		}

		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);
	}

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

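/* Allocate the TX/RX descriptor rings, pre-fill the RX ring with skbs,
 * program the interrupt coalesce/delay settings and start both DMA channels.
 */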
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					    &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kcalloc(ndev->dev.parent,
				    TX_BD_NUM, sizeof(*priv->tx_skb),
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					    &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		priv->tx_bd_v[i].next = priv->tx_bd_p +
					sizeof(*priv->tx_bd_v) *
					((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		priv->rx_bd_v[i].next = priv->rx_bd_p +
					sizeof(*priv->rx_bd_v) *
					((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		priv->rx_bd_v[i].sw_id_offset = (u32)skb;
		priv->rx_bd_v[i].phys =
			dma_map_single(ndev->dev.parent,
				       skb->data,
				       NIXGE_MAX_JUMBO_FRAME_SIZE,
				       DMA_FROM_DEVICE);
		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
			    (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

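/* Reset one AXI DMA channel (TX or RX, selected by the control register
 * offset) and wait for the reset bit to self-clear.
 */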
static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

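/* Reclaim TX descriptors the DMA engine has marked complete: unmap and free
 * the associated skbs and update the TX statistics.
 */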
static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

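/* Map the skb head and all fragments onto consecutive TX descriptors and
 * kick the engine by writing the new tail descriptor pointer.
 */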
static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_OK;
	}

	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
		goto drop;

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_p->phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
			goto frag_err;

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_p->phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	dma_unmap_single(priv->ndev->dev.parent,
			 tx_skb->mapping,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

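/* Receive up to @budget completed frames, pass each skb up the stack via
 * napi_gro_receive() and refill the descriptor with a fresh skb.
 */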
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
		budget > packets)) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(cur_p->sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     NIXGE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32)new_skb;

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

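/* NAPI poll handler: process completed RX descriptors and re-enable RX
 * interrupts once there is less work than the budget allows.
 */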
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = 0;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more work, reschedule, but clear the
			 * status first
			 */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* if not, turn on RX IRQs again ... */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

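/* TX interrupt: acknowledge completions; on DMA errors mask both channels'
 * interrupts and defer recovery to the error tasklet.
 */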
static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->tx_bd_v[priv->tx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

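/* RX interrupt: mask RX interrupts and hand completion processing to NAPI;
 * on DMA errors defer recovery to the error tasklet.
 */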
static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off IRQs because NAPI will handle completion */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->rx_bd_v[priv->rx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

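/* DMA error tasklet: reset both channels, discard all in-flight descriptors
 * and restart the rings, mirroring nixge_hw_dma_bd_init().
 */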
static void nixge_dma_err_handler(unsigned long data)
{
	struct nixge_priv *lp = (struct nixge_priv *)data;
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			    (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
		     (unsigned long)priv);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	    NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

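/* Program dev_addr into the MAC address registers: the lower four bytes go
 * into MAC_LSB, the upper two into MAC_MSB.
 */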
static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2]) << 24 |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}

static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open = nixge_open,
	.ndo_stop = nixge_stop,
	.ndo_start_xmit = nixge_start_xmit,
	.ndo_change_mtu	= nixge_change_mtu,
	.ndo_set_mac_address = nixge_net_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};

static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}

static int nixge_ethtools_get_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

static int nixge_ethtools_set_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ecoalesce->rx_coalesce_usecs ||
	    ecoalesce->rx_coalesce_usecs_irq ||
	    ecoalesce->rx_max_coalesced_frames_irq ||
	    ecoalesce->tx_coalesce_usecs ||
	    ecoalesce->tx_coalesce_usecs_irq ||
	    ecoalesce->tx_max_coalesced_frames_irq ||
	    ecoalesce->stats_block_coalesce_usecs ||
	    ecoalesce->use_adaptive_rx_coalesce ||
	    ecoalesce->use_adaptive_tx_coalesce ||
	    ecoalesce->pkt_rate_low ||
	    ecoalesce->rx_coalesce_usecs_low ||
	    ecoalesce->rx_max_coalesced_frames_low ||
	    ecoalesce->tx_coalesce_usecs_low ||
	    ecoalesce->tx_max_coalesced_frames_low ||
	    ecoalesce->pkt_rate_high ||
	    ecoalesce->rx_coalesce_usecs_high ||
	    ecoalesce->rx_max_coalesced_frames_high ||
	    ecoalesce->tx_coalesce_usecs_high ||
	    ecoalesce->tx_max_coalesced_frames_high ||
	    ecoalesce->rate_sample_interval)
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		ctrl |= NIXGE_ID_LED_CTL_EN;
		/* Enable identification LED override */
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 2;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.get_drvinfo	= nixge_ethtools_get_drvinfo,
	.get_coalesce	= nixge_ethtools_get_coalesce,
	.set_coalesce	= nixge_ethtools_set_coalesce,
	.set_phys_id	= nixge_ethtools_set_phys_id,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
};

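/* MDIO read: Clause 45 accesses need an address cycle with the register
 * address before the read cycle; Clause 22 needs only the read cycle.
 */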
static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	}

	return err;
}

static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read;
	bus->write = nixge_mdio_write;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}

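/* Read the MAC address from an nvmem cell named "address", if one is
 * provided; returns a kmalloc'd buffer the caller must free, or NULL.
 */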
static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return NULL;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return mac;
}

static int nixge_probe(struct platform_device *pdev)
{
	struct nixge_priv *priv;
	struct net_device *ndev;
	struct resource *dmares;
	const u8 *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (mac_addr && is_valid_ether_addr(mac_addr)) {
		ether_addr_copy(ndev->dev_addr, mac_addr);
		kfree(mac_addr);
	} else {
		eth_hw_addr_random(ndev);
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);

	dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		return PTR_ERR(priv->dma_regs);
	}
	priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq");
		return priv->tx_irq;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq");
		return priv->rx_irq;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	err = nixge_mdio_setup(priv, pdev->dev.of_node);
	if (err) {
		netdev_err(ndev, "error registering mdio bus");
		goto free_netdev;
	}

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if ((int)priv->phy_mode < 0) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "could not find \"phy-handle\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto unregister_mdio;
	}

	return 0;

unregister_mdio:
	mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

static struct platform_driver nixge_driver = {
	.probe		= nixge_probe,
	.remove		= nixge_remove,
	.driver		= {
		.name		= "nixge",
		.of_match_table	= of_match_ptr(nixge_dt_ids),
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");