• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Atmel MACB Ethernet Controller driver
3  *
4  * Copyright (C) 2004-2006 Atmel Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/clk.h>
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/kernel.h>
15 #include <linux/types.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/netdevice.h>
19 #include <linux/etherdevice.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/platform_device.h>
22 #include <linux/phy.h>
23 
24 #include <mach/board.h>
25 #include <mach/cpu.h>
26 
27 #include "macb.h"
28 
29 #define RX_BUFFER_SIZE		128
30 #define RX_RING_SIZE		512
31 #define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)
32 
33 /* Make the IP header word-aligned (the ethernet header is 14 bytes) */
34 #define RX_OFFSET		2
35 
36 #define TX_RING_SIZE		128
37 #define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
38 #define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)
39 
40 #define TX_RING_GAP(bp)						\
41 	(TX_RING_SIZE - (bp)->tx_pending)
42 #define TX_BUFFS_AVAIL(bp)					\
43 	(((bp)->tx_tail <= (bp)->tx_head) ?			\
44 	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
45 	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
46 #define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))
47 
48 #define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))
49 
50 /* minimum number of free TX descriptors before waking up TX process */
51 #define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)
52 
53 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
54 				 | MACB_BIT(ISR_ROVR))
55 
__macb_set_hwaddr(struct macb * bp)56 static void __macb_set_hwaddr(struct macb *bp)
57 {
58 	u32 bottom;
59 	u16 top;
60 
61 	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
62 	macb_writel(bp, SA1B, bottom);
63 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
64 	macb_writel(bp, SA1T, top);
65 }
66 
macb_get_hwaddr(struct macb * bp)67 static void __init macb_get_hwaddr(struct macb *bp)
68 {
69 	u32 bottom;
70 	u16 top;
71 	u8 addr[6];
72 
73 	bottom = macb_readl(bp, SA1B);
74 	top = macb_readl(bp, SA1T);
75 
76 	addr[0] = bottom & 0xff;
77 	addr[1] = (bottom >> 8) & 0xff;
78 	addr[2] = (bottom >> 16) & 0xff;
79 	addr[3] = (bottom >> 24) & 0xff;
80 	addr[4] = top & 0xff;
81 	addr[5] = (top >> 8) & 0xff;
82 
83 	if (is_valid_ether_addr(addr)) {
84 		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
85 	} else {
86 		dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
87 		random_ether_addr(bp->dev->dev_addr);
88 	}
89 }
90 
macb_mdio_read(struct mii_bus * bus,int mii_id,int regnum)91 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
92 {
93 	struct macb *bp = bus->priv;
94 	int value;
95 
96 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
97 			      | MACB_BF(RW, MACB_MAN_READ)
98 			      | MACB_BF(PHYA, mii_id)
99 			      | MACB_BF(REGA, regnum)
100 			      | MACB_BF(CODE, MACB_MAN_CODE)));
101 
102 	/* wait for end of transfer */
103 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
104 		cpu_relax();
105 
106 	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
107 
108 	return value;
109 }
110 
/*
 * Issue a Clause-22 MDIO write via the PHY maintenance register and
 * busy-wait until the transfer completes.  Always returns 0.
 */
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
129 
/* The MACB has no MDIO bus-level reset; nothing to do. */
static int macb_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
134 
macb_handle_link_change(struct net_device * dev)135 static void macb_handle_link_change(struct net_device *dev)
136 {
137 	struct macb *bp = netdev_priv(dev);
138 	struct phy_device *phydev = bp->phy_dev;
139 	unsigned long flags;
140 
141 	int status_change = 0;
142 
143 	spin_lock_irqsave(&bp->lock, flags);
144 
145 	if (phydev->link) {
146 		if ((bp->speed != phydev->speed) ||
147 		    (bp->duplex != phydev->duplex)) {
148 			u32 reg;
149 
150 			reg = macb_readl(bp, NCFGR);
151 			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
152 
153 			if (phydev->duplex)
154 				reg |= MACB_BIT(FD);
155 			if (phydev->speed == SPEED_100)
156 				reg |= MACB_BIT(SPD);
157 
158 			macb_writel(bp, NCFGR, reg);
159 
160 			bp->speed = phydev->speed;
161 			bp->duplex = phydev->duplex;
162 			status_change = 1;
163 		}
164 	}
165 
166 	if (phydev->link != bp->link) {
167 		if (!phydev->link) {
168 			bp->speed = 0;
169 			bp->duplex = -1;
170 		}
171 		bp->link = phydev->link;
172 
173 		status_change = 1;
174 	}
175 
176 	spin_unlock_irqrestore(&bp->lock, flags);
177 
178 	if (status_change) {
179 		if (phydev->link)
180 			printk(KERN_INFO "%s: link up (%d/%s)\n",
181 			       dev->name, phydev->speed,
182 			       DUPLEX_FULL == phydev->duplex ? "Full":"Half");
183 		else
184 			printk(KERN_INFO "%s: link down\n", dev->name);
185 	}
186 }
187 
188 /* based on au1000_eth. c*/
macb_mii_probe(struct net_device * dev)189 static int macb_mii_probe(struct net_device *dev)
190 {
191 	struct macb *bp = netdev_priv(dev);
192 	struct phy_device *phydev = NULL;
193 	struct eth_platform_data *pdata;
194 	int phy_addr;
195 
196 	/* find the first phy */
197 	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
198 		if (bp->mii_bus->phy_map[phy_addr]) {
199 			phydev = bp->mii_bus->phy_map[phy_addr];
200 			break;
201 		}
202 	}
203 
204 	if (!phydev) {
205 		printk (KERN_ERR "%s: no PHY found\n", dev->name);
206 		return -1;
207 	}
208 
209 	pdata = bp->pdev->dev.platform_data;
210 	/* TODO : add pin_irq */
211 
212 	/* attach the mac to the phy */
213 	if (pdata && pdata->is_rmii) {
214 		phydev = phy_connect(dev, phydev->dev.bus_id,
215 			&macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
216 	} else {
217 		phydev = phy_connect(dev, phydev->dev.bus_id,
218 			&macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
219 	}
220 
221 	if (IS_ERR(phydev)) {
222 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
223 		return PTR_ERR(phydev);
224 	}
225 
226 	/* mask with MAC supported features */
227 	phydev->supported &= PHY_BASIC_FEATURES;
228 
229 	phydev->advertising = phydev->supported;
230 
231 	bp->link = 0;
232 	bp->speed = 0;
233 	bp->duplex = -1;
234 	bp->phy_dev = phydev;
235 
236 	return 0;
237 }
238 
macb_mii_init(struct macb * bp)239 static int macb_mii_init(struct macb *bp)
240 {
241 	struct eth_platform_data *pdata;
242 	int err = -ENXIO, i;
243 
244 	/* Enable managment port */
245 	macb_writel(bp, NCR, MACB_BIT(MPE));
246 
247 	bp->mii_bus = mdiobus_alloc();
248 	if (bp->mii_bus == NULL) {
249 		err = -ENOMEM;
250 		goto err_out;
251 	}
252 
253 	bp->mii_bus->name = "MACB_mii_bus";
254 	bp->mii_bus->read = &macb_mdio_read;
255 	bp->mii_bus->write = &macb_mdio_write;
256 	bp->mii_bus->reset = &macb_mdio_reset;
257 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
258 	bp->mii_bus->priv = bp;
259 	bp->mii_bus->parent = &bp->dev->dev;
260 	pdata = bp->pdev->dev.platform_data;
261 
262 	if (pdata)
263 		bp->mii_bus->phy_mask = pdata->phy_mask;
264 
265 	bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
266 	if (!bp->mii_bus->irq) {
267 		err = -ENOMEM;
268 		goto err_out_free_mdiobus;
269 	}
270 
271 	for (i = 0; i < PHY_MAX_ADDR; i++)
272 		bp->mii_bus->irq[i] = PHY_POLL;
273 
274 	platform_set_drvdata(bp->dev, bp->mii_bus);
275 
276 	if (mdiobus_register(bp->mii_bus))
277 		goto err_out_free_mdio_irq;
278 
279 	if (macb_mii_probe(bp->dev) != 0) {
280 		goto err_out_unregister_bus;
281 	}
282 
283 	return 0;
284 
285 err_out_unregister_bus:
286 	mdiobus_unregister(bp->mii_bus);
287 err_out_free_mdio_irq:
288 	kfree(bp->mii_bus->irq);
289 err_out_free_mdiobus:
290 	mdiobus_free(bp->mii_bus);
291 err_out:
292 	return err;
293 }
294 
macb_update_stats(struct macb * bp)295 static void macb_update_stats(struct macb *bp)
296 {
297 	u32 __iomem *reg = bp->regs + MACB_PFR;
298 	u32 *p = &bp->hw_stats.rx_pause_frames;
299 	u32 *end = &bp->hw_stats.tx_pause_frames + 1;
300 
301 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
302 
303 	for(; p < end; p++, reg++)
304 		*p += __raw_readl(reg);
305 }
306 
macb_tx(struct macb * bp)307 static void macb_tx(struct macb *bp)
308 {
309 	unsigned int tail;
310 	unsigned int head;
311 	u32 status;
312 
313 	status = macb_readl(bp, TSR);
314 	macb_writel(bp, TSR, status);
315 
316 	dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
317 		(unsigned long)status);
318 
319 	if (status & MACB_BIT(UND)) {
320 		int i;
321 		printk(KERN_ERR "%s: TX underrun, resetting buffers\n",
322 			bp->dev->name);
323 
324 		/* Transfer ongoing, disable transmitter, to avoid confusion */
325 		if (status & MACB_BIT(TGO))
326 			macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
327 
328 		head = bp->tx_head;
329 
330 		/*Mark all the buffer as used to avoid sending a lost buffer*/
331 		for (i = 0; i < TX_RING_SIZE; i++)
332 			bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
333 
334 		/* free transmit buffer in upper layer*/
335 		for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
336 			struct ring_info *rp = &bp->tx_skb[tail];
337 			struct sk_buff *skb = rp->skb;
338 
339 			BUG_ON(skb == NULL);
340 
341 			rmb();
342 
343 			dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
344 							 DMA_TO_DEVICE);
345 			rp->skb = NULL;
346 			dev_kfree_skb_irq(skb);
347 		}
348 
349 		bp->tx_head = bp->tx_tail = 0;
350 
351 		/* Enable the transmitter again */
352 		if (status & MACB_BIT(TGO))
353 			macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
354 	}
355 
356 	if (!(status & MACB_BIT(COMP)))
357 		/*
358 		 * This may happen when a buffer becomes complete
359 		 * between reading the ISR and scanning the
360 		 * descriptors.  Nothing to worry about.
361 		 */
362 		return;
363 
364 	head = bp->tx_head;
365 	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
366 		struct ring_info *rp = &bp->tx_skb[tail];
367 		struct sk_buff *skb = rp->skb;
368 		u32 bufstat;
369 
370 		BUG_ON(skb == NULL);
371 
372 		rmb();
373 		bufstat = bp->tx_ring[tail].ctrl;
374 
375 		if (!(bufstat & MACB_BIT(TX_USED)))
376 			break;
377 
378 		dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
379 			tail, skb->data);
380 		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
381 				 DMA_TO_DEVICE);
382 		bp->stats.tx_packets++;
383 		bp->stats.tx_bytes += skb->len;
384 		rp->skb = NULL;
385 		dev_kfree_skb_irq(skb);
386 	}
387 
388 	bp->tx_tail = tail;
389 	if (netif_queue_stopped(bp->dev) &&
390 	    TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
391 		netif_wake_queue(bp->dev);
392 }
393 
macb_rx_frame(struct macb * bp,unsigned int first_frag,unsigned int last_frag)394 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
395 			 unsigned int last_frag)
396 {
397 	unsigned int len;
398 	unsigned int frag;
399 	unsigned int offset = 0;
400 	struct sk_buff *skb;
401 
402 	len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
403 
404 	dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
405 		first_frag, last_frag, len);
406 
407 	skb = dev_alloc_skb(len + RX_OFFSET);
408 	if (!skb) {
409 		bp->stats.rx_dropped++;
410 		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
411 			bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
412 			if (frag == last_frag)
413 				break;
414 		}
415 		wmb();
416 		return 1;
417 	}
418 
419 	skb_reserve(skb, RX_OFFSET);
420 	skb->ip_summed = CHECKSUM_NONE;
421 	skb_put(skb, len);
422 
423 	for (frag = first_frag; ; frag = NEXT_RX(frag)) {
424 		unsigned int frag_len = RX_BUFFER_SIZE;
425 
426 		if (offset + frag_len > len) {
427 			BUG_ON(frag != last_frag);
428 			frag_len = len - offset;
429 		}
430 		skb_copy_to_linear_data_offset(skb, offset,
431 					       (bp->rx_buffers +
432 					        (RX_BUFFER_SIZE * frag)),
433 					       frag_len);
434 		offset += RX_BUFFER_SIZE;
435 		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
436 		wmb();
437 
438 		if (frag == last_frag)
439 			break;
440 	}
441 
442 	skb->protocol = eth_type_trans(skb, bp->dev);
443 
444 	bp->stats.rx_packets++;
445 	bp->stats.rx_bytes += len;
446 	dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
447 		skb->len, skb->csum);
448 	netif_receive_skb(skb);
449 
450 	return 0;
451 }
452 
453 /* Mark DMA descriptors from begin up to and not including end as unused */
discard_partial_frame(struct macb * bp,unsigned int begin,unsigned int end)454 static void discard_partial_frame(struct macb *bp, unsigned int begin,
455 				  unsigned int end)
456 {
457 	unsigned int frag;
458 
459 	for (frag = begin; frag != end; frag = NEXT_RX(frag))
460 		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
461 	wmb();
462 
463 	/*
464 	 * When this happens, the hardware stats registers for
465 	 * whatever caused this is updated, so we don't have to record
466 	 * anything.
467 	 */
468 }
469 
macb_rx(struct macb * bp,int budget)470 static int macb_rx(struct macb *bp, int budget)
471 {
472 	int received = 0;
473 	unsigned int tail = bp->rx_tail;
474 	int first_frag = -1;
475 
476 	for (; budget > 0; tail = NEXT_RX(tail)) {
477 		u32 addr, ctrl;
478 
479 		rmb();
480 		addr = bp->rx_ring[tail].addr;
481 		ctrl = bp->rx_ring[tail].ctrl;
482 
483 		if (!(addr & MACB_BIT(RX_USED)))
484 			break;
485 
486 		if (ctrl & MACB_BIT(RX_SOF)) {
487 			if (first_frag != -1)
488 				discard_partial_frame(bp, first_frag, tail);
489 			first_frag = tail;
490 		}
491 
492 		if (ctrl & MACB_BIT(RX_EOF)) {
493 			int dropped;
494 			BUG_ON(first_frag == -1);
495 
496 			dropped = macb_rx_frame(bp, first_frag, tail);
497 			first_frag = -1;
498 			if (!dropped) {
499 				received++;
500 				budget--;
501 			}
502 		}
503 	}
504 
505 	if (first_frag != -1)
506 		bp->rx_tail = first_frag;
507 	else
508 		bp->rx_tail = tail;
509 
510 	return received;
511 }
512 
macb_poll(struct napi_struct * napi,int budget)513 static int macb_poll(struct napi_struct *napi, int budget)
514 {
515 	struct macb *bp = container_of(napi, struct macb, napi);
516 	struct net_device *dev = bp->dev;
517 	int work_done;
518 	u32 status;
519 
520 	status = macb_readl(bp, RSR);
521 	macb_writel(bp, RSR, status);
522 
523 	work_done = 0;
524 	if (!status) {
525 		/*
526 		 * This may happen if an interrupt was pending before
527 		 * this function was called last time, and no packets
528 		 * have been received since.
529 		 */
530 		netif_rx_complete(napi);
531 		goto out;
532 	}
533 
534 	dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
535 		(unsigned long)status, budget);
536 
537 	if (!(status & MACB_BIT(REC))) {
538 		dev_warn(&bp->pdev->dev,
539 			 "No RX buffers complete, status = %02lx\n",
540 			 (unsigned long)status);
541 		netif_rx_complete(napi);
542 		goto out;
543 	}
544 
545 	work_done = macb_rx(bp, budget);
546 	if (work_done < budget)
547 		netif_rx_complete(napi);
548 
549 	/*
550 	 * We've done what we can to clean the buffers. Make sure we
551 	 * get notified when new packets arrive.
552 	 */
553 out:
554 	macb_writel(bp, IER, MACB_RX_INT_FLAGS);
555 
556 	/* TODO: Handle errors */
557 
558 	return work_done;
559 }
560 
macb_interrupt(int irq,void * dev_id)561 static irqreturn_t macb_interrupt(int irq, void *dev_id)
562 {
563 	struct net_device *dev = dev_id;
564 	struct macb *bp = netdev_priv(dev);
565 	u32 status;
566 
567 	status = macb_readl(bp, ISR);
568 
569 	if (unlikely(!status))
570 		return IRQ_NONE;
571 
572 	spin_lock(&bp->lock);
573 
574 	while (status) {
575 		/* close possible race with dev_close */
576 		if (unlikely(!netif_running(dev))) {
577 			macb_writel(bp, IDR, ~0UL);
578 			break;
579 		}
580 
581 		if (status & MACB_RX_INT_FLAGS) {
582 			if (netif_rx_schedule_prep(&bp->napi)) {
583 				/*
584 				 * There's no point taking any more interrupts
585 				 * until we have processed the buffers
586 				 */
587 				macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
588 				dev_dbg(&bp->pdev->dev,
589 					"scheduling RX softirq\n");
590 				__netif_rx_schedule(&bp->napi);
591 			}
592 		}
593 
594 		if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND)))
595 			macb_tx(bp);
596 
597 		/*
598 		 * Link change detection isn't possible with RMII, so we'll
599 		 * add that if/when we get our hands on a full-blown MII PHY.
600 		 */
601 
602 		if (status & MACB_BIT(HRESP)) {
603 			/*
604 			 * TODO: Reset the hardware, and maybe move the printk
605 			 * to a lower-priority context as well (work queue?)
606 			 */
607 			printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
608 			       dev->name);
609 		}
610 
611 		status = macb_readl(bp, ISR);
612 	}
613 
614 	spin_unlock(&bp->lock);
615 
616 	return IRQ_HANDLED;
617 }
618 
macb_start_xmit(struct sk_buff * skb,struct net_device * dev)619 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
620 {
621 	struct macb *bp = netdev_priv(dev);
622 	dma_addr_t mapping;
623 	unsigned int len, entry;
624 	u32 ctrl;
625 
626 #ifdef DEBUG
627 	int i;
628 	dev_dbg(&bp->pdev->dev,
629 		"start_xmit: len %u head %p data %p tail %p end %p\n",
630 		skb->len, skb->head, skb->data,
631 		skb_tail_pointer(skb), skb_end_pointer(skb));
632 	dev_dbg(&bp->pdev->dev,
633 		"data:");
634 	for (i = 0; i < 16; i++)
635 		printk(" %02x", (unsigned int)skb->data[i]);
636 	printk("\n");
637 #endif
638 
639 	len = skb->len;
640 	spin_lock_irq(&bp->lock);
641 
642 	/* This is a hard error, log it. */
643 	if (TX_BUFFS_AVAIL(bp) < 1) {
644 		netif_stop_queue(dev);
645 		spin_unlock_irq(&bp->lock);
646 		dev_err(&bp->pdev->dev,
647 			"BUG! Tx Ring full when queue awake!\n");
648 		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
649 			bp->tx_head, bp->tx_tail);
650 		return 1;
651 	}
652 
653 	entry = bp->tx_head;
654 	dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
655 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
656 				 len, DMA_TO_DEVICE);
657 	bp->tx_skb[entry].skb = skb;
658 	bp->tx_skb[entry].mapping = mapping;
659 	dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
660 		skb->data, (unsigned long)mapping);
661 
662 	ctrl = MACB_BF(TX_FRMLEN, len);
663 	ctrl |= MACB_BIT(TX_LAST);
664 	if (entry == (TX_RING_SIZE - 1))
665 		ctrl |= MACB_BIT(TX_WRAP);
666 
667 	bp->tx_ring[entry].addr = mapping;
668 	bp->tx_ring[entry].ctrl = ctrl;
669 	wmb();
670 
671 	entry = NEXT_TX(entry);
672 	bp->tx_head = entry;
673 
674 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
675 
676 	if (TX_BUFFS_AVAIL(bp) < 1)
677 		netif_stop_queue(dev);
678 
679 	spin_unlock_irq(&bp->lock);
680 
681 	dev->trans_start = jiffies;
682 
683 	return 0;
684 }
685 
macb_free_consistent(struct macb * bp)686 static void macb_free_consistent(struct macb *bp)
687 {
688 	if (bp->tx_skb) {
689 		kfree(bp->tx_skb);
690 		bp->tx_skb = NULL;
691 	}
692 	if (bp->rx_ring) {
693 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
694 				  bp->rx_ring, bp->rx_ring_dma);
695 		bp->rx_ring = NULL;
696 	}
697 	if (bp->tx_ring) {
698 		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
699 				  bp->tx_ring, bp->tx_ring_dma);
700 		bp->tx_ring = NULL;
701 	}
702 	if (bp->rx_buffers) {
703 		dma_free_coherent(&bp->pdev->dev,
704 				  RX_RING_SIZE * RX_BUFFER_SIZE,
705 				  bp->rx_buffers, bp->rx_buffers_dma);
706 		bp->rx_buffers = NULL;
707 	}
708 }
709 
macb_alloc_consistent(struct macb * bp)710 static int macb_alloc_consistent(struct macb *bp)
711 {
712 	int size;
713 
714 	size = TX_RING_SIZE * sizeof(struct ring_info);
715 	bp->tx_skb = kmalloc(size, GFP_KERNEL);
716 	if (!bp->tx_skb)
717 		goto out_err;
718 
719 	size = RX_RING_BYTES;
720 	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
721 					 &bp->rx_ring_dma, GFP_KERNEL);
722 	if (!bp->rx_ring)
723 		goto out_err;
724 	dev_dbg(&bp->pdev->dev,
725 		"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
726 		size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
727 
728 	size = TX_RING_BYTES;
729 	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
730 					 &bp->tx_ring_dma, GFP_KERNEL);
731 	if (!bp->tx_ring)
732 		goto out_err;
733 	dev_dbg(&bp->pdev->dev,
734 		"Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
735 		size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
736 
737 	size = RX_RING_SIZE * RX_BUFFER_SIZE;
738 	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
739 					    &bp->rx_buffers_dma, GFP_KERNEL);
740 	if (!bp->rx_buffers)
741 		goto out_err;
742 	dev_dbg(&bp->pdev->dev,
743 		"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
744 		size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
745 
746 	return 0;
747 
748 out_err:
749 	macb_free_consistent(bp);
750 	return -ENOMEM;
751 }
752 
macb_init_rings(struct macb * bp)753 static void macb_init_rings(struct macb *bp)
754 {
755 	int i;
756 	dma_addr_t addr;
757 
758 	addr = bp->rx_buffers_dma;
759 	for (i = 0; i < RX_RING_SIZE; i++) {
760 		bp->rx_ring[i].addr = addr;
761 		bp->rx_ring[i].ctrl = 0;
762 		addr += RX_BUFFER_SIZE;
763 	}
764 	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
765 
766 	for (i = 0; i < TX_RING_SIZE; i++) {
767 		bp->tx_ring[i].addr = 0;
768 		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
769 	}
770 	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
771 
772 	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
773 }
774 
macb_reset_hw(struct macb * bp)775 static void macb_reset_hw(struct macb *bp)
776 {
777 	/* Make sure we have the write buffer for ourselves */
778 	wmb();
779 
780 	/*
781 	 * Disable RX and TX (XXX: Should we halt the transmission
782 	 * more gracefully?)
783 	 */
784 	macb_writel(bp, NCR, 0);
785 
786 	/* Clear the stats registers (XXX: Update stats first?) */
787 	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
788 
789 	/* Clear all status flags */
790 	macb_writel(bp, TSR, ~0UL);
791 	macb_writel(bp, RSR, ~0UL);
792 
793 	/* Disable all interrupts */
794 	macb_writel(bp, IDR, ~0UL);
795 	macb_readl(bp, ISR);
796 }
797 
macb_init_hw(struct macb * bp)798 static void macb_init_hw(struct macb *bp)
799 {
800 	u32 config;
801 
802 	macb_reset_hw(bp);
803 	__macb_set_hwaddr(bp);
804 
805 	config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
806 	config |= MACB_BIT(PAE);		/* PAuse Enable */
807 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
808 	if (bp->dev->flags & IFF_PROMISC)
809 		config |= MACB_BIT(CAF);	/* Copy All Frames */
810 	if (!(bp->dev->flags & IFF_BROADCAST))
811 		config |= MACB_BIT(NBC);	/* No BroadCast */
812 	macb_writel(bp, NCFGR, config);
813 
814 	/* Initialize TX and RX buffers */
815 	macb_writel(bp, RBQP, bp->rx_ring_dma);
816 	macb_writel(bp, TBQP, bp->tx_ring_dma);
817 
818 	/* Enable TX and RX */
819 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
820 
821 	/* Enable interrupts */
822 	macb_writel(bp, IER, (MACB_BIT(RCOMP)
823 			      | MACB_BIT(RXUBR)
824 			      | MACB_BIT(ISR_TUND)
825 			      | MACB_BIT(ISR_RLE)
826 			      | MACB_BIT(TXERR)
827 			      | MACB_BIT(TCOMP)
828 			      | MACB_BIT(ISR_ROVR)
829 			      | MACB_BIT(HRESP)));
830 
831 }
832 
833 /*
834  * The hash address register is 64 bits long and takes up two
835  * locations in the memory map.  The least significant bits are stored
836  * in EMAC_HSL and the most significant bits in EMAC_HSH.
837  *
838  * The unicast hash enable and the multicast hash enable bits in the
839  * network configuration register enable the reception of hash matched
840  * frames. The destination address is reduced to a 6 bit index into
841  * the 64 bit hash register using the following hash function.  The
842  * hash function is an exclusive or of every sixth bit of the
843  * destination address.
844  *
845  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
846  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
847  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
848  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
849  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
850  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
851  *
852  * da[0] represents the least significant bit of the first byte
853  * received, that is, the multicast/unicast indicator, and da[47]
854  * represents the most significant bit of the last byte received.  If
855  * the hash index, hi[n], points to a bit that is set in the hash
856  * register then the frame will be matched according to whether the
857  * frame is multicast or unicast.  A multicast match will be signalled
858  * if the multicast hash enable bit is set, da[0] is 1 and the hash
859  * index points to a bit set in the hash register.  A unicast match
860  * will be signalled if the unicast hash enable bit is set, da[0] is 0
861  * and the hash index points to a bit set in the hash register.  To
862  * receive all multicast frames, the hash register should be set with
863  * all ones and the multicast hash enable bit should be set in the
864  * network configuration register.
865  */
866 
/* Return bit @bitnr (LSB-first bit order) of the address @addr as 0/1. */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}
873 
874 /*
875  * Return the hash index value for the specified address.
876  */
static int hash_get_index(__u8 *addr)
{
	int hash_index = 0;
	int i, j;

	/* hi[j] is the XOR of every sixth address bit, see above */
	for (j = 0; j < 6; j++) {
		int bitval = 0;

		for (i = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
891 
892 /*
893  * Add multicast addresses to the internal multicast-hash table.
894  */
macb_sethashtable(struct net_device * dev)895 static void macb_sethashtable(struct net_device *dev)
896 {
897 	struct dev_mc_list *curr;
898 	unsigned long mc_filter[2];
899 	unsigned int i, bitnr;
900 	struct macb *bp = netdev_priv(dev);
901 
902 	mc_filter[0] = mc_filter[1] = 0;
903 
904 	curr = dev->mc_list;
905 	for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
906 		if (!curr) break;	/* unexpected end of list */
907 
908 		bitnr = hash_get_index(curr->dmi_addr);
909 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
910 	}
911 
912 	macb_writel(bp, HRB, mc_filter[0]);
913 	macb_writel(bp, HRT, mc_filter[1]);
914 }
915 
916 /*
917  * Enable/Disable promiscuous and multicast modes.
918  */
macb_set_rx_mode(struct net_device * dev)919 static void macb_set_rx_mode(struct net_device *dev)
920 {
921 	unsigned long cfg;
922 	struct macb *bp = netdev_priv(dev);
923 
924 	cfg = macb_readl(bp, NCFGR);
925 
926 	if (dev->flags & IFF_PROMISC)
927 		/* Enable promiscuous mode */
928 		cfg |= MACB_BIT(CAF);
929 	else if (dev->flags & (~IFF_PROMISC))
930 		 /* Disable promiscuous mode */
931 		cfg &= ~MACB_BIT(CAF);
932 
933 	if (dev->flags & IFF_ALLMULTI) {
934 		/* Enable all multicast mode */
935 		macb_writel(bp, HRB, -1);
936 		macb_writel(bp, HRT, -1);
937 		cfg |= MACB_BIT(NCFGR_MTI);
938 	} else if (dev->mc_count > 0) {
939 		/* Enable specific multicasts */
940 		macb_sethashtable(dev);
941 		cfg |= MACB_BIT(NCFGR_MTI);
942 	} else if (dev->flags & (~IFF_ALLMULTI)) {
943 		/* Disable all multicast mode */
944 		macb_writel(bp, HRB, 0);
945 		macb_writel(bp, HRT, 0);
946 		cfg &= ~MACB_BIT(NCFGR_MTI);
947 	}
948 
949 	macb_writel(bp, NCFGR, cfg);
950 }
951 
macb_open(struct net_device * dev)952 static int macb_open(struct net_device *dev)
953 {
954 	struct macb *bp = netdev_priv(dev);
955 	int err;
956 
957 	dev_dbg(&bp->pdev->dev, "open\n");
958 
959 	/* if the phy is not yet register, retry later*/
960 	if (!bp->phy_dev)
961 		return -EAGAIN;
962 
963 	if (!is_valid_ether_addr(dev->dev_addr))
964 		return -EADDRNOTAVAIL;
965 
966 	err = macb_alloc_consistent(bp);
967 	if (err) {
968 		printk(KERN_ERR
969 		       "%s: Unable to allocate DMA memory (error %d)\n",
970 		       dev->name, err);
971 		return err;
972 	}
973 
974 	napi_enable(&bp->napi);
975 
976 	macb_init_rings(bp);
977 	macb_init_hw(bp);
978 
979 	/* schedule a link state check */
980 	phy_start(bp->phy_dev);
981 
982 	netif_start_queue(dev);
983 
984 	return 0;
985 }
986 
macb_close(struct net_device * dev)987 static int macb_close(struct net_device *dev)
988 {
989 	struct macb *bp = netdev_priv(dev);
990 	unsigned long flags;
991 
992 	netif_stop_queue(dev);
993 	napi_disable(&bp->napi);
994 
995 	if (bp->phy_dev)
996 		phy_stop(bp->phy_dev);
997 
998 	spin_lock_irqsave(&bp->lock, flags);
999 	macb_reset_hw(bp);
1000 	netif_carrier_off(dev);
1001 	spin_unlock_irqrestore(&bp->lock, flags);
1002 
1003 	macb_free_consistent(bp);
1004 
1005 	return 0;
1006 }
1007 
macb_get_stats(struct net_device * dev)1008 static struct net_device_stats *macb_get_stats(struct net_device *dev)
1009 {
1010 	struct macb *bp = netdev_priv(dev);
1011 	struct net_device_stats *nstat = &bp->stats;
1012 	struct macb_stats *hwstat = &bp->hw_stats;
1013 
1014 	/* read stats from hardware */
1015 	macb_update_stats(bp);
1016 
1017 	/* Convert HW stats into netdevice stats */
1018 	nstat->rx_errors = (hwstat->rx_fcs_errors +
1019 			    hwstat->rx_align_errors +
1020 			    hwstat->rx_resource_errors +
1021 			    hwstat->rx_overruns +
1022 			    hwstat->rx_oversize_pkts +
1023 			    hwstat->rx_jabbers +
1024 			    hwstat->rx_undersize_pkts +
1025 			    hwstat->sqe_test_errors +
1026 			    hwstat->rx_length_mismatch);
1027 	nstat->tx_errors = (hwstat->tx_late_cols +
1028 			    hwstat->tx_excessive_cols +
1029 			    hwstat->tx_underruns +
1030 			    hwstat->tx_carrier_errors);
1031 	nstat->collisions = (hwstat->tx_single_cols +
1032 			     hwstat->tx_multiple_cols +
1033 			     hwstat->tx_excessive_cols);
1034 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1035 				   hwstat->rx_jabbers +
1036 				   hwstat->rx_undersize_pkts +
1037 				   hwstat->rx_length_mismatch);
1038 	nstat->rx_over_errors = hwstat->rx_resource_errors;
1039 	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
1040 	nstat->rx_frame_errors = hwstat->rx_align_errors;
1041 	nstat->rx_fifo_errors = hwstat->rx_overruns;
1042 	/* XXX: What does "missed" mean? */
1043 	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
1044 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
1045 	nstat->tx_fifo_errors = hwstat->tx_underruns;
1046 	/* Don't know about heartbeat or window errors... */
1047 
1048 	return nstat;
1049 }
1050 
macb_get_settings(struct net_device * dev,struct ethtool_cmd * cmd)1051 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1052 {
1053 	struct macb *bp = netdev_priv(dev);
1054 	struct phy_device *phydev = bp->phy_dev;
1055 
1056 	if (!phydev)
1057 		return -ENODEV;
1058 
1059 	return phy_ethtool_gset(phydev, cmd);
1060 }
1061 
macb_set_settings(struct net_device * dev,struct ethtool_cmd * cmd)1062 static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1063 {
1064 	struct macb *bp = netdev_priv(dev);
1065 	struct phy_device *phydev = bp->phy_dev;
1066 
1067 	if (!phydev)
1068 		return -ENODEV;
1069 
1070 	return phy_ethtool_sset(phydev, cmd);
1071 }
1072 
macb_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)1073 static void macb_get_drvinfo(struct net_device *dev,
1074 			     struct ethtool_drvinfo *info)
1075 {
1076 	struct macb *bp = netdev_priv(dev);
1077 
1078 	strcpy(info->driver, bp->pdev->dev.driver->name);
1079 	strcpy(info->version, "$Revision: 1.14 $");
1080 	strcpy(info->bus_info, bp->pdev->dev.bus_id);
1081 }
1082 
1083 static struct ethtool_ops macb_ethtool_ops = {
1084 	.get_settings		= macb_get_settings,
1085 	.set_settings		= macb_set_settings,
1086 	.get_drvinfo		= macb_get_drvinfo,
1087 	.get_link		= ethtool_op_get_link,
1088 };
1089 
macb_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)1090 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1091 {
1092 	struct macb *bp = netdev_priv(dev);
1093 	struct phy_device *phydev = bp->phy_dev;
1094 
1095 	if (!netif_running(dev))
1096 		return -EINVAL;
1097 
1098 	if (!phydev)
1099 		return -ENODEV;
1100 
1101 	return phy_mii_ioctl(phydev, if_mii(rq), cmd);
1102 }
1103 
macb_probe(struct platform_device * pdev)1104 static int __init macb_probe(struct platform_device *pdev)
1105 {
1106 	struct eth_platform_data *pdata;
1107 	struct resource *regs;
1108 	struct net_device *dev;
1109 	struct macb *bp;
1110 	struct phy_device *phydev;
1111 	unsigned long pclk_hz;
1112 	u32 config;
1113 	int err = -ENXIO;
1114 
1115 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1116 	if (!regs) {
1117 		dev_err(&pdev->dev, "no mmio resource defined\n");
1118 		goto err_out;
1119 	}
1120 
1121 	err = -ENOMEM;
1122 	dev = alloc_etherdev(sizeof(*bp));
1123 	if (!dev) {
1124 		dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
1125 		goto err_out;
1126 	}
1127 
1128 	SET_NETDEV_DEV(dev, &pdev->dev);
1129 
1130 	/* TODO: Actually, we have some interesting features... */
1131 	dev->features |= 0;
1132 
1133 	bp = netdev_priv(dev);
1134 	bp->pdev = pdev;
1135 	bp->dev = dev;
1136 
1137 	spin_lock_init(&bp->lock);
1138 
1139 #if defined(CONFIG_ARCH_AT91)
1140 	bp->pclk = clk_get(&pdev->dev, "macb_clk");
1141 	if (IS_ERR(bp->pclk)) {
1142 		dev_err(&pdev->dev, "failed to get macb_clk\n");
1143 		goto err_out_free_dev;
1144 	}
1145 	clk_enable(bp->pclk);
1146 #else
1147 	bp->pclk = clk_get(&pdev->dev, "pclk");
1148 	if (IS_ERR(bp->pclk)) {
1149 		dev_err(&pdev->dev, "failed to get pclk\n");
1150 		goto err_out_free_dev;
1151 	}
1152 	bp->hclk = clk_get(&pdev->dev, "hclk");
1153 	if (IS_ERR(bp->hclk)) {
1154 		dev_err(&pdev->dev, "failed to get hclk\n");
1155 		goto err_out_put_pclk;
1156 	}
1157 
1158 	clk_enable(bp->pclk);
1159 	clk_enable(bp->hclk);
1160 #endif
1161 
1162 	bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
1163 	if (!bp->regs) {
1164 		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
1165 		err = -ENOMEM;
1166 		goto err_out_disable_clocks;
1167 	}
1168 
1169 	dev->irq = platform_get_irq(pdev, 0);
1170 	err = request_irq(dev->irq, macb_interrupt, IRQF_SAMPLE_RANDOM,
1171 			  dev->name, dev);
1172 	if (err) {
1173 		printk(KERN_ERR
1174 		       "%s: Unable to request IRQ %d (error %d)\n",
1175 		       dev->name, dev->irq, err);
1176 		goto err_out_iounmap;
1177 	}
1178 
1179 	dev->open = macb_open;
1180 	dev->stop = macb_close;
1181 	dev->hard_start_xmit = macb_start_xmit;
1182 	dev->get_stats = macb_get_stats;
1183 	dev->set_multicast_list = macb_set_rx_mode;
1184 	dev->do_ioctl = macb_ioctl;
1185 	netif_napi_add(dev, &bp->napi, macb_poll, 64);
1186 	dev->ethtool_ops = &macb_ethtool_ops;
1187 
1188 	dev->base_addr = regs->start;
1189 
1190 	/* Set MII management clock divider */
1191 	pclk_hz = clk_get_rate(bp->pclk);
1192 	if (pclk_hz <= 20000000)
1193 		config = MACB_BF(CLK, MACB_CLK_DIV8);
1194 	else if (pclk_hz <= 40000000)
1195 		config = MACB_BF(CLK, MACB_CLK_DIV16);
1196 	else if (pclk_hz <= 80000000)
1197 		config = MACB_BF(CLK, MACB_CLK_DIV32);
1198 	else
1199 		config = MACB_BF(CLK, MACB_CLK_DIV64);
1200 	macb_writel(bp, NCFGR, config);
1201 
1202 	macb_get_hwaddr(bp);
1203 	pdata = pdev->dev.platform_data;
1204 
1205 	if (pdata && pdata->is_rmii)
1206 #if defined(CONFIG_ARCH_AT91)
1207 		macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
1208 #else
1209 		macb_writel(bp, USRIO, 0);
1210 #endif
1211 	else
1212 #if defined(CONFIG_ARCH_AT91)
1213 		macb_writel(bp, USRIO, MACB_BIT(CLKEN));
1214 #else
1215 		macb_writel(bp, USRIO, MACB_BIT(MII));
1216 #endif
1217 
1218 	bp->tx_pending = DEF_TX_RING_PENDING;
1219 
1220 	err = register_netdev(dev);
1221 	if (err) {
1222 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1223 		goto err_out_free_irq;
1224 	}
1225 
1226 	if (macb_mii_init(bp) != 0) {
1227 		goto err_out_unregister_netdev;
1228 	}
1229 
1230 	platform_set_drvdata(pdev, dev);
1231 
1232 	printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
1233 	       dev->name, dev->base_addr, dev->irq, dev->dev_addr);
1234 
1235 	phydev = bp->phy_dev;
1236 	printk(KERN_INFO "%s: attached PHY driver [%s] "
1237 		"(mii_bus:phy_addr=%s, irq=%d)\n",
1238 		dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq);
1239 
1240 	return 0;
1241 
1242 err_out_unregister_netdev:
1243 	unregister_netdev(dev);
1244 err_out_free_irq:
1245 	free_irq(dev->irq, dev);
1246 err_out_iounmap:
1247 	iounmap(bp->regs);
1248 err_out_disable_clocks:
1249 #ifndef CONFIG_ARCH_AT91
1250 	clk_disable(bp->hclk);
1251 	clk_put(bp->hclk);
1252 #endif
1253 	clk_disable(bp->pclk);
1254 #ifndef CONFIG_ARCH_AT91
1255 err_out_put_pclk:
1256 #endif
1257 	clk_put(bp->pclk);
1258 err_out_free_dev:
1259 	free_netdev(dev);
1260 err_out:
1261 	platform_set_drvdata(pdev, NULL);
1262 	return err;
1263 }
1264 
macb_remove(struct platform_device * pdev)1265 static int __exit macb_remove(struct platform_device *pdev)
1266 {
1267 	struct net_device *dev;
1268 	struct macb *bp;
1269 
1270 	dev = platform_get_drvdata(pdev);
1271 
1272 	if (dev) {
1273 		bp = netdev_priv(dev);
1274 		if (bp->phy_dev)
1275 			phy_disconnect(bp->phy_dev);
1276 		mdiobus_unregister(bp->mii_bus);
1277 		kfree(bp->mii_bus->irq);
1278 		mdiobus_free(bp->mii_bus);
1279 		unregister_netdev(dev);
1280 		free_irq(dev->irq, dev);
1281 		iounmap(bp->regs);
1282 #ifndef CONFIG_ARCH_AT91
1283 		clk_disable(bp->hclk);
1284 		clk_put(bp->hclk);
1285 #endif
1286 		clk_disable(bp->pclk);
1287 		clk_put(bp->pclk);
1288 		free_netdev(dev);
1289 		platform_set_drvdata(pdev, NULL);
1290 	}
1291 
1292 	return 0;
1293 }
1294 
#ifdef CONFIG_PM
/* Suspend: detach the interface and gate the controller clocks.
 * (AT91 has only the single pclk; other chips also gate hclk.) */
static int macb_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *macb = netdev_priv(dev);

	netif_device_detach(dev);

#ifndef CONFIG_ARCH_AT91
	clk_disable(macb->hclk);
#endif
	clk_disable(macb->pclk);

	return 0;
}

/* Resume: re-enable the clocks, then reattach the interface. */
static int macb_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *macb = netdev_priv(dev);

	clk_enable(macb->pclk);
#ifndef CONFIG_ARCH_AT91
	clk_enable(macb->hclk);
#endif

	netif_device_attach(dev);

	return 0;
}
#else
#define macb_suspend	NULL
#define macb_resume	NULL
#endif
1329 
/* Platform driver glue.  There is deliberately no .probe member: the
 * driver is registered with platform_driver_probe() (see macb_init),
 * which allows macb_probe() to stay in the __init section. */
static struct platform_driver macb_driver = {
	.remove		= __exit_p(macb_remove),
	.suspend	= macb_suspend,
	.resume		= macb_resume,
	.driver		= {
		.name		= "macb",
		.owner	= THIS_MODULE,
	},
};
1339 
/*
 * Module init: register the driver and probe immediately.
 * platform_driver_probe() is used instead of platform_driver_register()
 * because the MACB is not hot-pluggable, so macb_probe() can be
 * discarded together with the rest of the __init code.
 */
static int __init macb_init(void)
{
	return platform_driver_probe(&macb_driver, macb_probe);
}
1344 
/* Module exit: unregister the driver; macb_remove() runs per device. */
static void __exit macb_exit(void)
{
	platform_driver_unregister(&macb_driver);
}
1349 
/* Module entry/exit hooks and metadata. */
module_init(macb_init);
module_exit(macb_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
MODULE_ALIAS("platform:macb");
1357