• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Blackfin On-Chip MAC Driver
3  *
4  * Copyright 2004-2007 Analog Devices Inc.
5  *
6  * Enter bugs at http://blackfin.uclinux.org/
7  *
8  * Licensed under the GPL-2 or later.
9  */
10 
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/delay.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/irq.h>
20 #include <linux/io.h>
21 #include <linux/ioport.h>
22 #include <linux/crc32.h>
23 #include <linux/device.h>
24 #include <linux/spinlock.h>
25 #include <linux/mii.h>
26 #include <linux/phy.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/ethtool.h>
30 #include <linux/skbuff.h>
31 #include <linux/platform_device.h>
32 
33 #include <asm/dma.h>
34 #include <linux/dma-mapping.h>
35 
36 #include <asm/blackfin.h>
37 #include <asm/cacheflush.h>
38 #include <asm/portmux.h>
39 
40 #include "bfin_mac.h"
41 
42 #define DRV_NAME	"bfin_mac"
43 #define DRV_VERSION	"1.1"
44 #define DRV_AUTHOR	"Bryan Wu, Luke Yang"
45 #define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"
46 
47 MODULE_AUTHOR(DRV_AUTHOR);
48 MODULE_LICENSE("GPL");
49 MODULE_DESCRIPTION(DRV_DESC);
50 MODULE_ALIAS("platform:bfin_mac");
51 
#if defined(CONFIG_BFIN_MAC_USE_L1)
/* Descriptors live in on-chip L1 data SRAM; the dma_handle argument is unused. */
# define bfin_mac_alloc(dma_handle, size)  l1_data_sram_zalloc(size)
# define bfin_mac_free(dma_handle, ptr)    l1_data_sram_free(ptr)
#else
/* Descriptors come from DMA-coherent memory. */
# define bfin_mac_alloc(dma_handle, size) \
	dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
/*
 * NOTE(review): this frees only sizeof(*ptr) bytes, but the matching
 * bfin_mac_alloc() calls in desc_list_init() allocate whole arrays
 * (sizeof(*ptr) * CONFIG_BFIN_*_DESC_NUM).  The size passed to
 * dma_free_coherent() should match the allocation size -- verify.
 */
# define bfin_mac_free(dma_handle, ptr) \
	dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
#endif

/* Per-packet buffer size in bytes; sized for a max Ethernet frame plus padding. */
#define PKT_BUF_SZ 1580

/* Iterations (with ~1us/1ms delay per step) to poll status bits before giving up. */
#define MAX_TIMEOUT_CNT	500
65 
/* pointers to maintain transmit list */
static struct net_dma_desc_tx *tx_list_head;
static struct net_dma_desc_tx *tx_list_tail;
/* pointers to maintain receive list, plus the driver's current position
 * in each circular ring */
static struct net_dma_desc_rx *rx_list_head;
static struct net_dma_desc_rx *rx_list_tail;
static struct net_dma_desc_rx *current_rx_ptr;
static struct net_dma_desc_tx *current_tx_ptr;
/* base pointers of the allocated descriptor arrays, kept for freeing */
static struct net_dma_desc_tx *tx_desc;
static struct net_dma_desc_rx *rx_desc;

/* pin-mux request list: RMII or MII pin group depending on configuration */
#if defined(CONFIG_BFIN_MAC_RMII)
static u16 pin_req[] = P_RMII0;
#else
static u16 pin_req[] = P_MII0;
#endif

/* forward declarations (defined near the bottom of this file) */
static void bfin_mac_disable(void);
static void bfin_mac_enable(void);
84 
desc_list_free(void)85 static void desc_list_free(void)
86 {
87 	struct net_dma_desc_rx *r;
88 	struct net_dma_desc_tx *t;
89 	int i;
90 #if !defined(CONFIG_BFIN_MAC_USE_L1)
91 	dma_addr_t dma_handle = 0;
92 #endif
93 
94 	if (tx_desc) {
95 		t = tx_list_head;
96 		for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
97 			if (t) {
98 				if (t->skb) {
99 					dev_kfree_skb(t->skb);
100 					t->skb = NULL;
101 				}
102 				t = t->next;
103 			}
104 		}
105 		bfin_mac_free(dma_handle, tx_desc);
106 	}
107 
108 	if (rx_desc) {
109 		r = rx_list_head;
110 		for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
111 			if (r) {
112 				if (r->skb) {
113 					dev_kfree_skb(r->skb);
114 					r->skb = NULL;
115 				}
116 				r = r->next;
117 			}
118 		}
119 		bfin_mac_free(dma_handle, rx_desc);
120 	}
121 }
122 
/*
 * Allocate and link the circular TX and RX DMA descriptor rings.
 *
 * Each list element carries two hardware DMA descriptors: desc_a moves
 * the packet payload, desc_b deposits the completion status word; the
 * desc_b of each element chains to the desc_a of the next, closing the
 * ring at the end.  RX elements also get a pre-allocated sk_buff.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (partially
 * built state is torn down via desc_list_free()).
 */
static int desc_list_init(void)
{
	int i;
	struct sk_buff *new_skb;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	/*
	 * This dma_handle is useless in Blackfin dma_alloc_coherent().
	 * The real dma handler is the return value of dma_alloc_coherent().
	 */
	dma_addr_t dma_handle;
#endif

	tx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_tx) *
				CONFIG_BFIN_TX_DESC_NUM);
	if (tx_desc == NULL)
		goto init_error;

	rx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_rx) *
				CONFIG_BFIN_RX_DESC_NUM);
	if (rx_desc == NULL)
		goto init_error;

	/* init tx_list */
	tx_list_head = tx_list_tail = tx_desc;

	for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
		struct net_dma_desc_tx *t = tx_desc + i;
		struct dma_descriptor *a = &(t->desc_a);
		struct dma_descriptor *b = &(t->desc_b);

		/*
		 * disable DMA
		 * read from memory WNR = 0
		 * wordsize is 32 bits
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		a->start_addr = (unsigned long)t->packet;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(t->status));
		b->x_count = 0;

		t->skb = NULL;
		/* chain this element onto the tail of the list */
		tx_list_tail->desc_b.next_dma_desc = a;
		tx_list_tail->next = t;
		tx_list_tail = t;
	}
	tx_list_tail->next = tx_list_head;	/* tx_list is a circle */
	tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
	current_tx_ptr = tx_list_head;

	/* init rx_list */
	rx_list_head = rx_list_tail = rx_desc;

	for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
		struct net_dma_desc_rx *r = rx_desc + i;
		struct dma_descriptor *a = &(r->desc_a);
		struct dma_descriptor *b = &(r->desc_b);

		/* allocate a new skb for next time receive */
		new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
		if (!new_skb) {
			printk(KERN_NOTICE DRV_NAME
			       ": init: low on mem - packet dropped\n");
			goto init_error;
		}
		/* 2-byte headroom so the IP header lands 4-byte aligned */
		skb_reserve(new_skb, 2);
		r->skb = new_skb;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		/* since RXDWA is enabled */
		a->start_addr = (unsigned long)new_skb->data - 2;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * enable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
				NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(r->status));
		b->x_count = 0;

		/* chain this element onto the tail of the list */
		rx_list_tail->desc_b.next_dma_desc = a;
		rx_list_tail->next = r;
		rx_list_tail = r;
	}
	rx_list_tail->next = rx_list_head;	/* rx_list is a circle */
	rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
	current_rx_ptr = rx_list_head;

	return 0;

init_error:
	desc_list_free();
	printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
	return -ENOMEM;
}
248 
249 
250 /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
251 
252 /*
253  * MII operations
254  */
255 /* Wait until the previous MDC/MDIO transaction has completed */
bfin_mdio_poll(void)256 static void bfin_mdio_poll(void)
257 {
258 	int timeout_cnt = MAX_TIMEOUT_CNT;
259 
260 	/* poll the STABUSY bit */
261 	while ((bfin_read_EMAC_STAADD()) & STABUSY) {
262 		udelay(1);
263 		if (timeout_cnt-- < 0) {
264 			printk(KERN_ERR DRV_NAME
265 			": wait MDC/MDIO transaction to complete timeout\n");
266 			break;
267 		}
268 	}
269 }
270 
271 /* Read an off-chip register in a PHY through the MDC/MDIO port */
bfin_mdiobus_read(struct mii_bus * bus,int phy_addr,int regnum)272 static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
273 {
274 	bfin_mdio_poll();
275 
276 	/* read mode */
277 	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
278 				SET_REGAD((u16) regnum) |
279 				STABUSY);
280 
281 	bfin_mdio_poll();
282 
283 	return (int) bfin_read_EMAC_STADAT();
284 }
285 
286 /* Write an off-chip register in a PHY through the MDC/MDIO port */
bfin_mdiobus_write(struct mii_bus * bus,int phy_addr,int regnum,u16 value)287 static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
288 			      u16 value)
289 {
290 	bfin_mdio_poll();
291 
292 	bfin_write_EMAC_STADAT((u32) value);
293 
294 	/* write mode */
295 	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
296 				SET_REGAD((u16) regnum) |
297 				STAOP |
298 				STABUSY);
299 
300 	bfin_mdio_poll();
301 
302 	return 0;
303 }
304 
/* MDIO bus reset hook: the on-chip controller needs no reset sequence. */
static int bfin_mdiobus_reset(struct mii_bus *bus)
{
	return 0;
}
309 
/*
 * phylib link-change callback: mirror the PHY's duplex (and, in RMII
 * mode, speed) into the EMAC OPMODE register and track link state in
 * the driver private data.  Runs under lp->lock with IRQs disabled.
 */
static void bfin_mac_adjust_link(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phydev;
	unsigned long flags;
	int new_state = 0;	/* set when anything worth logging changed */

	spin_lock_irqsave(&lp->lock, flags);
	if (phydev->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != lp->old_duplex) {
			u32 opmode = bfin_read_EMAC_OPMODE();
			new_state = 1;

			if (phydev->duplex)
				opmode |= FDMODE;
			else
				opmode &= ~(FDMODE);

			bfin_write_EMAC_OPMODE(opmode);
			lp->old_duplex = phydev->duplex;
		}

		if (phydev->speed != lp->old_speed) {
#if defined(CONFIG_BFIN_MAC_RMII)
			/* in RMII mode the MAC must be told the link speed */
			u32 opmode = bfin_read_EMAC_OPMODE();
			switch (phydev->speed) {
			case 10:
				opmode |= RMII_10;
				break;
			case 100:
				opmode &= ~(RMII_10);
				break;
			default:
				printk(KERN_WARNING
					"%s: Ack!  Speed (%d) is not 10/100!\n",
					DRV_NAME, phydev->speed);
				break;
			}
			bfin_write_EMAC_OPMODE(opmode);
#endif

			new_state = 1;
			lp->old_speed = phydev->speed;
		}

		if (!lp->old_link) {
			/* link just came up */
			new_state = 1;
			lp->old_link = 1;
		}
	} else if (lp->old_link) {
		/* link just went down: reset cached state */
		new_state = 1;
		lp->old_link = 0;
		lp->old_speed = 0;
		lp->old_duplex = -1;
	}

	if (new_state) {
		u32 opmode = bfin_read_EMAC_OPMODE();
		phy_print_status(phydev);
		pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
376 
377 /* MDC  = 2.5 MHz */
378 #define MDC_CLK 2500000
379 
/*
 * Configure the MDC clock divider, locate the first PHY on the MDIO
 * bus, and attach to it via phylib with bfin_mac_adjust_link() as the
 * link-change handler.
 *
 * Returns 0 on success, -ENODEV if no PHY was found, or the error from
 * phy_connect().
 */
static int mii_probe(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	unsigned short sysctl;
	int i;
	u32 sclk, mdc_div;

	/* Enable PHY output early */
	if (!(bfin_read_VR_CTL() & PHYCLKOE))
		bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);

	/* derive the MDC divider from the system clock for a 2.5MHz MDC */
	sclk = get_sclk();
	mdc_div = ((sclk / MDC_CLK) / 2) - 1;

	sysctl = bfin_read_EMAC_SYSCTL();
	sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
	bfin_write_EMAC_SYSCTL(sysctl);

	/* search for connect PHY device */
	for (i = 0; i < PHY_MAX_ADDR; i++) {
		struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];

		if (!tmp_phydev)
			continue; /* no PHY here... */

		phydev = tmp_phydev;
		break; /* found it */
	}

	/* now we are supposed to have a proper phydev, to attach to... */
	if (!phydev) {
		printk(KERN_INFO "%s: Don't found any phy device at all\n",
			dev->name);
		return -ENODEV;
	}

#if defined(CONFIG_BFIN_MAC_RMII)
	phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0,
			PHY_INTERFACE_MODE_RMII);
#else
	phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0,
			PHY_INTERFACE_MODE_MII);
#endif

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_Pause | SUPPORTED_Asym_Pause
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;

	/* seed cached state so adjust_link reports the first real change */
	lp->old_link = 0;
	lp->old_speed = 0;
	lp->old_duplex = -1;
	lp->phydev = phydev;

	printk(KERN_INFO "%s: attached PHY driver [%s] "
	       "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
	       "@sclk=%dMHz)\n",
	       DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq,
	       MDC_CLK, mdc_div, sclk/1000000);

	return 0;
}
455 
456 /*
457  * Ethtool support
458  */
459 
460 static int
bfin_mac_ethtool_getsettings(struct net_device * dev,struct ethtool_cmd * cmd)461 bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
462 {
463 	struct bfin_mac_local *lp = netdev_priv(dev);
464 
465 	if (lp->phydev)
466 		return phy_ethtool_gset(lp->phydev, cmd);
467 
468 	return -EINVAL;
469 }
470 
471 static int
bfin_mac_ethtool_setsettings(struct net_device * dev,struct ethtool_cmd * cmd)472 bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
473 {
474 	struct bfin_mac_local *lp = netdev_priv(dev);
475 
476 	if (!capable(CAP_NET_ADMIN))
477 		return -EPERM;
478 
479 	if (lp->phydev)
480 		return phy_ethtool_sset(lp->phydev, cmd);
481 
482 	return -EINVAL;
483 }
484 
bfin_mac_ethtool_getdrvinfo(struct net_device * dev,struct ethtool_drvinfo * info)485 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
486 					struct ethtool_drvinfo *info)
487 {
488 	strcpy(info->driver, DRV_NAME);
489 	strcpy(info->version, DRV_VERSION);
490 	strcpy(info->fw_version, "N/A");
491 	strcpy(info->bus_info, dev->dev.bus_id);
492 }
493 
/* ethtool entry points, exported through ndev->ethtool_ops. */
static struct ethtool_ops bfin_mac_ethtool_ops = {
	.get_settings = bfin_mac_ethtool_getsettings,
	.set_settings = bfin_mac_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
};
500 
501 /**************************************************************************/
setup_system_regs(struct net_device * dev)502 void setup_system_regs(struct net_device *dev)
503 {
504 	unsigned short sysctl;
505 
506 	/*
507 	 * Odd word alignment for Receive Frame DMA word
508 	 * Configure checksum support and rcve frame word alignment
509 	 */
510 	sysctl = bfin_read_EMAC_SYSCTL();
511 #if defined(BFIN_MAC_CSUM_OFFLOAD)
512 	sysctl |= RXDWA | RXCKS;
513 #else
514 	sysctl |= RXDWA;
515 #endif
516 	bfin_write_EMAC_SYSCTL(sysctl);
517 
518 	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
519 
520 	/* Initialize the TX DMA channel registers */
521 	bfin_write_DMA2_X_COUNT(0);
522 	bfin_write_DMA2_X_MODIFY(4);
523 	bfin_write_DMA2_Y_COUNT(0);
524 	bfin_write_DMA2_Y_MODIFY(0);
525 
526 	/* Initialize the RX DMA channel registers */
527 	bfin_write_DMA1_X_COUNT(0);
528 	bfin_write_DMA1_X_MODIFY(4);
529 	bfin_write_DMA1_Y_COUNT(0);
530 	bfin_write_DMA1_Y_MODIFY(0);
531 }
532 
/*
 * Program the hardware MAC address registers from a 6-byte buffer.
 *
 * Fix: assemble the little-endian register values byte by byte instead
 * of casting mac_addr to __le32/__le16.  The caller's buffer is not
 * guaranteed to be 4-byte aligned (Blackfin faults on misaligned
 * 32-bit loads) and the casts also violated strict aliasing.  The
 * result is identical to the old le32_to_cpu()/le16_to_cpu() loads.
 */
static void setup_mac_addr(u8 *mac_addr)
{
	u32 addr_low = mac_addr[0] | (mac_addr[1] << 8) |
		       (mac_addr[2] << 16) | ((u32)mac_addr[3] << 24);
	u16 addr_hi = mac_addr[4] | (mac_addr[5] << 8);

	bfin_write_EMAC_ADDRLO(addr_low);
	bfin_write_EMAC_ADDRHI(addr_hi);
}
542 
bfin_mac_set_mac_address(struct net_device * dev,void * p)543 static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
544 {
545 	struct sockaddr *addr = p;
546 	if (netif_running(dev))
547 		return -EBUSY;
548 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
549 	setup_mac_addr(dev->dev_addr);
550 	return 0;
551 }
552 
/*
 * Reclaim TX descriptors whose DMA has completed (hardware sets the
 * status word), freeing their skbs and advancing tx_list_head.  When
 * the ring is nearly full, busy-wait briefly for the head descriptor
 * to complete so the DMA engine never wraps onto in-flight entries.
 */
static void adjust_tx_list(void)
{
	int timeout_cnt = MAX_TIMEOUT_CNT;

	if (tx_list_head->status.status_word != 0
	    && current_tx_ptr != tx_list_head) {
		goto adjust_head;	/* released something, just return; */
	}

	/*
	 * if nothing released, check wait condition
	 * current's next can not be the head,
	 * otherwise the dma will not stop as we want
	 */
	if (current_tx_ptr->next->next == tx_list_head) {
		while (tx_list_head->status.status_word == 0) {
			mdelay(1);	/* ~1ms per poll iteration */
			if (tx_list_head->status.status_word != 0
			    || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
				/* head completed, or TX DMA went idle */
				goto adjust_head;
			}
			if (timeout_cnt-- < 0) {
				printk(KERN_ERR DRV_NAME
				": wait for adjust tx list head timeout\n");
				break;
			}
		}
		if (tx_list_head->status.status_word != 0) {
			goto adjust_head;
		}
	}

	return;

adjust_head:
	/* free every completed descriptor up to the current position */
	do {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		} else {
			printk(KERN_ERR DRV_NAME
			       ": no sk_buff in a transmitted frame!\n");
		}
		tx_list_head = tx_list_head->next;
	} while (tx_list_head->status.status_word != 0
		 && current_tx_ptr != tx_list_head);
	return;

}
604 
/*
 * Queue one skb on the current TX descriptor and kick the TX DMA if it
 * is idle.  The hardware expects a 16-bit frame length immediately
 * before the packet data; when skb->data alignment allows, the length
 * word is written into the skb headroom to avoid a copy, otherwise the
 * payload is copied into the descriptor's own packet buffer.
 * Always queues the skb (freed later by adjust_tx_list()) and returns 0.
 */
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	u16 *data;

	current_tx_ptr->skb = skb;

	if (ANOMALY_05000285) {
		/*
		 * TXDWA feature is not available to older revision < 0.3
		 * silicon of BF537
		 *
		 * Only if data buffer is ODD WORD alignment, we do not
		 * need to memcpy
		 */
		u32 data_align = (u32)(skb->data) & 0x3;
		if (data_align == 0x2) {
			/* move skb->data to current_tx_ptr payload:
			 * place the length word in the 2 bytes of headroom
			 * directly before the payload */
			data = (u16 *)(skb->data) - 1;
			*data = (u16)(skb->len);
			current_tx_ptr->desc_a.start_addr = (u32)data;
			/* this is important! flush the payload from the
			 * data cache so DMA reads current data */
			blackfin_dcache_flush_range((u32)data,
					(u32)((u8 *)data + skb->len + 4));
		} else {
			/* unaligned: copy into the descriptor's own buffer,
			 * length word first */
			*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
			memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
				skb->len);
			current_tx_ptr->desc_a.start_addr =
				(u32)current_tx_ptr->packet;
			if (current_tx_ptr->status.status_word != 0)
				current_tx_ptr->status.status_word = 0;
			blackfin_dcache_flush_range(
				(u32)current_tx_ptr->packet,
				(u32)(current_tx_ptr->packet + skb->len + 2));
		}
	} else {
		/*
		 * TXDWA feature is available to revision >= 0.3 silicon of
		 * BF537 and always available to BF52x
		 */
		u32 data_align = (u32)(skb->data) & 0x3;
		if (data_align == 0x0) {
			/* word-aligned payload: enable TXDWA and prepend
			 * the length word 4 bytes before the data */
			u16 sysctl = bfin_read_EMAC_SYSCTL();
			sysctl |= TXDWA;
			bfin_write_EMAC_SYSCTL(sysctl);

			/* move skb->data to current_tx_ptr payload */
			data = (u16 *)(skb->data) - 2;
			*data = (u16)(skb->len);
			current_tx_ptr->desc_a.start_addr = (u32)data;
			/* this is important! */
			blackfin_dcache_flush_range(
					(u32)data,
					(u32)((u8 *)data + skb->len + 4));
		} else if (data_align == 0x2) {
			/* half-word aligned: TXDWA off, length word in the
			 * 2 bytes before the data */
			u16 sysctl = bfin_read_EMAC_SYSCTL();
			sysctl &= ~TXDWA;
			bfin_write_EMAC_SYSCTL(sysctl);

			/* move skb->data to current_tx_ptr payload */
			data = (u16 *)(skb->data) - 1;
			*data = (u16)(skb->len);
			current_tx_ptr->desc_a.start_addr = (u32)data;
			/* this is important! */
			blackfin_dcache_flush_range(
					(u32)data,
					(u32)((u8 *)data + skb->len + 4));
		} else {
			/* odd byte alignment: fall back to copying */
			u16 sysctl = bfin_read_EMAC_SYSCTL();
			sysctl &= ~TXDWA;
			bfin_write_EMAC_SYSCTL(sysctl);

			*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
			memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
				skb->len);
			current_tx_ptr->desc_a.start_addr =
				(u32)current_tx_ptr->packet;
			if (current_tx_ptr->status.status_word != 0)
				current_tx_ptr->status.status_word = 0;
			blackfin_dcache_flush_range(
				(u32)current_tx_ptr->packet,
				(u32)(current_tx_ptr->packet + skb->len + 2));
		}
	}

	/* enable this packet's dma */
	current_tx_ptr->desc_a.config |= DMAEN;

	/* tx dma is running, just return */
	if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
		goto out;

	/* tx dma is not running */
	bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
	/* dma enabled, read from memory, size is 6 */
	bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
	/* Turn on the EMAC tx */
	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);

out:
	/* reclaim completed descriptors and advance the ring position */
	adjust_tx_list();
	current_tx_ptr = current_tx_ptr->next;
	dev->trans_start = jiffies;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += (skb->len);
	return 0;
}
713 
/*
 * Hand one received frame up to the network stack and re-arm the
 * current RX descriptor with a freshly allocated skb.  On allocation
 * failure the frame is dropped and the descriptor keeps its old skb.
 *
 * NOTE(review): on the drop path the descriptor's status word is not
 * cleared, so the caller's loop in bfin_mac_interrupt() will see it as
 * still-pending -- verify this cannot spin.
 */
static void bfin_mac_rx(struct net_device *dev)
{
	struct sk_buff *skb, *new_skb;
	unsigned short len;

	/* allocate a new skb for next time receive */
	skb = current_rx_ptr->skb;
	new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
	if (!new_skb) {
		printk(KERN_NOTICE DRV_NAME
		       ": rx: low on mem - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve 2 bytes for RXDWA padding */
	skb_reserve(new_skb, 2);
	current_rx_ptr->skb = new_skb;
	/* DMA starts 2 bytes before the payload to absorb the RXDWA pad */
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	/* Invalidate the data cache over skb->data's range when it is
	 * write-back cached.  This prevents stale dirty lines from being
	 * written back over the data DMA is about to deposit.
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	/* frame length lives in the low bits of the DMA status word */
	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	skb_put(skb, len);
	/* invalidate again so the CPU reads what DMA actually wrote */
	blackfin_dcache_invalidate_range((unsigned long)skb->head,
					 (unsigned long)skb->tail);

	skb->protocol = eth_type_trans(skb, dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	/* hardware supplied the IP payload checksum in the status block */
	skb->csum = current_rx_ptr->status.ip_payload_csum;
	skb->ip_summed = CHECKSUM_COMPLETE;
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	/* mark the descriptor ready for hardware reuse, then advance */
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;

out:
	return;
}
759 
760 /* interrupt routine to handle rx and error signal */
bfin_mac_interrupt(int irq,void * dev_id)761 static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
762 {
763 	struct net_device *dev = dev_id;
764 	int number = 0;
765 
766 get_one_packet:
767 	if (current_rx_ptr->status.status_word == 0) {
768 		/* no more new packet received */
769 		if (number == 0) {
770 			if (current_rx_ptr->next->status.status_word != 0) {
771 				current_rx_ptr = current_rx_ptr->next;
772 				goto real_rx;
773 			}
774 		}
775 		bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
776 					   DMA_DONE | DMA_ERR);
777 		return IRQ_HANDLED;
778 	}
779 
780 real_rx:
781 	bfin_mac_rx(dev);
782 	number++;
783 	goto get_one_packet;
784 }
785 
786 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the RX interrupt handler with the IRQ masked. */
static void bfin_mac_poll(struct net_device *dev)
{
	disable_irq(IRQ_MAC_RX);
	bfin_mac_interrupt(IRQ_MAC_RX, dev);
	enable_irq(IRQ_MAC_RX);
}
793 #endif				/* CONFIG_NET_POLL_CONTROLLER */
794 
bfin_mac_disable(void)795 static void bfin_mac_disable(void)
796 {
797 	unsigned int opmode;
798 
799 	opmode = bfin_read_EMAC_OPMODE();
800 	opmode &= (~RE);
801 	opmode &= (~TE);
802 	/* Turn off the EMAC */
803 	bfin_write_EMAC_OPMODE(opmode);
804 }
805 
/*
 * Enable Interrupts, Receive, and Transmit
 *
 * Arms the RX DMA channel on the descriptor ring and turns the
 * receiver on.  TE is set here only for early RMII silicon; otherwise
 * TX is enabled on demand in bfin_mac_hard_start_xmit().
 */
static void bfin_mac_enable(void)
{
	u32 opmode;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	/* Set RX DMA */
	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
	bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);

	/* Wait MII done */
	bfin_mdio_poll();

	/* We enable only RX here */
	/* ASTP   : Enable Automatic Pad Stripping
	   PR     : Promiscuous Mode for test
	   PSF    : Receive frames with total length less than 64 bytes.
	   FDMODE : Full Duplex Mode
	   LB     : Internal Loopback for test
	   RE     : Receiver Enable */
	opmode = bfin_read_EMAC_OPMODE();
	if (opmode & FDMODE)
		opmode |= PSF;
	else
		opmode |= DRO | DC | PSF;	/* half duplex needs DRO/DC too */
	opmode |= RE;

#if defined(CONFIG_BFIN_MAC_RMII)
	opmode |= RMII; /* For Now only 100MBit are supported */
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
	/* early silicon: transmit enable must be set here */
	opmode |= TE;
#endif
#endif
	/* Turn on the EMAC rx */
	bfin_write_EMAC_OPMODE(opmode);
}
845 
/* Our watchdog timed out. Called by the networking layer */
static void bfin_mac_timeout(struct net_device *dev)
{
	pr_debug("%s: %s\n", dev->name, __func__);

	/* quiesce the MAC while we reset our view of the TX ring */
	bfin_mac_disable();

	/* reset tx queue */
	tx_list_tail = tx_list_head->next;

	bfin_mac_enable();

	/* We can accept TX packets again */
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
862 
bfin_mac_multicast_hash(struct net_device * dev)863 static void bfin_mac_multicast_hash(struct net_device *dev)
864 {
865 	u32 emac_hashhi, emac_hashlo;
866 	struct dev_mc_list *dmi = dev->mc_list;
867 	char *addrs;
868 	int i;
869 	u32 crc;
870 
871 	emac_hashhi = emac_hashlo = 0;
872 
873 	for (i = 0; i < dev->mc_count; i++) {
874 		addrs = dmi->dmi_addr;
875 		dmi = dmi->next;
876 
877 		/* skip non-multicast addresses */
878 		if (!(*addrs & 1))
879 			continue;
880 
881 		crc = ether_crc(ETH_ALEN, addrs);
882 		crc >>= 26;
883 
884 		if (crc & 0x20)
885 			emac_hashhi |= 1 << (crc & 0x1f);
886 		else
887 			emac_hashlo |= 1 << (crc & 0x1f);
888 	}
889 
890 	bfin_write_EMAC_HASHHI(emac_hashhi);
891 	bfin_write_EMAC_HASHLO(emac_hashlo);
892 
893 	return;
894 }
895 
896 /*
897  * This routine will, depending on the values passed to it,
898  * either make it accept multicast packets, go into
899  * promiscuous mode (for TCPDUMP and cousins) or accept
900  * a select set of multicast packets
901  */
bfin_mac_set_multicast_list(struct net_device * dev)902 static void bfin_mac_set_multicast_list(struct net_device *dev)
903 {
904 	u32 sysctl;
905 
906 	if (dev->flags & IFF_PROMISC) {
907 		printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
908 		sysctl = bfin_read_EMAC_OPMODE();
909 		sysctl |= RAF;
910 		bfin_write_EMAC_OPMODE(sysctl);
911 	} else if (dev->flags & IFF_ALLMULTI) {
912 		/* accept all multicast */
913 		sysctl = bfin_read_EMAC_OPMODE();
914 		sysctl |= PAM;
915 		bfin_write_EMAC_OPMODE(sysctl);
916 	} else if (dev->mc_count) {
917 		/* set up multicast hash table */
918 		sysctl = bfin_read_EMAC_OPMODE();
919 		sysctl |= HM;
920 		bfin_write_EMAC_OPMODE(sysctl);
921 		bfin_mac_multicast_hash(dev);
922 	} else {
923 		/* clear promisc or multicast mode */
924 		sysctl = bfin_read_EMAC_OPMODE();
925 		sysctl &= ~(RAF | PAM);
926 		bfin_write_EMAC_OPMODE(sysctl);
927 	}
928 }
929 
/*
 * Put the device into an inactive state: MAC off and both DMA
 * channels stopped.
 */
static void bfin_mac_shutdown(struct net_device *dev)
{
	bfin_write_EMAC_OPMODE(0x00000000);	/* EMAC off */
	bfin_write_DMA1_CONFIG(0x0000);		/* RX DMA off */
	bfin_write_DMA2_CONFIG(0x0000);		/* TX DMA off */
}
941 
/*
 * Open and Initialize the interface
 *
 * Builds the DMA descriptor rings, starts the PHY, programs the EMAC
 * registers and MAC address, and enables reception.
 * Returns 0 on success or a negative errno.
 */
static int bfin_mac_open(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	int retval;
	pr_debug("%s: %s\n", dev->name, __func__);

	/*
	 * Check that the address is valid.  If its not, refuse
	 * to bring the device up.  The user must specify an
	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
	 */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
		return -EINVAL;
	}

	/* initial rx and tx list */
	retval = desc_list_init();

	if (retval)
		return retval;

	/* start the PHY state machine, then reset the PHY itself */
	phy_start(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
	setup_system_regs(dev);
	setup_mac_addr(dev->dev_addr);
	/* cycle the MAC through a known-off state before enabling RX */
	bfin_mac_disable();
	bfin_mac_enable();
	pr_debug("hardware init finished\n");
	netif_start_queue(dev);
	netif_carrier_on(dev);

	return 0;
}
981 
/*
 * Take the interface down ('ifconfig ethX down'): stop the queue,
 * stop and power down the PHY, shut off the MAC and DMA channels,
 * and release the descriptor rings.  Always returns 0.
 */
static int bfin_mac_close(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	pr_debug("%s: %s\n", dev->name, __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* stop the PHY state machine and put the PHY into power-down */
	phy_stop(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);

	/* clear everything */
	bfin_mac_shutdown(dev);

	/* free the rx/tx buffers */
	desc_list_free();

	return 0;
}
1007 
bfin_mac_probe(struct platform_device * pdev)1008 static int __devinit bfin_mac_probe(struct platform_device *pdev)
1009 {
1010 	struct net_device *ndev;
1011 	struct bfin_mac_local *lp;
1012 	int rc, i;
1013 
1014 	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
1015 	if (!ndev) {
1016 		dev_err(&pdev->dev, "Cannot allocate net device!\n");
1017 		return -ENOMEM;
1018 	}
1019 
1020 	SET_NETDEV_DEV(ndev, &pdev->dev);
1021 	platform_set_drvdata(pdev, ndev);
1022 	lp = netdev_priv(ndev);
1023 
1024 	/* Grab the MAC address in the MAC */
1025 	*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
1026 	*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
1027 
1028 	/* probe mac */
1029 	/*todo: how to proble? which is revision_register */
1030 	bfin_write_EMAC_ADDRLO(0x12345678);
1031 	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
1032 		dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
1033 		rc = -ENODEV;
1034 		goto out_err_probe_mac;
1035 	}
1036 
1037 	/* set the GPIO pins to Ethernet mode */
1038 	rc = peripheral_request_list(pin_req, DRV_NAME);
1039 	if (rc) {
1040 		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1041 		rc = -EFAULT;
1042 		goto out_err_setup_pin_mux;
1043 	}
1044 
1045 	/*
1046 	 * Is it valid? (Did bootloader initialize it?)
1047 	 * Grab the MAC from the board somehow
1048 	 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
1049 	 */
1050 	if (!is_valid_ether_addr(ndev->dev_addr))
1051 		bfin_get_ether_addr(ndev->dev_addr);
1052 
1053 	/* If still not valid, get a random one */
1054 	if (!is_valid_ether_addr(ndev->dev_addr))
1055 		random_ether_addr(ndev->dev_addr);
1056 
1057 	setup_mac_addr(ndev->dev_addr);
1058 
1059 	/* MDIO bus initial */
1060 	lp->mii_bus = mdiobus_alloc();
1061 	if (lp->mii_bus == NULL)
1062 		goto out_err_mdiobus_alloc;
1063 
1064 	lp->mii_bus->priv = ndev;
1065 	lp->mii_bus->read = bfin_mdiobus_read;
1066 	lp->mii_bus->write = bfin_mdiobus_write;
1067 	lp->mii_bus->reset = bfin_mdiobus_reset;
1068 	lp->mii_bus->name = "bfin_mac_mdio";
1069 	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "0");
1070 	lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1071 	for (i = 0; i < PHY_MAX_ADDR; ++i)
1072 		lp->mii_bus->irq[i] = PHY_POLL;
1073 
1074 	rc = mdiobus_register(lp->mii_bus);
1075 	if (rc) {
1076 		dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1077 		goto out_err_mdiobus_register;
1078 	}
1079 
1080 	rc = mii_probe(ndev);
1081 	if (rc) {
1082 		dev_err(&pdev->dev, "MII Probe failed!\n");
1083 		goto out_err_mii_probe;
1084 	}
1085 
1086 	/* Fill in the fields of the device structure with ethernet values. */
1087 	ether_setup(ndev);
1088 
1089 	ndev->open = bfin_mac_open;
1090 	ndev->stop = bfin_mac_close;
1091 	ndev->hard_start_xmit = bfin_mac_hard_start_xmit;
1092 	ndev->set_mac_address = bfin_mac_set_mac_address;
1093 	ndev->tx_timeout = bfin_mac_timeout;
1094 	ndev->set_multicast_list = bfin_mac_set_multicast_list;
1095 #ifdef CONFIG_NET_POLL_CONTROLLER
1096 	ndev->poll_controller = bfin_mac_poll;
1097 #endif
1098 	ndev->ethtool_ops = &bfin_mac_ethtool_ops;
1099 
1100 	spin_lock_init(&lp->lock);
1101 
1102 	/* now, enable interrupts */
1103 	/* register irq handler */
1104 	rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
1105 			IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev);
1106 	if (rc) {
1107 		dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
1108 		rc = -EBUSY;
1109 		goto out_err_request_irq;
1110 	}
1111 
1112 	rc = register_netdev(ndev);
1113 	if (rc) {
1114 		dev_err(&pdev->dev, "Cannot register net device!\n");
1115 		goto out_err_reg_ndev;
1116 	}
1117 
1118 	/* now, print out the card info, in a short format.. */
1119 	dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1120 
1121 	return 0;
1122 
1123 out_err_reg_ndev:
1124 	free_irq(IRQ_MAC_RX, ndev);
1125 out_err_request_irq:
1126 out_err_mii_probe:
1127 	mdiobus_unregister(lp->mii_bus);
1128 out_err_mdiobus_register:
1129 	mdiobus_free(lp->mii_bus);
1130 out_err_mdiobus_alloc:
1131 	peripheral_free_list(pin_req);
1132 out_err_setup_pin_mux:
1133 out_err_probe_mac:
1134 	platform_set_drvdata(pdev, NULL);
1135 	free_netdev(ndev);
1136 
1137 	return rc;
1138 }
1139 
bfin_mac_remove(struct platform_device * pdev)1140 static int __devexit bfin_mac_remove(struct platform_device *pdev)
1141 {
1142 	struct net_device *ndev = platform_get_drvdata(pdev);
1143 	struct bfin_mac_local *lp = netdev_priv(ndev);
1144 
1145 	platform_set_drvdata(pdev, NULL);
1146 
1147 	mdiobus_unregister(lp->mii_bus);
1148 	mdiobus_free(lp->mii_bus);
1149 
1150 	unregister_netdev(ndev);
1151 
1152 	free_irq(IRQ_MAC_RX, ndev);
1153 
1154 	free_netdev(ndev);
1155 
1156 	peripheral_free_list(pin_req);
1157 
1158 	return 0;
1159 }
1160 
1161 #ifdef CONFIG_PM
/* Platform PM hook: take a running interface down across suspend. */
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);

	/* only an up interface needs quiescing */
	if (netif_running(net_dev))
		bfin_mac_close(net_dev);

	return 0;
}
1171 
/*
 * Platform PM hook: bring the interface back up after resume.
 *
 * Fix: propagate bfin_mac_open()'s return value instead of discarding
 * it, so a failed resume (e.g. descriptor allocation failure) is
 * reported to the PM core.
 */
static int bfin_mac_resume(struct platform_device *pdev)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);

	if (netif_running(net_dev))
		return bfin_mac_open(net_dev);

	return 0;
}
1181 #else
1182 #define bfin_mac_suspend NULL
1183 #define bfin_mac_resume NULL
1184 #endif	/* CONFIG_PM */
1185 
/* Platform driver glue: binds on the "bfin_mac" platform device name. */
static struct platform_driver bfin_mac_driver = {
	.probe = bfin_mac_probe,
	.remove = __devexit_p(bfin_mac_remove),
	.resume = bfin_mac_resume,
	.suspend = bfin_mac_suspend,
	.driver = {
		.name = DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
1196 
/* Module entry point: register the platform driver. */
static int __init bfin_mac_init(void)
{
	return platform_driver_register(&bfin_mac_driver);
}
1201 
1202 module_init(bfin_mac_init);
1203 
/* Module exit point: unregister the platform driver. */
static void __exit bfin_mac_cleanup(void)
{
	platform_driver_unregister(&bfin_mac_driver);
}
1208 
1209 module_exit(bfin_mac_cleanup);
1210 
1211