/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";
static struct platform_device *mac_mace_device;

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800
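/* Each ring slot is a 2 KB (0x800 byte) DMA buffer holding one frame */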

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

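/*
 * Layout of a receive ring slot as seen by the CPU: the MACE receive frame
 * status bytes (byte count, status, runt packet count, collision count)
 * precede the frame data. The pad bytes presumably reflect how the receive
 * DMA deposits the 8-bit status values.
 */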
struct mace_frame {
	u8	rcvcnt;
	u8	pad1;
	u8	rcvsts;
	u8	pad2;
	u8	rntpc;
	u8	pad3;
	u8	rcvcc;
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};

#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

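/* 'set' selects which of the two PSC DMA register sets to load (0x00 or 0x10, i.e. PSC_SET0/PSC_SET1) */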
static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

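	/* Program both PSC register sets so receive DMA can alternate between them */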
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int __devinit mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	static int found = 0;
	int err;

	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
		return -ENODEV;

	found = 1;	/* prevent 'finding' one on every device probe */

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = (volatile struct mace *) MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar Apple brain damage
	 * the bytes are spaced out at 16-byte intervals and the
	 * bits are reversed.
	 */

	addr = (void *)MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->open		= mace_open;
	dev->stop		= mace_close;
	dev->hard_start_xmit	= mace_xmit_start;
	dev->tx_timeout		= mace_tx_timeout;
	dev->watchdog_timeo	= TX_TIMEOUT;
	dev->set_multicast_list	= mace_set_multicast;
	dev->set_mac_address	= mace_set_address;

	printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
	       dev->name, dev->dev_addr);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
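	/* reading the interrupt register clears any pending interrupt bits */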
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
			N_TX_RING * MACE_BUFF_SIZE,
			&mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
			N_RX_RING * MACE_BUFF_SIZE,
			&mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx	 */
	mb->imr = 0xFF;		/* disable all irqs	 */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32)  mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

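	/* flip to the other PSC register set for the next transmit */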
	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i, j;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct dev_mc_list *dmi = dev->mc_list;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
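			/* hash each address with little-endian CRC-32; the top 6 bits index the 64-bit logical address filter */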
			for (i = 0; i < dev->mc_count; i++) {
				crc = ether_crc_le(6, dmi->dmi_addr);
				j = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[j >> 3] |= 1 << (j & 7);
				dmi = dmi->next;
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;   /* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce; resetting *may* help.
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO) {
			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
			dev->stats.rx_fifo_errors++;
		}
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
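		/* the receive byte count is 12 bits: low 8 in rcvcnt, high 4 in the low nibble of rcvsts */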
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 );

		skb = dev_alloc_skb(frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
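		/* reserve 2 bytes so the 14-byte Ethernet header leaves the IP header longword aligned */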
		skb_reserve(skb, 2);
		memcpy(skb_put(skb, frame_length), mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000)) return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

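		/* 'left' counts ring slots the DMA engine has not yet filled; 'head' is the next slot it will fill */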
		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");

static int __devexit mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
	                  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = __devexit_p(mac_mace_device_remove),
	.driver	= {
		.name = mac_mace_string,
	},
};

static int __init mac_mace_init_module(void)
{
	int err;

	if (!MACH_IS_MAC)
		return -ENODEV;

	if ((err = platform_driver_register(&mac_mace_driver))) {
		printk(KERN_ERR "Driver registration failed\n");
		return err;
	}

	mac_mace_device = platform_device_alloc(mac_mace_string, 0);
	if (!mac_mace_device)
		goto out_unregister;

	if (platform_device_add(mac_mace_device)) {
		platform_device_put(mac_mace_device);
		mac_mace_device = NULL;
	}

	return 0;

out_unregister:
	platform_driver_unregister(&mac_mace_driver);

	return -ENOMEM;
}

static void __exit mac_mace_cleanup_module(void)
{
	platform_driver_unregister(&mac_mace_driver);

	if (mac_mace_device) {
		platform_device_unregister(mac_mace_device);
		mac_mace_device = NULL;
	}
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);