1 /*
2  *	This program is free software; you can redistribute it and/or
3  *	modify it under the terms of the GNU General Public License
4  *	as published by the Free Software Foundation; either version
5  *	2 of the License, or (at your option) any later version.
6  *
7  *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
8  *	(c) Copyright 2000, 2001 Red Hat Inc
9  *
10  *	Development of this driver was funded by Equiinet Ltd
11  *			http://www.equiinet.com
12  *
13  *	ChangeLog:
14  *
15  *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
16  *	unification of all the Z85x30 asynchronous drivers for real.
17  *
18  *	DMA now uses get_free_page as kmalloc buffers may span a 64K
19  *	boundary.
20  *
21  *	Modified for SMP safety and SMP locking by Alan Cox
22  *					<alan@lxorguk.ukuu.org.uk>
23  *
24  *	Performance
25  *
26  *	Z85230:
27  *	Without DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
28  *	X.25 is not unrealistic on all machines. DMA mode can in theory
29  *	handle T1/E1 quite nicely. In practice the limit seems to be about
30  *	512Kbit->1Mbit depending on motherboard.
31  *
32  *	Z85C30:
33  *	64K will take DMA, 9600 baud X.25 should be ok.
34  *
35  *	Z8530:
36  *	Synchronous mode without DMA is unlikely to pass much above 2400 baud.
37  */
38 
39 #include <linux/module.h>
40 #include <linux/kernel.h>
41 #include <linux/mm.h>
42 #include <linux/net.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/if_arp.h>
46 #include <linux/delay.h>
47 #include <linux/hdlc.h>
48 #include <linux/ioport.h>
49 #include <linux/init.h>
50 #include <asm/dma.h>
51 #include <asm/io.h>
52 #define RT_LOCK
53 #define RT_UNLOCK
54 #include <linux/spinlock.h>
55 
56 #include "z85230.h"
57 
58 
59 /**
60  *	z8530_read_port - Architecture specific interface function
61  *	@p: port to read
62  *
63  *	Provides the port access methods. The Comtrol SV11 requires no delays
64  *	between accesses and uses PC I/O. Some drivers may need a 5uS delay.
65  *
66  *	In the longer term this should become an architecture specific
67  *	section so that this can become a generic driver interface for all
68  *	platforms. For now we only handle PC I/O ports with or without the
69  *	dread 5uS sanity delay.
70  *
71  *	The caller must hold sufficient locks to avoid violating the horrible
72  *	5uS delay rule.
73  */
74 
75 static inline int z8530_read_port(unsigned long p)
76 {
77 	u8 r=inb(Z8530_PORT_OF(p));
78 	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
79 		udelay(5);
80 	return r;
81 }
82 
83 /**
84  *	z8530_write_port - Architecture specific interface function
85  *	@p: port to write
86  *	@d: value to write
87  *
88  *	Write a value to a port with delays if need be. Note that the
89  *	caller must hold locks to avoid read/writes from other contexts
90  *	violating the 5uS rule
91  *
92  *	In the longer term this should become an architecture specific
93  *	section so that this can become a generic driver interface for all
94  *	platforms. For now we only handle PC I/O ports with or without the
95  *	dread 5uS sanity delay.
96  */
97 
98 
99 static inline void z8530_write_port(unsigned long p, u8 d)
100 {
101 	outb(d,Z8530_PORT_OF(p));
102 	if(p&Z8530_PORT_SLEEP)
103 		udelay(5);
104 }
105 
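/*
 *	Usage sketch (not part of the driver): a board that does need the
 *	5uS access delay can ask for it per port by OR-ing Z8530_PORT_SLEEP
 *	into the I/O values stored in ctrlio/dataio. The names and offsets
 *	below are hypothetical; real cards map the SCC registers differently.
 *
 *		board->sccdev.chanA.ctrlio = (iobase + 1) | Z8530_PORT_SLEEP;
 *		board->sccdev.chanA.dataio = (iobase + 3) | Z8530_PORT_SLEEP;
 */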
106 
107 
108 static void z8530_rx_done(struct z8530_channel *c);
109 static void z8530_tx_done(struct z8530_channel *c);
110 
111 
112 /**
113  *	read_zsreg - Read a register from a Z85230
114  *	@c: Z8530 channel to read from (2 per chip)
115  *	@reg: Register to read
116  *	FIXME: Use a spinlock.
117  *
118  *	Most of the Z8530 registers are indexed off the control registers.
119  *	A read is done by writing to the control register and reading the
120  *	register back.  The caller must hold the lock
121  */
122 
123 static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
124 {
125 	if(reg)
126 		z8530_write_port(c->ctrlio, reg);
127 	return z8530_read_port(c->ctrlio);
128 }
129 
130 /**
131  *	read_zsdata - Read the data port of a Z8530 channel
132  *	@c: The Z8530 channel to read the data port from
133  *
134  *	The data port provides fast access to some things. We still
135  *	have all the 5uS delays to worry about.
136  */
137 
138 static inline u8 read_zsdata(struct z8530_channel *c)
139 {
140 	u8 r;
141 	r=z8530_read_port(c->dataio);
142 	return r;
143 }
144 
145 /**
146  *	write_zsreg - Write to a Z8530 channel register
147  *	@c: The Z8530 channel
148  *	@reg: Register number
149  *	@val: Value to write
150  *
151  *	Write a value to an indexed register. The caller must hold the lock
152  *	to honour the irritating delay rules. We know about register 0
153  *	being fast to access.
154  *
155  *      Assumes c->lock is held.
156  */
157 static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
158 {
159 	if(reg)
160 		z8530_write_port(c->ctrlio, reg);
161 	z8530_write_port(c->ctrlio, val);
162 
163 }
164 
165 /**
166  *	write_zsctrl - Write to a Z8530 control register
167  *	@c: The Z8530 channel
168  *	@val: Value to write
169  *
170  *	Write directly to the control register on the Z8530
171  */
172 
173 static inline void write_zsctrl(struct z8530_channel *c, u8 val)
174 {
175 	z8530_write_port(c->ctrlio, val);
176 }
177 
178 /**
179  *	write_zsdata - Write to a Z8530 data register
180  *	@c: The Z8530 channel
181  *	@val: Value to write
182  *
183  *	Write directly to the data register on the Z8530
184  */
185 
186 
187 static inline void write_zsdata(struct z8530_channel *c, u8 val)
188 {
189 	z8530_write_port(c->dataio, val);
190 }
191 
192 /*
193  *	Register loading parameters for a dead port
194  */
195 
196 u8 z8530_dead_port[]=
197 {
198 	255
199 };
200 
201 EXPORT_SYMBOL(z8530_dead_port);
202 
203 /*
204  *	Register loading parameters for currently supported circuit types
205  */
206 
207 
208 /*
209  *	Data clocked by telco end. This is the correct data for the UK
210  *	"kilostream" service, and most other similar services.
211  */
212 
213 u8 z8530_hdlc_kilostream[]=
214 {
215 	4,	SYNC_ENAB|SDLC|X1CLK,
216 	2,	0,	/* No vector */
217 	1,	0,
218 	3,	ENT_HM|RxCRC_ENAB|Rx8,
219 	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
220 	9,	0,		/* Disable interrupts */
221 	6,	0xFF,
222 	7,	FLAG,
223 	10,	ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
224 	11,	TCTRxCP,
225 	14,	DISDPLL,
226 	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
227 	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
228 	9,	NV|MIE|NORESET,
229 	255
230 };
231 
232 EXPORT_SYMBOL(z8530_hdlc_kilostream);
233 
234 /*
235  *	As above but for enhanced chips.
236  */
237 
238 u8 z8530_hdlc_kilostream_85230[]=
239 {
240 	4,	SYNC_ENAB|SDLC|X1CLK,
241 	2,	0,	/* No vector */
242 	1,	0,
243 	3,	ENT_HM|RxCRC_ENAB|Rx8,
244 	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
245 	9,	0,		/* Disable interrupts */
246 	6,	0xFF,
247 	7,	FLAG,
248 	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
249 	11,	TCTRxCP,
250 	14,	DISDPLL,
251 	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
252 	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
253 	9,	NV|MIE|NORESET,
254 	23,	3,		/* Extended mode AUTO TX and EOM*/
255 
256 	255
257 };
258 
259 EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
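/*
 *	The tables above are flat (register, value) pairs terminated by 255
 *	and are loaded with z8530_channel_load() further down. A minimal
 *	sketch of a board specific variant (hypothetical name; a real table
 *	needs the full register set, as in z8530_hdlc_kilostream):
 *
 *		static u8 my_board_sync[] = {
 *			4,	SYNC_ENAB | SDLC | X1CLK,
 *			9,	NV | MIE | NORESET,
 *			255
 *		};
 *
 *		z8530_channel_load(&board->sccdev.chanA, my_board_sync);
 */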
260 
261 /**
262  *	z8530_flush_fifo - Flush on chip RX FIFO
263  *	@c: Channel to flush
264  *
265  *	Flush the receive FIFO. There is no specific option for this, we
266  *	blindly read bytes and discard them. Reading when there is no data
267  *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
268  *
269  *	All locking is handled for the caller. On return data may still be
270  *	present if it arrived during the flush.
271  */
272 
273 static void z8530_flush_fifo(struct z8530_channel *c)
274 {
275 	read_zsreg(c, R1);
276 	read_zsreg(c, R1);
277 	read_zsreg(c, R1);
278 	read_zsreg(c, R1);
279 	if(c->dev->type==Z85230)
280 	{
281 		read_zsreg(c, R1);
282 		read_zsreg(c, R1);
283 		read_zsreg(c, R1);
284 		read_zsreg(c, R1);
285 	}
286 }
287 
288 /**
289  *	z8530_rtsdtr - Control the outgoing DTR/RTS lines
290  *	@c: The Z8530 channel to control
291  *	@set: 1 to set, 0 to clear
292  *
293  *	Sets or clears DTR/RTS on the requested line. All locking is handled
294  *	by the caller. For now we assume all boards use the actual RTS/DTR
295  *	on the chip. Apparently one or two don't. We'll scream about them
296  *	later.
297  */
298 
299 static void z8530_rtsdtr(struct z8530_channel *c, int set)
300 {
301 	if (set)
302 		c->regs[5] |= (RTS | DTR);
303 	else
304 		c->regs[5] &= ~(RTS | DTR);
305 	write_zsreg(c, R5, c->regs[5]);
306 }
307 
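/*
 *	Note the pattern used here and throughout the driver: most Z8530
 *	write registers cannot be read back, so each channel keeps a shadow
 *	copy in c->regs[] which is updated first and then written out. An
 *	illustration only (not a driver API), setting DTR alone would be:
 *
 *		c->regs[R5] |= DTR;
 *		write_zsreg(c, R5, c->regs[R5]);
 */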
308 /**
309  *	z8530_rx - Handle a PIO receive event
310  *	@c: Z8530 channel to process
311  *
312  *	Receive handler for receiving in PIO mode. This is much like the
313  *	async one but not quite the same or as complex
314  *
315  *	Note: It's intended that this handler can easily be separated from
316  *	the main code to run realtime. That'll be needed for some machines
317  *	(eg to ever clock 64kbits on a sparc ;)).
318  *
319  *	The RT_LOCK macros don't do anything now. Keep the code covered
320  *	by them as short as possible in all circumstances - clocks cost
321  *	baud. The interrupt handler is assumed to be atomic w.r.t. to
322  *	other code - this is true in the RT case too.
323  *
324  *	We only cover the sync cases for this. If you want 2Mbit async
325  *	do it yourself but consider medical assistance first. This non DMA
326  *	synchronous mode is portable code. The DMA mode assumes PC-like
327  *	ISA DMA.
328  *
329  *	Called with the device lock held
330  */
331 
332 static void z8530_rx(struct z8530_channel *c)
333 {
334 	u8 ch,stat;
335 
336 	while(1)
337 	{
338 		/* FIFO empty ? */
339 		if(!(read_zsreg(c, R0)&1))
340 			break;
341 		ch=read_zsdata(c);
342 		stat=read_zsreg(c, R1);
343 
344 		/*
345 		 *	Overrun ?
346 		 */
347 		if(c->count < c->max)
348 		{
349 			*c->dptr++=ch;
350 			c->count++;
351 		}
352 
353 		if(stat&END_FR)
354 		{
355 
356 			/*
357 			 *	Error ?
358 			 */
359 			if(stat&(Rx_OVR|CRC_ERR))
360 			{
361 				/* Rewind the buffer and return */
362 				if(c->skb)
363 					c->dptr=c->skb->data;
364 				c->count=0;
365 				if(stat&Rx_OVR)
366 				{
367 					printk(KERN_WARNING "%s: overrun\n", c->dev->name);
368 					c->rx_overrun++;
369 				}
370 				if(stat&CRC_ERR)
371 				{
372 					c->rx_crc_err++;
373 					/* printk("crc error\n"); */
374 				}
375 				/* Shove the frame upstream */
376 			}
377 			else
378 			{
379 				/*
380 				 *	Drop the lock for RX processing, or
381 		 		 *	there are deadlocks
382 		 		 */
383 				z8530_rx_done(c);
384 				write_zsctrl(c, RES_Rx_CRC);
385 			}
386 		}
387 	}
388 	/*
389 	 *	Clear irq
390 	 */
391 	write_zsctrl(c, ERR_RES);
392 	write_zsctrl(c, RES_H_IUS);
393 }
394 
395 
396 /**
397  *	z8530_tx - Handle a PIO transmit event
398  *	@c: Z8530 channel to process
399  *
400  *	Z8530 transmit interrupt handler for the PIO mode. The basic
401  *	idea is to attempt to keep the FIFO fed. We fill as many bytes
402  *	in as possible; it's quite possible that we won't keep up with the
403  *	data rate otherwise.
404  */
405 
406 static void z8530_tx(struct z8530_channel *c)
407 {
408 	while(c->txcount) {
409 		/* FIFO full ? */
410 		if(!(read_zsreg(c, R0)&4))
411 			return;
412 		c->txcount--;
413 		/*
414 		 *	Shovel out the byte
415 		 */
416 		write_zsreg(c, R8, *c->tx_ptr++);
417 		write_zsctrl(c, RES_H_IUS);
418 		/* We are about to underflow */
419 		if(c->txcount==0)
420 		{
421 			write_zsctrl(c, RES_EOM_L);
422 			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
423 		}
424 	}
425 
426 
427 	/*
428 	 *	End of frame TX - fire another one
429 	 */
430 
431 	write_zsctrl(c, RES_Tx_P);
432 
433 	z8530_tx_done(c);
434 	write_zsctrl(c, RES_H_IUS);
435 }
436 
437 /**
438  *	z8530_status - Handle a PIO status exception
439  *	@chan: Z8530 channel to process
440  *
441  *	A status event occurred in PIO synchronous mode. There are several
442  *	reasons the chip will bother us here. A transmit underrun means we
443  *	failed to feed the chip fast enough and just broke a packet. A DCD
444  *	change is a line up or down.
445  */
446 
447 static void z8530_status(struct z8530_channel *chan)
448 {
449 	u8 status, altered;
450 
451 	status = read_zsreg(chan, R0);
452 	altered = chan->status ^ status;
453 
454 	chan->status = status;
455 
456 	if (status & TxEOM) {
457 /*		printk("%s: Tx underrun.\n", chan->dev->name); */
458 		chan->netdevice->stats.tx_fifo_errors++;
459 		write_zsctrl(chan, ERR_RES);
460 		z8530_tx_done(chan);
461 	}
462 
463 	if (altered & chan->dcdcheck)
464 	{
465 		if (status & chan->dcdcheck) {
466 			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
467 			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
468 			if (chan->netdevice)
469 				netif_carrier_on(chan->netdevice);
470 		} else {
471 			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
472 			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
473 			z8530_flush_fifo(chan);
474 			if (chan->netdevice)
475 				netif_carrier_off(chan->netdevice);
476 		}
477 
478 	}
479 	write_zsctrl(chan, RES_EXT_INT);
480 	write_zsctrl(chan, RES_H_IUS);
481 }
482 
483 struct z8530_irqhandler z8530_sync =
484 {
485 	z8530_rx,
486 	z8530_tx,
487 	z8530_status
488 };
489 
490 EXPORT_SYMBOL(z8530_sync);
491 
492 /**
493  *	z8530_dma_rx - Handle a DMA RX event
494  *	@chan: Channel to handle
495  *
496  *	Non bus mastering DMA interfaces for the Z8x30 devices. This
497  *	is really pretty PC specific. The DMA mode means that most receive
498  *	events are handled by the DMA hardware. We get a kick here only if
499  *	a frame ended.
500  */
501 
502 static void z8530_dma_rx(struct z8530_channel *chan)
503 {
504 	if(chan->rxdma_on)
505 	{
506 		/* Special condition check only */
507 		u8 status;
508 
509 		read_zsreg(chan, R7);
510 		read_zsreg(chan, R6);
511 
512 		status=read_zsreg(chan, R1);
513 
514 		if(status&END_FR)
515 		{
516 			z8530_rx_done(chan);	/* Fire up the next one */
517 		}
518 		write_zsctrl(chan, ERR_RES);
519 		write_zsctrl(chan, RES_H_IUS);
520 	}
521 	else
522 	{
523 		/* DMA is off right now, drain the slow way */
524 		z8530_rx(chan);
525 	}
526 }
527 
528 /**
529  *	z8530_dma_tx - Handle a DMA TX event
530  *	@chan:	The Z8530 channel to handle
531  *
532  *	We have received an interrupt while doing DMA transmissions. It
533  *	shouldn't happen. Scream loudly if it does.
534  */
535 
536 static void z8530_dma_tx(struct z8530_channel *chan)
537 {
538 	if(!chan->dma_tx)
539 	{
540 		printk(KERN_WARNING "Hey who turned the DMA off?\n");
541 		z8530_tx(chan);
542 		return;
543 	}
544 	/* This shouldn't occur in DMA mode */
545 	printk(KERN_ERR "DMA tx - bogus event!\n");
546 	z8530_tx(chan);
547 }
548 
549 /**
550  *	z8530_dma_status - Handle a DMA status exception
551  *	@chan: Z8530 channel to process
552  *
553  *	A status event occurred on the Z8530. We receive these for two reasons
554  *	when in DMA mode. Firstly if we finished a packet transfer we get one
555  *	and kick the next packet out. Secondly we may see a DCD change.
556  *
557  */
558 
559 static void z8530_dma_status(struct z8530_channel *chan)
560 {
561 	u8 status, altered;
562 
563 	status=read_zsreg(chan, R0);
564 	altered=chan->status^status;
565 
566 	chan->status=status;
567 
568 
569 	if(chan->dma_tx)
570 	{
571 		if(status&TxEOM)
572 		{
573 			unsigned long flags;
574 
575 			flags=claim_dma_lock();
576 			disable_dma(chan->txdma);
577 			clear_dma_ff(chan->txdma);
578 			chan->txdma_on=0;
579 			release_dma_lock(flags);
580 			z8530_tx_done(chan);
581 		}
582 	}
583 
584 	if (altered & chan->dcdcheck)
585 	{
586 		if (status & chan->dcdcheck) {
587 			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
588 			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
589 			if (chan->netdevice)
590 				netif_carrier_on(chan->netdevice);
591 		} else {
592 			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
593 			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
594 			z8530_flush_fifo(chan);
595 			if (chan->netdevice)
596 				netif_carrier_off(chan->netdevice);
597 		}
598 	}
599 
600 	write_zsctrl(chan, RES_EXT_INT);
601 	write_zsctrl(chan, RES_H_IUS);
602 }
603 
604 static struct z8530_irqhandler z8530_dma_sync = {
605 	z8530_dma_rx,
606 	z8530_dma_tx,
607 	z8530_dma_status
608 };
609 
610 static struct z8530_irqhandler z8530_txdma_sync = {
611 	z8530_rx,
612 	z8530_dma_tx,
613 	z8530_dma_status
614 };
615 
616 /**
617  *	z8530_rx_clear - Handle RX events from a stopped chip
618  *	@c: Z8530 channel to shut up
619  *
620  *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
621  *	For machines with PCI Z85x30 cards, or level triggered interrupts
622  *	(eg the MacII) we must clear the interrupt cause or die.
623  */
624 
625 
626 static void z8530_rx_clear(struct z8530_channel *c)
627 {
628 	/*
629 	 *	Data and status bytes
630 	 */
631 	u8 stat;
632 
633 	read_zsdata(c);
634 	stat=read_zsreg(c, R1);
635 
636 	if(stat&END_FR)
637 		write_zsctrl(c, RES_Rx_CRC);
638 	/*
639 	 *	Clear irq
640 	 */
641 	write_zsctrl(c, ERR_RES);
642 	write_zsctrl(c, RES_H_IUS);
643 }
644 
645 /**
646  *	z8530_tx_clear - Handle TX events from a stopped chip
647  *	@c: Z8530 channel to shut up
648  *
649  *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
650  *	For machines with PCI Z85x30 cards, or level triggered interrupts
651  *	(eg the MacII) we must clear the interrupt cause or die.
652  */
653 
654 static void z8530_tx_clear(struct z8530_channel *c)
655 {
656 	write_zsctrl(c, RES_Tx_P);
657 	write_zsctrl(c, RES_H_IUS);
658 }
659 
660 /**
661  *	z8530_status_clear - Handle status events from a stopped chip
662  *	@chan: Z8530 channel to shut up
663  *
664  *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
665  *	For machines with PCI Z85x30 cards, or level triggered interrupts
666  *	(eg the MacII) we must clear the interrupt cause or die.
667  */
668 
669 static void z8530_status_clear(struct z8530_channel *chan)
670 {
671 	u8 status=read_zsreg(chan, R0);
672 	if(status&TxEOM)
673 		write_zsctrl(chan, ERR_RES);
674 	write_zsctrl(chan, RES_EXT_INT);
675 	write_zsctrl(chan, RES_H_IUS);
676 }
677 
678 struct z8530_irqhandler z8530_nop=
679 {
680 	z8530_rx_clear,
681 	z8530_tx_clear,
682 	z8530_status_clear
683 };
684 
685 
686 EXPORT_SYMBOL(z8530_nop);
687 
688 /**
689  *	z8530_interrupt - Handle an interrupt from a Z8530
690  *	@irq: 	Interrupt number
691  *	@dev_id: The Z8530 device that is interrupting.
692  *
693  *	A Z85[2]30 device has stuck its hand in the air for attention.
694  *	We scan both the channels on the chip for events and then call
695  *	the channel specific call backs for each channel that has events.
696  *	We have to use callback functions because the two channels can be
697  *	in different modes.
698  *
699  *	Locking is done for the handlers. Note that locking is done
700  *	at the chip level (the 5uS delay issue is per chip not per
701  *	channel). c->lock for both channels points to dev->lock
702  */
703 
704 irqreturn_t z8530_interrupt(int irq, void *dev_id)
705 {
706 	struct z8530_dev *dev=dev_id;
707 	u8 uninitialized_var(intr);
708 	static volatile int locker=0;
709 	int work=0;
710 	struct z8530_irqhandler *irqs;
711 
712 	if(locker)
713 	{
714 		printk(KERN_ERR "IRQ re-enter\n");
715 		return IRQ_NONE;
716 	}
717 	locker=1;
718 
719 	spin_lock(&dev->lock);
720 
721 	while(++work<5000)
722 	{
723 
724 		intr = read_zsreg(&dev->chanA, R3);
725 		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
726 			break;
727 
728 		/* This holds the IRQ status. On the 8530 you must read it from chan
729 		   A even though it applies to the whole chip */
730 
731 		/* Now walk the chip and see what it is wanting - it may be
732 		   an IRQ for someone else remember */
733 
734 		irqs=dev->chanA.irqs;
735 
736 		if(intr & (CHARxIP|CHATxIP|CHAEXT))
737 		{
738 			if(intr&CHARxIP)
739 				irqs->rx(&dev->chanA);
740 			if(intr&CHATxIP)
741 				irqs->tx(&dev->chanA);
742 			if(intr&CHAEXT)
743 				irqs->status(&dev->chanA);
744 		}
745 
746 		irqs=dev->chanB.irqs;
747 
748 		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
749 		{
750 			if(intr&CHBRxIP)
751 				irqs->rx(&dev->chanB);
752 			if(intr&CHBTxIP)
753 				irqs->tx(&dev->chanB);
754 			if(intr&CHBEXT)
755 				irqs->status(&dev->chanB);
756 		}
757 	}
758 	spin_unlock(&dev->lock);
759 	if(work==5000)
760 		printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
761 	/* Ok all done */
762 	locker=0;
763 	return IRQ_HANDLED;
764 }
765 
766 EXPORT_SYMBOL(z8530_interrupt);
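/*
 *	A minimal wiring sketch for a hypothetical board driver: the dev_id
 *	given to request_irq() must be the struct z8530_dev, since that is
 *	what the handler above casts it back to.
 *
 *		if (request_irq(irq, z8530_interrupt, 0, "myboard",
 *				&board->sccdev))
 *			goto err_irq;
 *		board->sccdev.irq = irq;
 */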
767 
768 static char reg_init[16]=
769 {
770 	0,0,0,0,
771 	0,0,0,0,
772 	0,0,0,0,
773 	0x55,0,0,0
774 };
775 
776 
777 /**
778  *	z8530_sync_open - Open a Z8530 channel for PIO
779  *	@dev:	The network interface we are using
780  *	@c:	The Z8530 channel to open in synchronous PIO mode
781  *
782  *	Switch a Z8530 into synchronous mode without DMA assist. We
783  *	raise the RTS/DTR and commence network operation.
784  */
785 
786 int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
787 {
788 	unsigned long flags;
789 
790 	spin_lock_irqsave(c->lock, flags);
791 
792 	c->sync = 1;
793 	c->mtu = dev->mtu+64;
794 	c->count = 0;
795 	c->skb = NULL;
796 	c->skb2 = NULL;
797 	c->irqs = &z8530_sync;
798 
799 	/* This loads the double buffer up */
800 	z8530_rx_done(c);	/* Load the frame ring */
801 	z8530_rx_done(c);	/* Load the backup frame */
802 	z8530_rtsdtr(c,1);
803 	c->dma_tx = 0;
804 	c->regs[R1]|=TxINT_ENAB;
805 	write_zsreg(c, R1, c->regs[R1]);
806 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
807 
808 	spin_unlock_irqrestore(c->lock, flags);
809 	return 0;
810 }
811 
812 
813 EXPORT_SYMBOL(z8530_sync_open);
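/*
 *	Usage sketch, assuming a hypothetical board driver: the open path
 *	typically enables the channel and then loads a register table for
 *	the line type.
 *
 *		err = z8530_sync_open(netdev, &board->sccdev.chanA);
 *		if (!err)
 *			err = z8530_channel_load(&board->sccdev.chanA,
 *						 z8530_hdlc_kilostream);
 */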
814 
815 /**
816  *	z8530_sync_close - Close a PIO Z8530 channel
817  *	@dev: Network device to close
818  *	@c: Z8530 channel to disassociate and move to idle
819  *
820  *	Close down a Z8530 interface and switch its interrupt handlers
821  *	to discard future events.
822  */
823 
824 int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
825 {
826 	u8 chk;
827 	unsigned long flags;
828 
829 	spin_lock_irqsave(c->lock, flags);
830 	c->irqs = &z8530_nop;
831 	c->max = 0;
832 	c->sync = 0;
833 
834 	chk=read_zsreg(c,R0);
835 	write_zsreg(c, R3, c->regs[R3]);
836 	z8530_rtsdtr(c,0);
837 
838 	spin_unlock_irqrestore(c->lock, flags);
839 	return 0;
840 }
841 
842 EXPORT_SYMBOL(z8530_sync_close);
843 
844 /**
845  *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
846  *	@dev: The network device to attach
847  *	@c: The Z8530 channel to configure in sync DMA mode.
848  *
849  *	Set up a Z85x30 device for synchronous DMA in both directions. Two
850  *	ISA DMA channels must be available for this to work. We assume ISA
851  *	DMA driven I/O and PC limits on access.
852  */
853 
854 int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
855 {
856 	unsigned long cflags, dflags;
857 
858 	c->sync = 1;
859 	c->mtu = dev->mtu+64;
860 	c->count = 0;
861 	c->skb = NULL;
862 	c->skb2 = NULL;
863 	/*
864 	 *	Load the DMA interfaces up
865 	 */
866 	c->rxdma_on = 0;
867 	c->txdma_on = 0;
868 
869 	/*
870 	 *	Allocate the DMA flip buffers. Limit by page size.
871 	 *	Everyone runs 1500 mtu or less on wan links so this
872 	 *	should be fine.
873 	 */
874 
875 	if(c->mtu  > PAGE_SIZE/2)
876 		return -EMSGSIZE;
877 
878 	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
879 	if(c->rx_buf[0]==NULL)
880 		return -ENOBUFS;
881 	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
882 
883 	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
884 	if(c->tx_dma_buf[0]==NULL)
885 	{
886 		free_page((unsigned long)c->rx_buf[0]);
887 		c->rx_buf[0]=NULL;
888 		return -ENOBUFS;
889 	}
890 	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
891 
892 	c->tx_dma_used=0;
893 	c->dma_tx = 1;
894 	c->dma_num=0;
895 	c->dma_ready=1;
896 
897 	/*
898 	 *	Enable DMA control mode
899 	 */
900 
901 	spin_lock_irqsave(c->lock, cflags);
902 
903 	/*
904 	 *	TX DMA via DIR/REQ
905 	 */
906 
907 	c->regs[R14]|= DTRREQ;
908 	write_zsreg(c, R14, c->regs[R14]);
909 
910 	c->regs[R1]&= ~TxINT_ENAB;
911 	write_zsreg(c, R1, c->regs[R1]);
912 
913 	/*
914 	 *	RX DMA via W/Req
915 	 */
916 
917 	c->regs[R1]|= WT_FN_RDYFN;
918 	c->regs[R1]|= WT_RDY_RT;
919 	c->regs[R1]|= INT_ERR_Rx;
920 	c->regs[R1]&= ~TxINT_ENAB;
921 	write_zsreg(c, R1, c->regs[R1]);
922 	c->regs[R1]|= WT_RDY_ENAB;
923 	write_zsreg(c, R1, c->regs[R1]);
924 
925 	/*
926 	 *	DMA interrupts
927 	 */
928 
929 	/*
930 	 *	Set up the DMA configuration
931 	 */
932 
933 	dflags=claim_dma_lock();
934 
935 	disable_dma(c->rxdma);
936 	clear_dma_ff(c->rxdma);
937 	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
938 	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
939 	set_dma_count(c->rxdma, c->mtu);
940 	enable_dma(c->rxdma);
941 
942 	disable_dma(c->txdma);
943 	clear_dma_ff(c->txdma);
944 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
945 	disable_dma(c->txdma);
946 
947 	release_dma_lock(dflags);
948 
949 	/*
950 	 *	Select the DMA interrupt handlers
951 	 */
952 
953 	c->rxdma_on = 1;
954 	c->txdma_on = 1;
955 	c->tx_dma_used = 1;
956 
957 	c->irqs = &z8530_dma_sync;
958 	z8530_rtsdtr(c,1);
959 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
960 
961 	spin_unlock_irqrestore(c->lock, cflags);
962 
963 	return 0;
964 }
965 
966 EXPORT_SYMBOL(z8530_sync_dma_open);
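/*
 *	Setup sketch for the caller (hypothetical names): both ISA DMA
 *	channels must have been claimed and stored in the channel before
 *	z8530_sync_dma_open() is called.
 *
 *		if (request_dma(txdma, "myboard tx") ||
 *		    request_dma(rxdma, "myboard rx"))
 *			goto err_dma;
 *		board->sccdev.chanA.txdma = txdma;
 *		board->sccdev.chanA.rxdma = rxdma;
 *		err = z8530_sync_dma_open(netdev, &board->sccdev.chanA);
 */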
967 
968 /**
969  *	z8530_sync_dma_close - Close down DMA I/O
970  *	@dev: Network device to detach
971  *	@c: Z8530 channel to move into discard mode
972  *
973  *	Shut down a DMA mode synchronous interface. Halt the DMA, and
974  *	free the buffers.
975  */
976 
977 int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
978 {
979 	u8 chk;
980 	unsigned long flags;
981 
982 	c->irqs = &z8530_nop;
983 	c->max = 0;
984 	c->sync = 0;
985 
986 	/*
987 	 *	Disable the PC DMA channels
988 	 */
989 
990 	flags=claim_dma_lock();
991 	disable_dma(c->rxdma);
992 	clear_dma_ff(c->rxdma);
993 
994 	c->rxdma_on = 0;
995 
996 	disable_dma(c->txdma);
997 	clear_dma_ff(c->txdma);
998 	release_dma_lock(flags);
999 
1000 	c->txdma_on = 0;
1001 	c->tx_dma_used = 0;
1002 
1003 	spin_lock_irqsave(c->lock, flags);
1004 
1005 	/*
1006 	 *	Disable DMA control mode
1007 	 */
1008 
1009 	c->regs[R1]&= ~WT_RDY_ENAB;
1010 	write_zsreg(c, R1, c->regs[R1]);
1011 	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1012 	c->regs[R1]|= INT_ALL_Rx;
1013 	write_zsreg(c, R1, c->regs[R1]);
1014 	c->regs[R14]&= ~DTRREQ;
1015 	write_zsreg(c, R14, c->regs[R14]);
1016 
1017 	if(c->rx_buf[0])
1018 	{
1019 		free_page((unsigned long)c->rx_buf[0]);
1020 		c->rx_buf[0]=NULL;
1021 	}
1022 	if(c->tx_dma_buf[0])
1023 	{
1024 		free_page((unsigned  long)c->tx_dma_buf[0]);
1025 		c->tx_dma_buf[0]=NULL;
1026 	}
1027 	chk=read_zsreg(c,R0);
1028 	write_zsreg(c, R3, c->regs[R3]);
1029 	z8530_rtsdtr(c,0);
1030 
1031 	spin_unlock_irqrestore(c->lock, flags);
1032 
1033 	return 0;
1034 }
1035 
1036 EXPORT_SYMBOL(z8530_sync_dma_close);
1037 
1038 /**
1039  *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1040  *	@dev: The network device to attach
1041  *	@c: The Z8530 channel to configure in sync DMA mode.
1042  *
1043  *	Set up a Z85x30 device for synchronous DMA transmission. One
1044  *	ISA DMA channel must be available for this to work. The receive
1045  *	side is run in PIO mode, but then it has the bigger FIFO.
1046  */
1047 
1048 int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1049 {
1050 	unsigned long cflags, dflags;
1051 
1052 	printk("Opening sync interface for TX-DMA\n");
1053 	c->sync = 1;
1054 	c->mtu = dev->mtu+64;
1055 	c->count = 0;
1056 	c->skb = NULL;
1057 	c->skb2 = NULL;
1058 
1059 	/*
1060 	 *	Allocate the DMA flip buffers. Limit by page size.
1061 	 *	Everyone runs 1500 mtu or less on wan links so this
1062 	 *	should be fine.
1063 	 */
1064 
1065 	if(c->mtu  > PAGE_SIZE/2)
1066 		return -EMSGSIZE;
1067 
1068 	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1069 	if(c->tx_dma_buf[0]==NULL)
1070 		return -ENOBUFS;
1071 
1072 	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1073 
1074 
1075 	spin_lock_irqsave(c->lock, cflags);
1076 
1077 	/*
1078 	 *	Load the PIO receive ring
1079 	 */
1080 
1081 	z8530_rx_done(c);
1082 	z8530_rx_done(c);
1083 
1084  	/*
1085 	 *	Load the DMA interfaces up
1086 	 */
1087 
1088 	c->rxdma_on = 0;
1089 	c->txdma_on = 0;
1090 
1091 	c->tx_dma_used=0;
1092 	c->dma_num=0;
1093 	c->dma_ready=1;
1094 	c->dma_tx = 1;
1095 
1096  	/*
1097 	 *	Enable DMA control mode
1098 	 */
1099 
1100  	/*
1101 	 *	TX DMA via DIR/REQ
1102  	 */
1103 	c->regs[R14]|= DTRREQ;
1104 	write_zsreg(c, R14, c->regs[R14]);
1105 
1106 	c->regs[R1]&= ~TxINT_ENAB;
1107 	write_zsreg(c, R1, c->regs[R1]);
1108 
1109 	/*
1110 	 *	Set up the DMA configuration
1111 	 */
1112 
1113 	dflags = claim_dma_lock();
1114 
1115 	disable_dma(c->txdma);
1116 	clear_dma_ff(c->txdma);
1117 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
1118 	disable_dma(c->txdma);
1119 
1120 	release_dma_lock(dflags);
1121 
1122 	/*
1123 	 *	Select the DMA interrupt handlers
1124 	 */
1125 
1126 	c->rxdma_on = 0;
1127 	c->txdma_on = 1;
1128 	c->tx_dma_used = 1;
1129 
1130 	c->irqs = &z8530_txdma_sync;
1131 	z8530_rtsdtr(c,1);
1132 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1133 	spin_unlock_irqrestore(c->lock, cflags);
1134 
1135 	return 0;
1136 }
1137 
1138 EXPORT_SYMBOL(z8530_sync_txdma_open);
1139 
1140 /**
1141  *	z8530_sync_txdma_close - Close down a TX driven DMA channel
1142  *	@dev: Network device to detach
1143  *	@c: Z8530 channel to move into discard mode
1144  *
1145  *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1146  *	and  free the buffers.
1147  */
1148 
1149 int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1150 {
1151 	unsigned long dflags, cflags;
1152 	u8 chk;
1153 
1154 
1155 	spin_lock_irqsave(c->lock, cflags);
1156 
1157 	c->irqs = &z8530_nop;
1158 	c->max = 0;
1159 	c->sync = 0;
1160 
1161 	/*
1162 	 *	Disable the PC DMA channels
1163 	 */
1164 
1165 	dflags = claim_dma_lock();
1166 
1167 	disable_dma(c->txdma);
1168 	clear_dma_ff(c->txdma);
1169 	c->txdma_on = 0;
1170 	c->tx_dma_used = 0;
1171 
1172 	release_dma_lock(dflags);
1173 
1174 	/*
1175 	 *	Disable DMA control mode
1176 	 */
1177 
1178 	c->regs[R1]&= ~WT_RDY_ENAB;
1179 	write_zsreg(c, R1, c->regs[R1]);
1180 	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1181 	c->regs[R1]|= INT_ALL_Rx;
1182 	write_zsreg(c, R1, c->regs[R1]);
1183 	c->regs[R14]&= ~DTRREQ;
1184 	write_zsreg(c, R14, c->regs[R14]);
1185 
1186 	if(c->tx_dma_buf[0])
1187 	{
1188 		free_page((unsigned long)c->tx_dma_buf[0]);
1189 		c->tx_dma_buf[0]=NULL;
1190 	}
1191 	chk=read_zsreg(c,R0);
1192 	write_zsreg(c, R3, c->regs[R3]);
1193 	z8530_rtsdtr(c,0);
1194 
1195 	spin_unlock_irqrestore(c->lock, cflags);
1196 	return 0;
1197 }
1198 
1199 
1200 EXPORT_SYMBOL(z8530_sync_txdma_close);
1201 
1202 
1203 /*
1204  *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1205  *	it exists...
1206  */
1207 
1208 static char *z8530_type_name[]={
1209 	"Z8530",
1210 	"Z85C30",
1211 	"Z85230"
1212 };
1213 
1214 /**
1215  *	z8530_describe - Uniformly describe a Z8530 port
1216  *	@dev: Z8530 device to describe
1217  *	@mapping: string holding mapping type (eg "I/O" or "Mem")
1218  *	@io: the port value in question
1219  *
1220  *	Describe a Z8530 in a standard format. We must pass the I/O as
1221  *	the port offset isn't predictable. The main reason for this function
1222  *	is to try and get a common report format.
1223  */
1224 
1225 void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1226 {
1227 	printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
1228 		dev->name,
1229 		z8530_type_name[dev->type],
1230 		mapping,
1231 		Z8530_PORT_OF(io),
1232 		dev->irq);
1233 }
1234 
1235 EXPORT_SYMBOL(z8530_describe);
1236 
1237 /*
1238  *	Locked operation part of the z8530 init code
1239  */
1240 
1241 static inline int do_z8530_init(struct z8530_dev *dev)
1242 {
1243 	/* NOP the interrupt handlers first - we might get a
1244 	   floating IRQ transition when we reset the chip */
1245 	dev->chanA.irqs=&z8530_nop;
1246 	dev->chanB.irqs=&z8530_nop;
1247 	dev->chanA.dcdcheck=DCD;
1248 	dev->chanB.dcdcheck=DCD;
1249 
1250 	/* Reset the chip */
1251 	write_zsreg(&dev->chanA, R9, 0xC0);
1252 	udelay(200);
1253 	/* Now check it's valid */
1254 	write_zsreg(&dev->chanA, R12, 0xAA);
1255 	if(read_zsreg(&dev->chanA, R12)!=0xAA)
1256 		return -ENODEV;
1257 	write_zsreg(&dev->chanA, R12, 0x55);
1258 	if(read_zsreg(&dev->chanA, R12)!=0x55)
1259 		return -ENODEV;
1260 
1261 	dev->type=Z8530;
1262 
1263 	/*
1264 	 *	See the application note.
1265 	 */
1266 
1267 	write_zsreg(&dev->chanA, R15, 0x01);
1268 
1269 	/*
1270 	 *	If we can set the low bit of R15 then
1271 	 *	the chip is enhanced.
1272 	 */
1273 
1274 	if(read_zsreg(&dev->chanA, R15)==0x01)
1275 	{
1276 		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1277 		/* Put a char in the fifo */
1278 		write_zsreg(&dev->chanA, R8, 0);
1279 		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1280 			dev->type = Z85230;	/* Has a FIFO */
1281 		else
1282 			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
1283 	}
1284 
1285 	/*
1286 	 *	The code assumes R7' and friends are
1287 	 *	off. Use write_zsext() for these and keep
1288 	 *	this bit clear.
1289 	 */
1290 
1291 	write_zsreg(&dev->chanA, R15, 0);
1292 
1293 	/*
1294 	 *	At this point it looks like the chip is behaving
1295 	 */
1296 
1297 	memcpy(dev->chanA.regs, reg_init, 16);
1298 	memcpy(dev->chanB.regs, reg_init ,16);
1299 
1300 	return 0;
1301 }
1302 
1303 /**
1304  *	z8530_init - Initialise a Z8530 device
1305  *	@dev: Z8530 device to initialise.
1306  *
1307  *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1308  *	is present, identify the type and then program it to hopefully
1309  *	keep quite and behave. This matters a lot, a Z8530 in the wrong
1310  *	keep quiet and behave. This matters a lot; a Z8530 in the wrong
1311  *	state will sometimes get into stupid modes generating 10kHz
1312  *
1313  *	We set the interrupt handler up to discard any events, in case
1314  *	we get them during reset or setup.
1315  *
1316  *	Return 0 for success, or a negative value indicating the problem
1317  *	in errno form.
1318  */
1319 
1320 int z8530_init(struct z8530_dev *dev)
1321 {
1322 	unsigned long flags;
1323 	int ret;
1324 
1325 	/* Set up the chip level lock */
1326 	spin_lock_init(&dev->lock);
1327 	dev->chanA.lock = &dev->lock;
1328 	dev->chanB.lock = &dev->lock;
1329 
1330 	spin_lock_irqsave(&dev->lock, flags);
1331 	ret = do_z8530_init(dev);
1332 	spin_unlock_irqrestore(&dev->lock, flags);
1333 
1334 	return ret;
1335 }
1336 
1337 
1338 EXPORT_SYMBOL(z8530_init);
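/*
 *	Probe-order sketch for a hypothetical board driver: point the
 *	channels at their ports (offsets are board specific), hook the IRQ,
 *	then init, park the unused channel and report the chip.
 *
 *		board->sccdev.chanA.ctrlio = iobase + 1;
 *		board->sccdev.chanA.dataio = iobase + 3;
 *		if (z8530_init(&board->sccdev))
 *			goto err_init;
 *		z8530_channel_load(&board->sccdev.chanB, z8530_dead_port);
 *		z8530_describe(&board->sccdev, "I/O", iobase);
 */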
1339 
1340 /**
1341  *	z8530_shutdown - Shutdown a Z8530 device
1342  *	@dev: The Z8530 chip to shutdown
1343  *
1344  *	We set the interrupt handlers to silence any interrupts. We then
1345  *	reset the chip and wait 100uS to be sure the reset completed. Just
1346  *	in case the caller then tries to do stuff.
1347  *
1348  *	This is called without the lock held
1349  */
1350 
1351 int z8530_shutdown(struct z8530_dev *dev)
1352 {
1353 	unsigned long flags;
1354 	/* Reset the chip */
1355 
1356 	spin_lock_irqsave(&dev->lock, flags);
1357 	dev->chanA.irqs=&z8530_nop;
1358 	dev->chanB.irqs=&z8530_nop;
1359 	write_zsreg(&dev->chanA, R9, 0xC0);
1360 	/* We must lock the udelay, the chip is off limits here */
1361 	udelay(100);
1362 	spin_unlock_irqrestore(&dev->lock, flags);
1363 	return 0;
1364 }
1365 
1366 EXPORT_SYMBOL(z8530_shutdown);
1367 
1368 /**
1369  *	z8530_channel_load - Load channel data
1370  *	@c: Z8530 channel to configure
1371  *	@rtable: table of register, value pairs
1372  *	FIXME: ioctl to allow user uploaded tables
1373  *
1374  *	Load a Z8530 channel up from the system data. We use +16 to
1375  *	indicate the "prime" registers. The value 255 terminates the
1376  *	table.
1377  */
1378 
1379 int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1380 {
1381 	unsigned long flags;
1382 
1383 	spin_lock_irqsave(c->lock, flags);
1384 
1385 	while(*rtable!=255)
1386 	{
1387 		int reg=*rtable++;
1388 		if(reg>0x0F)
1389 			write_zsreg(c, R15, c->regs[15]|1);
1390 		write_zsreg(c, reg&0x0F, *rtable);
1391 		if(reg>0x0F)
1392 			write_zsreg(c, R15, c->regs[15]&~1);
1393 		c->regs[reg]=*rtable++;
1394 	}
1395 	c->rx_function=z8530_null_rx;
1396 	c->skb=NULL;
1397 	c->tx_skb=NULL;
1398 	c->tx_next_skb=NULL;
1399 	c->mtu=1500;
1400 	c->max=0;
1401 	c->count=0;
1402 	c->status=read_zsreg(c, R0);
1403 	c->sync=1;
1404 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1405 
1406 	spin_unlock_irqrestore(c->lock, flags);
1407 	return 0;
1408 }
1409 
1410 EXPORT_SYMBOL(z8530_channel_load);
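/*
 *	Example of the "+16" convention: in z8530_hdlc_kilostream_85230
 *	above, the entry "23, 3" is 16 + 7, so the loop sets bit 0 of R15,
 *	writes the enhanced register WR7' (auto TX/EOM per the table's
 *	comment) and then clears the bit again. A hypothetical table doing
 *	only that would be:
 *
 *		static u8 my_prime_tweak[] = {
 *			16 + 7,	3,
 *			255
 *		};
 */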
1411 
1412 
1413 /**
1414  *	z8530_tx_begin - Begin packet transmission
1415  *	@c: The Z8530 channel to kick
1416  *
1417  *	This is the speed sensitive side of transmission. If we are called
1418  *	and no buffer is being transmitted we commence the next buffer. If
1419  *	nothing is queued we idle the sync.
1420  *
1421  *	Note: We are handling this code path in the interrupt path, keep it
1422  *	fast or bad things will happen.
1423  *
1424  *	Called with the lock held.
1425  */
1426 
1427 static void z8530_tx_begin(struct z8530_channel *c)
1428 {
1429 	unsigned long flags;
1430 	if(c->tx_skb)
1431 		return;
1432 
1433 	c->tx_skb=c->tx_next_skb;
1434 	c->tx_next_skb=NULL;
1435 	c->tx_ptr=c->tx_next_ptr;
1436 
1437 	if(c->tx_skb==NULL)
1438 	{
1439 		/* Idle on */
1440 		if(c->dma_tx)
1441 		{
1442 			flags=claim_dma_lock();
1443 			disable_dma(c->txdma);
1444 			/*
1445 			 *	Check if we crapped out.
1446 			 */
1447 			if (get_dma_residue(c->txdma))
1448 			{
1449 				c->netdevice->stats.tx_dropped++;
1450 				c->netdevice->stats.tx_fifo_errors++;
1451 			}
1452 			release_dma_lock(flags);
1453 		}
1454 		c->txcount=0;
1455 	}
1456 	else
1457 	{
1458 		c->txcount=c->tx_skb->len;
1459 
1460 
1461 		if(c->dma_tx)
1462 		{
1463 			/*
1464 			 *	FIXME. DMA is broken for the original 8530,
1465 			 *	on the older parts we need to set a flag and
1466 			 *	wait for a further TX interrupt to fire this
1467 			 *	stage off
1468 			 */
1469 
1470 			flags=claim_dma_lock();
1471 			disable_dma(c->txdma);
1472 
1473 			/*
1474 			 *	These two are needed by the 8530/85C30
1475 			 *	and must be issued when idling.
1476 			 */
1477 
1478 			if(c->dev->type!=Z85230)
1479 			{
1480 				write_zsctrl(c, RES_Tx_CRC);
1481 				write_zsctrl(c, RES_EOM_L);
1482 			}
1483 			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1484 			clear_dma_ff(c->txdma);
1485 			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1486 			set_dma_count(c->txdma, c->txcount);
1487 			enable_dma(c->txdma);
1488 			release_dma_lock(flags);
1489 			write_zsctrl(c, RES_EOM_L);
1490 			write_zsreg(c, R5, c->regs[R5]|TxENAB);
1491 		}
1492 		else
1493 		{
1494 
1495 			/* ABUNDER off */
1496 			write_zsreg(c, R10, c->regs[10]);
1497 			write_zsctrl(c, RES_Tx_CRC);
1498 
1499 			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1500 			{
1501 				write_zsreg(c, R8, *c->tx_ptr++);
1502 				c->txcount--;
1503 			}
1504 
1505 		}
1506 	}
1507 	/*
1508 	 *	Since we emptied tx_skb we can ask for more
1509 	 */
1510 	netif_wake_queue(c->netdevice);
1511 }
1512 
1513 /**
1514  *	z8530_tx_done - TX complete callback
1515  *	@c: The channel that completed a transmit.
1516  *
1517  *	This is called when we complete a packet send. We wake the queue,
1518  *	start the next packet going and then free the buffer of the existing
1519  *	packet. This code is fairly timing sensitive.
1520  *
1521  *	Called with the register lock held.
1522  */
1523 
1524 static void z8530_tx_done(struct z8530_channel *c)
1525 {
1526 	struct sk_buff *skb;
1527 
1528 	/* Actually this can happen.*/
1529 	if (c->tx_skb == NULL)
1530 		return;
1531 
1532 	skb = c->tx_skb;
1533 	c->tx_skb = NULL;
1534 	z8530_tx_begin(c);
1535 	c->netdevice->stats.tx_packets++;
1536 	c->netdevice->stats.tx_bytes += skb->len;
1537 	dev_kfree_skb_irq(skb);
1538 }
1539 
1540 /**
1541  *	z8530_null_rx - Discard a packet
1542  *	@c: The channel the packet arrived on
1543  *	@skb: The buffer
1544  *
1545  *	We point the receive handler at this function when idle. Instead
1546  *	of processing the frames we get to throw them away.
1547  */
1548 
1549 void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1550 {
1551 	dev_kfree_skb_any(skb);
1552 }
1553 
1554 EXPORT_SYMBOL(z8530_null_rx);
1555 
1556 /**
1557  *	z8530_rx_done - Receive completion callback
1558  *	@c: The channel that completed a receive
1559  *
1560  *	A new packet is complete. Our goal here is to get back into receive
1561  *	mode as fast as possible. On the Z85230 we could change to using
1562  *	ESCC mode, but on the older chips we have no choice. We flip to the
1563  *	new buffer immediately in DMA mode so that the DMA of the next
1564  *	frame can occur while we are copying the previous buffer to an sk_buff
1565  *
1566  *	Called with the lock held
1567  */
1568 
1569 static void z8530_rx_done(struct z8530_channel *c)
1570 {
1571 	struct sk_buff *skb;
1572 	int ct;
1573 
1574 	/*
1575 	 *	Is our receive engine in DMA mode
1576 	 */
1577 
1578 	if(c->rxdma_on)
1579 	{
1580 		/*
1581 		 *	Save the ready state and the buffer currently
1582 		 *	being used as the DMA target
1583 		 */
1584 
1585 		int ready=c->dma_ready;
1586 		unsigned char *rxb=c->rx_buf[c->dma_num];
1587 		unsigned long flags;
1588 
1589 		/*
1590 		 *	Complete this DMA. Necessary to find the length.
1591 		 */
1592 
1593 		flags=claim_dma_lock();
1594 
1595 		disable_dma(c->rxdma);
1596 		clear_dma_ff(c->rxdma);
1597 		c->rxdma_on=0;
1598 		ct=c->mtu-get_dma_residue(c->rxdma);
1599 		if(ct<0)
1600 			ct=2;	/* Shit happens.. */
1601 		c->dma_ready=0;
1602 
1603 		/*
1604 		 *	Normal case: the other slot is free, start the next DMA
1605 		 *	into it immediately.
1606 		 */
1607 
1608 		if(ready)
1609 		{
1610 			c->dma_num^=1;
1611 			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1612 			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1613 			set_dma_count(c->rxdma, c->mtu);
1614 			c->rxdma_on = 1;
1615 			enable_dma(c->rxdma);
1616 			/* Stop any frames that we missed the head of
1617 			   from passing */
1618 			write_zsreg(c, R0, RES_Rx_CRC);
1619 		}
1620 		else
1621 			/* Can't occur as we don't re-enable the DMA irq until
1622 			   after the flip is done */
1623 			printk(KERN_WARNING "%s: DMA flip overrun!\n",
1624 			       c->netdevice->name);
1625 
1626 		release_dma_lock(flags);
1627 
1628 		/*
1629 		 *	Shove the old buffer into an sk_buff. We can't DMA
1630 		 *	directly into one on a PC - it might be above the 16Mb
1631 		 *	boundary. Optimisation - we could check to see if we
1632 		 *	can avoid the copy. Optimisation 2 - make the memcpy
1633 		 *	a copychecksum.
1634 		 */
1635 
1636 		skb = dev_alloc_skb(ct);
1637 		if (skb == NULL) {
1638 			c->netdevice->stats.rx_dropped++;
1639 			printk(KERN_WARNING "%s: Memory squeeze.\n",
1640 			       c->netdevice->name);
1641 		} else {
1642 			skb_put(skb, ct);
1643 			skb_copy_to_linear_data(skb, rxb, ct);
1644 			c->netdevice->stats.rx_packets++;
1645 			c->netdevice->stats.rx_bytes += ct;
1646 		}
1647 		c->dma_ready = 1;
1648 	} else {
1649 		RT_LOCK;
1650 		skb = c->skb;
1651 
1652 		/*
1653 		 *	The game we play for non DMA is similar. We want to
1654 		 *	get the controller set up for the next packet as fast
1655 		 *	as possible. We potentially only have one byte + the
1656 		 *	fifo length for this. Thus we want to flip to the new
1657 		 *	buffer and then mess around copying and allocating
1658 		 *	things. For the current case it doesn't matter but
1659 		 *	if you build a system where the sync irq isn't blocked
1660 		 *	by the kernel IRQ disable then you need only block the
1661 		 *	sync IRQ for the RT_LOCK area.
1662 		 *
1663 		 */
1664 		ct=c->count;
1665 
1666 		c->skb = c->skb2;
1667 		c->count = 0;
1668 		c->max = c->mtu;
1669 		if (c->skb) {
1670 			c->dptr = c->skb->data;
1671 			c->max = c->mtu;
1672 		} else {
1673 			c->count = 0;
1674 			c->max = 0;
1675 		}
1676 		RT_UNLOCK;
1677 
1678 		c->skb2 = dev_alloc_skb(c->mtu);
1679 		if (c->skb2 == NULL)
1680 			printk(KERN_WARNING "%s: memory squeeze.\n",
1681 			       c->netdevice->name);
1682 		else
1683 			skb_put(c->skb2, c->mtu);
1684 		c->netdevice->stats.rx_packets++;
1685 		c->netdevice->stats.rx_bytes += ct;
1686 	}
1687 	/*
1688 	 *	If we received a frame we must now process it.
1689 	 */
1690 	if (skb) {
1691 		skb_trim(skb, ct);
1692 		c->rx_function(c, skb);
1693 	} else {
1694 		c->netdevice->stats.rx_dropped++;
1695 		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1696 	}
1697 }
1698 
1699 /**
1700  *	spans_boundary - Check a packet can be ISA DMA'd
1701  *	@skb: The buffer to check
1702  *
1703  *	Returns true if the buffer crosses a DMA boundary on a PC. The poor
1704  *	thing can only DMA within a 64K block, not across the edges of it.
1705  */
1706 
1707 static inline int spans_boundary(struct sk_buff *skb)
1708 {
1709 	unsigned long a=(unsigned long)skb->data;
1710 	a^=(a+skb->len);
1711 	if(a&0x00010000)	/* If the 64K bit is different.. */
1712 		return 1;
1713 	return 0;
1714 }
1715 
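/*
 *	Worked example of the check above: a 0x40 byte buffer starting at
 *	0x0FFF0 ends at 0x10030, and 0x0FFF0 ^ 0x10030 = 0x1FFC0 has bit 16
 *	set, so the buffer straddles a 64K page and spans_boundary()
 *	returns 1. The same buffer at 0x20000 gives 0x20000 ^ 0x20040 = 0x40
 *	and returns 0.
 */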
1716 /**
1717  *	z8530_queue_xmit - Queue a packet
1718  *	@c: The channel to use
1719  *	@skb: The packet to kick down the channel
1720  *
1721  *	Queue a packet for transmission. Because the per-packet
1722  *	interrupt latencies we have to meet for the Z85230 are rather
1723  *	hard to hit even in DMA mode, we do the flip to the DMA buffer
1724  *	here if needed, rather than in the IRQ.
1725  *
1726  *	Called from the network code. The lock is not held at this
1727  *	point.
1728  */
1729 
1730 int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1731 {
1732 	unsigned long flags;
1733 
1734 	netif_stop_queue(c->netdevice);
1735 	if(c->tx_next_skb)
1736 	{
1737 		return 1;
1738 	}
1739 
1740 	/* PC SPECIFIC - DMA limits */
1741 
1742 	/*
1743 	 *	If we will DMA the transmit and it's gone over the ISA bus
1744 	 *	limit, then copy to the flip buffer
1745 	 */
1746 
1747 	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1748 	{
1749 		/*
1750 		 *	Send the flip buffer, and flip the flippy bit.
1751 		 *	We don't care which is used when just so long as
1752 		 *	we never use the same buffer twice in a row. Since
1753 		 *	only one buffer can be going out at a time the other
1754 		 *	has to be safe.
1755 		 */
1756 		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1757 		c->tx_dma_used^=1;	/* Flip temp buffer */
1758 		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1759 	}
1760 	else
1761 		c->tx_next_ptr=skb->data;
1762 	RT_LOCK;
1763 	c->tx_next_skb=skb;
1764 	RT_UNLOCK;
1765 
1766 	spin_lock_irqsave(c->lock, flags);
1767 	z8530_tx_begin(c);
1768 	spin_unlock_irqrestore(c->lock, flags);
1769 
1770 	return 0;
1771 }
1772 
1773 EXPORT_SYMBOL(z8530_queue_xmit);
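/*
 *	Sketch of a hard_start_xmit wrapper in a hypothetical board driver
 *	that keeps its private state in the generic HDLC priv pointer.
 *	z8530_queue_xmit() returns 0 once the packet is accepted and 1 if
 *	the single tx_next_skb slot is still busy, so:
 *
 *		static netdev_tx_t myboard_xmit(struct sk_buff *skb,
 *						struct net_device *d)
 *		{
 *			struct myboard *board = dev_to_hdlc(d)->priv;
 *
 *			if (z8530_queue_xmit(&board->sccdev.chanA, skb))
 *				return NETDEV_TX_BUSY;
 *			return NETDEV_TX_OK;
 *		}
 */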
1774 
1775 /*
1776  *	Module support
1777  */
1778 static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1779 
1780 static int __init z85230_init_driver(void)
1781 {
1782 	printk(banner);
1783 	return 0;
1784 }
1785 module_init(z85230_init_driver);
1786 
1787 static void __exit z85230_cleanup_driver(void)
1788 {
1789 }
1790 module_exit(z85230_cleanup_driver);
1791 
1792 MODULE_AUTHOR("Red Hat Inc.");
1793 MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1794 MODULE_LICENSE("GPL");
1795