• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *	(c) Copyright 2000, 2001 Red Hat Inc
6  *
7  *	Development of this driver was funded by Equiinet Ltd
8  *			http://www.equiinet.com
9  *
10  *	ChangeLog:
11  *
12  *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
13  *	unification of all the Z85x30 asynchronous drivers for real.
14  *
15  *	DMA now uses get_free_page as kmalloc buffers may span a 64K
16  *	boundary.
17  *
18  *	Modified for SMP safety and SMP locking by Alan Cox
19  *					<alan@lxorguk.ukuu.org.uk>
20  *
21  *	Performance
22  *
23  *	Z85230:
24  *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
25  *	X.25 is not unrealistic on all machines. DMA mode can in theory
26  *	handle T1/E1 quite nicely. In practice the limit seems to be about
27  *	512Kbit->1Mbit depending on motherboard.
28  *
29  *	Z85C30:
30  *	64K will take DMA, 9600 baud X.25 should be ok.
31  *
32  *	Z8530:
33  *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
34  */
35 
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 
38 #include <linux/module.h>
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/netdevice.h>
44 #include <linux/if_arp.h>
45 #include <linux/delay.h>
46 #include <linux/hdlc.h>
47 #include <linux/ioport.h>
48 #include <linux/init.h>
49 #include <linux/gfp.h>
50 #include <asm/dma.h>
51 #include <asm/io.h>
52 #define RT_LOCK
53 #define RT_UNLOCK
54 #include <linux/spinlock.h>
55 
56 #include "z85230.h"
57 
58 
59 /**
60  *	z8530_read_port - Architecture specific interface function
61  *	@p: port to read
62  *
63  *	Provided port access methods. The Comtrol SV11 requires no delays
64  *	between accesses and uses PC I/O. Some drivers may need a 5uS delay
65  *
66  *	In the longer term this should become an architecture specific
67  *	section so that this can become a generic driver interface for all
68  *	platforms. For now we only handle PC I/O ports with or without the
69  *	dread 5uS sanity delay.
70  *
71  *	The caller must hold sufficient locks to avoid violating the horrible
72  *	5uS delay rule.
73  */
74 
z8530_read_port(unsigned long p)75 static inline int z8530_read_port(unsigned long p)
76 {
77 	u8 r=inb(Z8530_PORT_OF(p));
78 	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
79 		udelay(5);
80 	return r;
81 }
82 
83 /**
84  *	z8530_write_port - Architecture specific interface function
85  *	@p: port to write
86  *	@d: value to write
87  *
88  *	Write a value to a port with delays if need be. Note that the
89  *	caller must hold locks to avoid read/writes from other contexts
90  *	violating the 5uS rule
91  *
92  *	In the longer term this should become an architecture specific
93  *	section so that this can become a generic driver interface for all
94  *	platforms. For now we only handle PC I/O ports with or without the
95  *	dread 5uS sanity delay.
96  */
97 
98 
static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d, Z8530_PORT_OF(p));
	/* Honour the 5uS access-recovery rule on slow ports */
	if (p & Z8530_PORT_SLEEP)
		udelay(5);
}
105 
106 
107 
108 static void z8530_rx_done(struct z8530_channel *c);
109 static void z8530_tx_done(struct z8530_channel *c);
110 
111 
112 /**
113  *	read_zsreg - Read a register from a Z85230
114  *	@c: Z8530 channel to read from (2 per chip)
115  *	@reg: Register to read
116  *	FIXME: Use a spinlock.
117  *
118  *	Most of the Z8530 registers are indexed off the control registers.
119  *	A read is done by writing to the control register and reading the
120  *	register back.  The caller must hold the lock
121  */
122 
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	/* Register 0 is read directly; everything else is indexed by
	   first writing the register number to the control port */
	if (reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}
129 
130 /**
131  *	read_zsdata - Read the data port of a Z8530 channel
132  *	@c: The Z8530 channel to read the data port from
133  *
134  *	The data port provides fast access to some things. We still
135  *	have all the 5uS delays to worry about.
136  */
137 
read_zsdata(struct z8530_channel * c)138 static inline u8 read_zsdata(struct z8530_channel *c)
139 {
140 	u8 r;
141 	r=z8530_read_port(c->dataio);
142 	return r;
143 }
144 
145 /**
146  *	write_zsreg - Write to a Z8530 channel register
147  *	@c: The Z8530 channel
148  *	@reg: Register number
149  *	@val: Value to write
150  *
151  *	Write a value to an indexed register. The caller must hold the lock
152  *	to honour the irritating delay rules. We know about register 0
153  *	being fast to access.
154  *
155  *      Assumes c->lock is held.
156  */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	/* Select the indexed register (register 0 needs no select),
	   then write the value. Caller holds the lock. */
	if (reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);
}
164 
165 /**
166  *	write_zsctrl - Write to a Z8530 control register
167  *	@c: The Z8530 channel
168  *	@val: Value to write
169  *
170  *	Write directly to the control register on the Z8530
171  */
172 
static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	/* Raw write straight to the control port (register 0) */
	z8530_write_port(c->ctrlio, val);
}
177 
178 /**
179  *	write_zsdata - Write to a Z8530 control register
180  *	@c: The Z8530 channel
181  *	@val: Value to write
182  *
183  *	Write directly to the data register on the Z8530
184  */
185 
186 
static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	/* Raw write straight to the channel data port */
	z8530_write_port(c->dataio, val);
}
191 
192 /*
193  *	Register loading parameters for a dead port
194  */
195 
/* (register, value) load table for a parked port: 255 is the
   table terminator, so nothing at all is programmed */
u8 z8530_dead_port[]=
{
	255
};

EXPORT_SYMBOL(z8530_dead_port);
202 
203 /*
204  *	Register loading parameters for currently supported circuit types
205  */
206 
207 
208 /*
209  *	Data clocked by telco end. This is the correct data for the UK
210  *	"kilostream" service, and most other similar services.
211  */
212 
/* Flat list of (register, value) pairs, terminated by 255.
   Loaded in order, so later writes to the same register (e.g. R1, R9)
   deliberately override the earlier ones once setup is complete. */
u8 z8530_hdlc_kilostream[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,	/* Re-enable: no vector, master irq enable */
	255	/* Terminator */
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);
233 
234 /*
235  *	As above but for enhanced chips.
236  */
237 
/* Same (register, value) table as z8530_hdlc_kilostream, with the
   85230-only extended-mode register (WR23) programmed as well. */
u8 z8530_hdlc_kilostream_85230[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	23,	3,		/* Extended mode AUTO TX and EOM*/

	255	/* Terminator */
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
260 
261 /**
262  *	z8530_flush_fifo - Flush on chip RX FIFO
263  *	@c: Channel to flush
264  *
265  *	Flush the receive FIFO. There is no specific option for this, we
266  *	blindly read bytes and discard them. Reading when there is no data
267  *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
268  *
269  *	All locking is handled for the caller. On return data may still be
270  *	present if it arrived during the flush.
271  */
272 
z8530_flush_fifo(struct z8530_channel * c)273 static void z8530_flush_fifo(struct z8530_channel *c)
274 {
275 	read_zsreg(c, R1);
276 	read_zsreg(c, R1);
277 	read_zsreg(c, R1);
278 	read_zsreg(c, R1);
279 	if(c->dev->type==Z85230)
280 	{
281 		read_zsreg(c, R1);
282 		read_zsreg(c, R1);
283 		read_zsreg(c, R1);
284 		read_zsreg(c, R1);
285 	}
286 }
287 
288 /**
289  *	z8530_rtsdtr - Control the outgoing DTS/RTS line
290  *	@c: The Z8530 channel to control;
291  *	@set: 1 to set, 0 to clear
292  *
293  *	Sets or clears DTR/RTS on the requested line. All locking is handled
294  *	by the caller. For now we assume all boards use the actual RTS/DTR
295  *	on the chip. Apparently one or two don't. We'll scream about them
296  *	later.
297  */
298 
z8530_rtsdtr(struct z8530_channel * c,int set)299 static void z8530_rtsdtr(struct z8530_channel *c, int set)
300 {
301 	if (set)
302 		c->regs[5] |= (RTS | DTR);
303 	else
304 		c->regs[5] &= ~(RTS | DTR);
305 	write_zsreg(c, R5, c->regs[5]);
306 }
307 
308 /**
309  *	z8530_rx - Handle a PIO receive event
310  *	@c: Z8530 channel to process
311  *
312  *	Receive handler for receiving in PIO mode. This is much like the
313  *	async one but not quite the same or as complex
314  *
315  *	Note: Its intended that this handler can easily be separated from
316  *	the main code to run realtime. That'll be needed for some machines
317  *	(eg to ever clock 64kbits on a sparc ;)).
318  *
319  *	The RT_LOCK macros don't do anything now. Keep the code covered
320  *	by them as short as possible in all circumstances - clocks cost
321  *	baud. The interrupt handler is assumed to be atomic w.r.t. to
322  *	other code - this is true in the RT case too.
323  *
324  *	We only cover the sync cases for this. If you want 2Mbit async
325  *	do it yourself but consider medical assistance first. This non DMA
326  *	synchronous mode is portable code. The DMA mode assumes PCI like
327  *	ISA DMA
328  *
329  *	Called with the device lock held
330  */
331 
static void z8530_rx(struct z8530_channel *c)
{
	u8 ch,stat;

	/* Drain the RX FIFO one byte at a time, pairing each data byte
	   with its RR1 special-condition status */
	while(1)
	{
		/* FIFO empty ? (RR0 bit 0 = Rx character available) */
		if(!(read_zsreg(c, R0)&1))
			break;
		ch=read_zsdata(c);
		stat=read_zsreg(c, R1);

		/*
		 *	Store the byte only while there is room in the
		 *	current receive buffer; excess bytes are dropped
		 */
		if(c->count < c->max)
		{
			*c->dptr++=ch;
			c->count++;
		}

		if(stat&END_FR)
		{

			/*
			 *	Frame ended - error ?
			 */
			if(stat&(Rx_OVR|CRC_ERR))
			{
				/* Rewind the buffer and return */
				if(c->skb)
					c->dptr=c->skb->data;
				c->count=0;
				if(stat&Rx_OVR)
				{
					pr_warn("%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			}
			else
			{
				/*
				 *	Drop the lock for RX processing, or
		 		 *	there are deadlocks
		 		 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}
	/*
	 *	Clear irq: reset error latches and drop out of the
	 *	interrupt-under-service state
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}
394 
395 
396 /**
397  *	z8530_tx - Handle a PIO transmit event
398  *	@c: Z8530 channel to process
399  *
400  *	Z8530 transmit interrupt handler for the PIO mode. The basic
401  *	idea is to attempt to keep the FIFO fed. We fill as many bytes
402  *	in as possible, its quite possible that we won't keep up with the
403  *	data rate otherwise.
404  */
405 
static void z8530_tx(struct z8530_channel *c)
{
	/* Feed the TX FIFO until it fills or the frame is exhausted */
	while(c->txcount) {
		/* FIFO full ? (RR0 bit 2 = Tx buffer empty; clear means full) */
		if(!(read_zsreg(c, R0)&4))
			return;
		c->txcount--;
		/*
		 *	Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow: arm end-of-message handling
		   and stop treating the coming underrun as an abort */
		if(c->txcount==0)
		{
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
		}
	}


	/*
	 *	End of frame TX - fire another one
	 */

	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}
436 
437 /**
438  *	z8530_status - Handle a PIO status exception
439  *	@chan: Z8530 channel to process
440  *
441  *	A status event occurred in PIO synchronous mode. There are several
442  *	reasons the chip will bother us here. A transmit underrun means we
443  *	failed to feed the chip fast enough and just broke a packet. A DCD
444  *	change is a line up or down.
445  */
446 
static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	/* Snapshot RR0 and work out which status bits changed since the
	   last event */
	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;

	chan->status = status;

	/* Transmit underrun: we failed to keep the FIFO fed, so the
	   frame in flight is broken - count it and restart TX */
	if (status & TxEOM) {
/*		printk("%s: Tx underrun.\n", chan->dev->name); */
		chan->netdevice->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	/* DCD transition: track carrier and gate the receiver on it */
	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}

	}
	/* Ack the external/status interrupt */
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}
482 
/* Interrupt vector table used while a channel runs synchronous PIO
   (no DMA assist) */
struct z8530_irqhandler z8530_sync = {
	.rx = z8530_rx,
	.tx = z8530_tx,
	.status = z8530_status,
};

EXPORT_SYMBOL(z8530_sync);
490 
491 /**
492  *	z8530_dma_rx - Handle a DMA RX event
493  *	@chan: Channel to handle
494  *
495  *	Non bus mastering DMA interfaces for the Z8x30 devices. This
496  *	is really pretty PC specific. The DMA mode means that most receive
497  *	events are handled by the DMA hardware. We get a kick here only if
498  *	a frame ended.
499  */
500 
static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
		/* Special condition check only - the DMA hardware has
		   already moved the data for us */
		u8 status;

		/* NOTE(review): R7/R6 values are read and discarded;
		   presumably the reads themselves are required by the
		   chip here - confirm against the SCC manual */
		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status=read_zsreg(chan, R1);

		if(status&END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}
		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}
526 
527 /**
528  *	z8530_dma_tx - Handle a DMA TX event
529  *	@chan:	The Z8530 channel to handle
530  *
531  *	We have received an interrupt while doing DMA transmissions. It
532  *	shouldn't happen. Scream loudly if it does.
533  */
534 
z8530_dma_tx(struct z8530_channel * chan)535 static void z8530_dma_tx(struct z8530_channel *chan)
536 {
537 	if(!chan->dma_tx)
538 	{
539 		pr_warn("Hey who turned the DMA off?\n");
540 		z8530_tx(chan);
541 		return;
542 	}
543 	/* This shouldn't occur in DMA mode */
544 	pr_err("DMA tx - bogus event!\n");
545 	z8530_tx(chan);
546 }
547 
548 /**
549  *	z8530_dma_status - Handle a DMA status exception
550  *	@chan: Z8530 channel to process
551  *
552  *	A status event occurred on the Z8530. We receive these for two reasons
553  *	when in DMA mode. Firstly if we finished a packet transfer we get one
554  *	and kick the next packet out. Secondly we may see a DCD change.
555  *
556  */
557 
static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	/* Snapshot RR0 and diff against the previous status */
	status=read_zsreg(chan, R0);
	altered=chan->status^status;

	chan->status=status;


	/* In TX DMA mode, TxEOM marks end of a DMA-driven packet:
	   shut the TX DMA channel down and kick the next frame out */
	if(chan->dma_tx)
	{
		if(status&TxEOM)
		{
			unsigned long flags;

			flags=claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on=0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	/* DCD transition: track carrier and gate the receiver on it */
	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}

	/* Ack the external/status interrupt */
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}
602 
/* Vectors for full DMA mode: both directions DMA driven */
static struct z8530_irqhandler z8530_dma_sync = {
	.rx = z8530_dma_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};

/* Vectors for split mode: DMA transmit, PIO receive */
static struct z8530_irqhandler z8530_txdma_sync = {
	.rx = z8530_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};
614 
615 /**
616  *	z8530_rx_clear - Handle RX events from a stopped chip
617  *	@c: Z8530 channel to shut up
618  *
619  *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
620  *	For machines with PCI Z85x30 cards, or level triggered interrupts
621  *	(eg the MacII) we must clear the interrupt cause or die.
622  */
623 
624 
z8530_rx_clear(struct z8530_channel * c)625 static void z8530_rx_clear(struct z8530_channel *c)
626 {
627 	/*
628 	 *	Data and status bytes
629 	 */
630 	u8 stat;
631 
632 	read_zsdata(c);
633 	stat=read_zsreg(c, R1);
634 
635 	if(stat&END_FR)
636 		write_zsctrl(c, RES_Rx_CRC);
637 	/*
638 	 *	Clear irq
639 	 */
640 	write_zsctrl(c, ERR_RES);
641 	write_zsctrl(c, RES_H_IUS);
642 }
643 
644 /**
645  *	z8530_tx_clear - Handle TX events from a stopped chip
646  *	@c: Z8530 channel to shut up
647  *
648  *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
649  *	For machines with PCI Z85x30 cards, or level triggered interrupts
650  *	(eg the MacII) we must clear the interrupt cause or die.
651  */
652 
z8530_tx_clear(struct z8530_channel * c)653 static void z8530_tx_clear(struct z8530_channel *c)
654 {
655 	write_zsctrl(c, RES_Tx_P);
656 	write_zsctrl(c, RES_H_IUS);
657 }
658 
659 /**
660  *	z8530_status_clear - Handle status events from a stopped chip
661  *	@chan: Z8530 channel to shut up
662  *
663  *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
664  *	For machines with PCI Z85x30 cards, or level triggered interrupts
665  *	(eg the MacII) we must clear the interrupt cause or die.
666  */
667 
z8530_status_clear(struct z8530_channel * chan)668 static void z8530_status_clear(struct z8530_channel *chan)
669 {
670 	u8 status=read_zsreg(chan, R0);
671 	if(status&TxEOM)
672 		write_zsctrl(chan, ERR_RES);
673 	write_zsctrl(chan, RES_EXT_INT);
674 	write_zsctrl(chan, RES_H_IUS);
675 }
676 
/* Vectors for a parked channel: just ack everything so level
   triggered interrupt lines are released */
struct z8530_irqhandler z8530_nop = {
	.rx = z8530_rx_clear,
	.tx = z8530_tx_clear,
	.status = z8530_status_clear,
};


EXPORT_SYMBOL(z8530_nop);
685 
686 /**
687  *	z8530_interrupt - Handle an interrupt from a Z8530
688  *	@irq: 	Interrupt number
689  *	@dev_id: The Z8530 device that is interrupting.
690  *
691  *	A Z85[2]30 device has stuck its hand in the air for attention.
692  *	We scan both the channels on the chip for events and then call
693  *	the channel specific call backs for each channel that has events.
694  *	We have to use callback functions because the two channels can be
695  *	in different modes.
696  *
697  *	Locking is done for the handlers. Note that locking is done
698  *	at the chip level (the 5uS delay issue is per chip not per
699  *	channel). c->lock for both channels points to dev->lock
700  */
701 
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
	struct z8530_dev *dev=dev_id;
	u8 intr;
	/* NOTE(review): plain flag, not atomic - this is only a
	   re-entry tripwire, real exclusion comes from dev->lock */
	static volatile int locker=0;
	int work=0;
	struct z8530_irqhandler *irqs;

	if(locker)
	{
		pr_err("IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker=1;

	spin_lock(&dev->lock);

	/* Bounded loop: bail out after 5000 passes so a stuck chip
	   cannot wedge the machine inside the handler */
	while(++work<5000)
	{

		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
			break;

		/* This holds the IRQ status. On the 8530 you must read it from chan
		   A even though it applies to the whole chip */

		/* Now walk the chip and see what it is wanting - it may be
		   an IRQ for someone else remember */

		irqs=dev->chanA.irqs;

		if(intr & (CHARxIP|CHATxIP|CHAEXT))
		{
			if(intr&CHARxIP)
				irqs->rx(&dev->chanA);
			if(intr&CHATxIP)
				irqs->tx(&dev->chanA);
			if(intr&CHAEXT)
				irqs->status(&dev->chanA);
		}

		irqs=dev->chanB.irqs;

		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
		{
			if(intr&CHBRxIP)
				irqs->rx(&dev->chanB);
			if(intr&CHBTxIP)
				irqs->tx(&dev->chanB);
			if(intr&CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if(work==5000)
		pr_err("%s: interrupt jammed - abort(0x%X)!\n",
		       dev->name, intr);
	/* Ok all done */
	locker=0;
	return IRQ_HANDLED;
}
764 
765 EXPORT_SYMBOL(z8530_interrupt);
766 
/* Default shadow register values; only index 12 is non-zero
   (presumably a baud-rate time constant - confirm against users
   of this table elsewhere in the file) */
static const u8 reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};
774 
775 
776 /**
777  *	z8530_sync_open - Open a Z8530 channel for PIO
778  *	@dev:	The network interface we are using
779  *	@c:	The Z8530 channel to open in synchronous PIO mode
780  *
781  *	Switch a Z8530 into synchronous mode without DMA assist. We
782  *	raise the RTS/DTR and commence network operation.
783  */
784 
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu+64;	/* Headroom beyond the interface MTU */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* This loads the double buffer up */
	z8530_rx_done(c);	/* Load the frame ring */
	z8530_rx_done(c);	/* Load the backup frame */
	z8530_rtsdtr(c,1);
	c->dma_tx = 0;
	/* Enable TX interrupts and the receiver via the shadow regs */
	c->regs[R1]|=TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;	/* Always succeeds */
}
810 
811 
812 EXPORT_SYMBOL(z8530_sync_open);
813 
814 /**
815  *	z8530_sync_close - Close a PIO Z8530 channel
816  *	@dev: Network device to close
817  *	@c: Z8530 channel to disassociate and move to idle
818  *
819  *	Close down a Z8530 interface and switch its interrupt handlers
820  *	to discard future events.
821  */
822 
int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);
	/* Park the channel: future interrupts are just acked away */
	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/* Value intentionally unused - presumably the RR0 read itself
	   matters to the chip here; confirm against the SCC manual */
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);	/* Restore R3 (receiver off) */
	z8530_rtsdtr(c,0);			/* Drop RTS/DTR */

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}
840 
841 EXPORT_SYMBOL(z8530_sync_close);
842 
843 /**
844  *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
845  *	@dev: The network device to attach
846  *	@c: The Z8530 channel to configure in sync DMA mode.
847  *
848  *	Set up a Z85x30 device for synchronous DMA in both directions. Two
849  *	ISA DMA channels must be available for this to work. We assume ISA
850  *	DMA driven I/O and PC limits on access.
851  */
852 
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;	/* Headroom beyond the interface MTU */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 *	Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu  > PAGE_SIZE/2)
		return -EMSGSIZE;

	/* One page per direction, split into two half-page flip buffers */
	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		/* Undo the RX allocation on failure */
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 *	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	TX DMA via DIR/REQ
	 */

	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	/* DMA replaces the TX interrupt path */
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	RX DMA via W/Req
	 */

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	/* Enable the ready line only after the mode bits are in place */
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	DMA interrupts
	 */

	/*
	 *	Set up the DMA configuration
	 */

	dflags=claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);	/* 0x10 = autoinit */
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	/* TX channel is programmed but left disabled until a frame is
	   queued */
	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}
964 
965 EXPORT_SYMBOL(z8530_sync_dma_open);
966 
967 /**
968  *	z8530_sync_dma_close - Close down DMA I/O
969  *	@dev: Network device to detach
970  *	@c: Z8530 channel to move into discard mode
971  *
972  *	Shut down a DMA mode synchronous interface. Halt the DMA, and
973  *	free the buffers.
974  */
975 
int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	/* Park the channel first so stray interrupts are just acked */
	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	/* 'flags' is reused: first for the DMA lock, then (after
	   release) for the channel spinlock - the uses never overlap */
	flags=claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);

	c->rxdma_on = 0;

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	release_dma_lock(flags);

	c->txdma_on = 0;
	c->tx_dma_used = 0;

	spin_lock_irqsave(c->lock, flags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	/* Back to interrupt-driven receive */
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	/* Release the flip buffers allocated at open time */
	if(c->rx_buf[0])
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
	}
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned  long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	/* Value intentionally unused - presumably the RR0 read itself
	   matters to the chip here; confirm against the SCC manual */
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}
1034 
1035 EXPORT_SYMBOL(z8530_sync_dma_close);
1036 
1037 /**
1038  *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1039  *	@dev: The network device to attach
1040  *	@c: The Z8530 channel to configure in sync DMA mode.
1041  *
1042  *	Set up a Z85x30 device for synchronous DMA transmission. One
1043  *	ISA DMA channel must be available for this to work. The receive
1044  *	side is run in PIO mode, but then it has the bigger FIFO.
1045  */
1046 
z8530_sync_txdma_open(struct net_device * dev,struct z8530_channel * c)1047 int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1048 {
1049 	unsigned long cflags, dflags;
1050 
1051 	printk("Opening sync interface for TX-DMA\n");
1052 	c->sync = 1;
1053 	c->mtu = dev->mtu+64;
1054 	c->count = 0;
1055 	c->skb = NULL;
1056 	c->skb2 = NULL;
1057 
1058 	/*
1059 	 *	Allocate the DMA flip buffers. Limit by page size.
1060 	 *	Everyone runs 1500 mtu or less on wan links so this
1061 	 *	should be fine.
1062 	 */
1063 
1064 	if(c->mtu  > PAGE_SIZE/2)
1065 		return -EMSGSIZE;
1066 
1067 	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1068 	if(c->tx_dma_buf[0]==NULL)
1069 		return -ENOBUFS;
1070 
1071 	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1072 
1073 
1074 	spin_lock_irqsave(c->lock, cflags);
1075 
1076 	/*
1077 	 *	Load the PIO receive ring
1078 	 */
1079 
1080 	z8530_rx_done(c);
1081 	z8530_rx_done(c);
1082 
1083  	/*
1084 	 *	Load the DMA interfaces up
1085 	 */
1086 
1087 	c->rxdma_on = 0;
1088 	c->txdma_on = 0;
1089 
1090 	c->tx_dma_used=0;
1091 	c->dma_num=0;
1092 	c->dma_ready=1;
1093 	c->dma_tx = 1;
1094 
1095  	/*
1096 	 *	Enable DMA control mode
1097 	 */
1098 
1099  	/*
1100 	 *	TX DMA via DIR/REQ
1101  	 */
1102 	c->regs[R14]|= DTRREQ;
1103 	write_zsreg(c, R14, c->regs[R14]);
1104 
1105 	c->regs[R1]&= ~TxINT_ENAB;
1106 	write_zsreg(c, R1, c->regs[R1]);
1107 
1108 	/*
1109 	 *	Set up the DMA configuration
1110 	 */
1111 
1112 	dflags = claim_dma_lock();
1113 
1114 	disable_dma(c->txdma);
1115 	clear_dma_ff(c->txdma);
1116 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
1117 	disable_dma(c->txdma);
1118 
1119 	release_dma_lock(dflags);
1120 
1121 	/*
1122 	 *	Select the DMA interrupt handlers
1123 	 */
1124 
1125 	c->rxdma_on = 0;
1126 	c->txdma_on = 1;
1127 	c->tx_dma_used = 1;
1128 
1129 	c->irqs = &z8530_txdma_sync;
1130 	z8530_rtsdtr(c,1);
1131 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1132 	spin_unlock_irqrestore(c->lock, cflags);
1133 
1134 	return 0;
1135 }
1136 
1137 EXPORT_SYMBOL(z8530_sync_txdma_open);
1138 
1139 /**
1140  *	z8530_sync_txdma_close - Close down a TX driven DMA channel
1141  *	@dev: Network device to detach
1142  *	@c: Z8530 channel to move into discard mode
1143  *
1144  *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1145  *	and  free the buffers.
1146  */
1147 
int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long dflags, cflags;
	u8 chk;


	spin_lock_irqsave(c->lock, cflags);

	/* Park the channel: future interrupts are just acked away */
	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	c->txdma_on = 0;
	c->tx_dma_used = 0;

	release_dma_lock(dflags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	/* Back to interrupt-driven receive */
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	/* Release the TX flip buffer allocated at open time */
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	/* Value intentionally unused - presumably the RR0 read itself
	   matters to the chip here; confirm against the SCC manual */
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, cflags);
	return 0;
}
1197 
1198 
1199 EXPORT_SYMBOL(z8530_sync_txdma_close);
1200 
1201 
1202 /*
1203  *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1204  *	it exists...
1205  */
1206 
1207 static const char *z8530_type_name[]={
1208 	"Z8530",
1209 	"Z85C30",
1210 	"Z85230"
1211 };
1212 
1213 /**
1214  *	z8530_describe - Uniformly describe a Z8530 port
1215  *	@dev: Z8530 device to describe
1216  *	@mapping: string holding mapping type (eg "I/O" or "Mem")
1217  *	@io: the port value in question
1218  *
1219  *	Describe a Z8530 in a standard format. We must pass the I/O as
1220  *	the port offset isn't predictable. The main reason for this function
1221  *	is to try and get a common format of report.
1222  */
1223 
z8530_describe(struct z8530_dev * dev,char * mapping,unsigned long io)1224 void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1225 {
1226 	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
1227 		dev->name,
1228 		z8530_type_name[dev->type],
1229 		mapping,
1230 		Z8530_PORT_OF(io),
1231 		dev->irq);
1232 }
1233 
1234 EXPORT_SYMBOL(z8530_describe);
1235 
1236 /*
1237  *	Locked operation part of the z8530 init code
1238  */
1239 
do_z8530_init(struct z8530_dev * dev)1240 static inline int do_z8530_init(struct z8530_dev *dev)
1241 {
1242 	/* NOP the interrupt handlers first - we might get a
1243 	   floating IRQ transition when we reset the chip */
1244 	dev->chanA.irqs=&z8530_nop;
1245 	dev->chanB.irqs=&z8530_nop;
1246 	dev->chanA.dcdcheck=DCD;
1247 	dev->chanB.dcdcheck=DCD;
1248 
1249 	/* Reset the chip */
1250 	write_zsreg(&dev->chanA, R9, 0xC0);
1251 	udelay(200);
1252 	/* Now check its valid */
1253 	write_zsreg(&dev->chanA, R12, 0xAA);
1254 	if(read_zsreg(&dev->chanA, R12)!=0xAA)
1255 		return -ENODEV;
1256 	write_zsreg(&dev->chanA, R12, 0x55);
1257 	if(read_zsreg(&dev->chanA, R12)!=0x55)
1258 		return -ENODEV;
1259 
1260 	dev->type=Z8530;
1261 
1262 	/*
1263 	 *	See the application note.
1264 	 */
1265 
1266 	write_zsreg(&dev->chanA, R15, 0x01);
1267 
1268 	/*
1269 	 *	If we can set the low bit of R15 then
1270 	 *	the chip is enhanced.
1271 	 */
1272 
1273 	if(read_zsreg(&dev->chanA, R15)==0x01)
1274 	{
1275 		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1276 		/* Put a char in the fifo */
1277 		write_zsreg(&dev->chanA, R8, 0);
1278 		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1279 			dev->type = Z85230;	/* Has a FIFO */
1280 		else
1281 			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
1282 	}
1283 
1284 	/*
1285 	 *	The code assumes R7' and friends are
1286 	 *	off. Use write_zsext() for these and keep
1287 	 *	this bit clear.
1288 	 */
1289 
1290 	write_zsreg(&dev->chanA, R15, 0);
1291 
1292 	/*
1293 	 *	At this point it looks like the chip is behaving
1294 	 */
1295 
1296 	memcpy(dev->chanA.regs, reg_init, 16);
1297 	memcpy(dev->chanB.regs, reg_init ,16);
1298 
1299 	return 0;
1300 }
1301 
1302 /**
1303  *	z8530_init - Initialise a Z8530 device
1304  *	@dev: Z8530 device to initialise.
1305  *
1306  *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1307  *	is present, identify the type and then program it to hopefully
1308  *	keep quite and behave. This matters a lot, a Z8530 in the wrong
1309  *	state will sometimes get into stupid modes generating 10Khz
1310  *	interrupt streams and the like.
1311  *
1312  *	We set the interrupt handler up to discard any events, in case
1313  *	we get them during reset or setp.
1314  *
1315  *	Return 0 for success, or a negative value indicating the problem
1316  *	in errno form.
1317  */
1318 
z8530_init(struct z8530_dev * dev)1319 int z8530_init(struct z8530_dev *dev)
1320 {
1321 	unsigned long flags;
1322 	int ret;
1323 
1324 	/* Set up the chip level lock */
1325 	spin_lock_init(&dev->lock);
1326 	dev->chanA.lock = &dev->lock;
1327 	dev->chanB.lock = &dev->lock;
1328 
1329 	spin_lock_irqsave(&dev->lock, flags);
1330 	ret = do_z8530_init(dev);
1331 	spin_unlock_irqrestore(&dev->lock, flags);
1332 
1333 	return ret;
1334 }
1335 
1336 
1337 EXPORT_SYMBOL(z8530_init);
1338 
1339 /**
1340  *	z8530_shutdown - Shutdown a Z8530 device
1341  *	@dev: The Z8530 chip to shutdown
1342  *
1343  *	We set the interrupt handlers to silence any interrupts. We then
1344  *	reset the chip and wait 100uS to be sure the reset completed. Just
1345  *	in case the caller then tries to do stuff.
1346  *
1347  *	This is called without the lock held
1348  */
1349 
z8530_shutdown(struct z8530_dev * dev)1350 int z8530_shutdown(struct z8530_dev *dev)
1351 {
1352 	unsigned long flags;
1353 	/* Reset the chip */
1354 
1355 	spin_lock_irqsave(&dev->lock, flags);
1356 	dev->chanA.irqs=&z8530_nop;
1357 	dev->chanB.irqs=&z8530_nop;
1358 	write_zsreg(&dev->chanA, R9, 0xC0);
1359 	/* We must lock the udelay, the chip is offlimits here */
1360 	udelay(100);
1361 	spin_unlock_irqrestore(&dev->lock, flags);
1362 	return 0;
1363 }
1364 
1365 EXPORT_SYMBOL(z8530_shutdown);
1366 
1367 /**
1368  *	z8530_channel_load - Load channel data
1369  *	@c: Z8530 channel to configure
1370  *	@rtable: table of register, value pairs
1371  *	FIXME: ioctl to allow user uploaded tables
1372  *
1373  *	Load a Z8530 channel up from the system data. We use +16 to
1374  *	indicate the "prime" registers. The value 255 terminates the
1375  *	table.
1376  */
1377 
z8530_channel_load(struct z8530_channel * c,u8 * rtable)1378 int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1379 {
1380 	unsigned long flags;
1381 
1382 	spin_lock_irqsave(c->lock, flags);
1383 
1384 	while(*rtable!=255)
1385 	{
1386 		int reg=*rtable++;
1387 		if(reg>0x0F)
1388 			write_zsreg(c, R15, c->regs[15]|1);
1389 		write_zsreg(c, reg&0x0F, *rtable);
1390 		if(reg>0x0F)
1391 			write_zsreg(c, R15, c->regs[15]&~1);
1392 		c->regs[reg]=*rtable++;
1393 	}
1394 	c->rx_function=z8530_null_rx;
1395 	c->skb=NULL;
1396 	c->tx_skb=NULL;
1397 	c->tx_next_skb=NULL;
1398 	c->mtu=1500;
1399 	c->max=0;
1400 	c->count=0;
1401 	c->status=read_zsreg(c, R0);
1402 	c->sync=1;
1403 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1404 
1405 	spin_unlock_irqrestore(c->lock, flags);
1406 	return 0;
1407 }
1408 
1409 EXPORT_SYMBOL(z8530_channel_load);
1410 
1411 
1412 /**
1413  *	z8530_tx_begin - Begin packet transmission
1414  *	@c: The Z8530 channel to kick
1415  *
1416  *	This is the speed sensitive side of transmission. If we are called
1417  *	and no buffer is being transmitted we commence the next buffer. If
1418  *	nothing is queued we idle the sync.
1419  *
1420  *	Note: We are handling this code path in the interrupt path, keep it
1421  *	fast or bad things will happen.
1422  *
1423  *	Called with the lock held.
1424  */
1425 
z8530_tx_begin(struct z8530_channel * c)1426 static void z8530_tx_begin(struct z8530_channel *c)
1427 {
1428 	unsigned long flags;
1429 	if(c->tx_skb)
1430 		return;
1431 
1432 	c->tx_skb=c->tx_next_skb;
1433 	c->tx_next_skb=NULL;
1434 	c->tx_ptr=c->tx_next_ptr;
1435 
1436 	if(c->tx_skb==NULL)
1437 	{
1438 		/* Idle on */
1439 		if(c->dma_tx)
1440 		{
1441 			flags=claim_dma_lock();
1442 			disable_dma(c->txdma);
1443 			/*
1444 			 *	Check if we crapped out.
1445 			 */
1446 			if (get_dma_residue(c->txdma))
1447 			{
1448 				c->netdevice->stats.tx_dropped++;
1449 				c->netdevice->stats.tx_fifo_errors++;
1450 			}
1451 			release_dma_lock(flags);
1452 		}
1453 		c->txcount=0;
1454 	}
1455 	else
1456 	{
1457 		c->txcount=c->tx_skb->len;
1458 
1459 
1460 		if(c->dma_tx)
1461 		{
1462 			/*
1463 			 *	FIXME. DMA is broken for the original 8530,
1464 			 *	on the older parts we need to set a flag and
1465 			 *	wait for a further TX interrupt to fire this
1466 			 *	stage off
1467 			 */
1468 
1469 			flags=claim_dma_lock();
1470 			disable_dma(c->txdma);
1471 
1472 			/*
1473 			 *	These two are needed by the 8530/85C30
1474 			 *	and must be issued when idling.
1475 			 */
1476 
1477 			if(c->dev->type!=Z85230)
1478 			{
1479 				write_zsctrl(c, RES_Tx_CRC);
1480 				write_zsctrl(c, RES_EOM_L);
1481 			}
1482 			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1483 			clear_dma_ff(c->txdma);
1484 			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1485 			set_dma_count(c->txdma, c->txcount);
1486 			enable_dma(c->txdma);
1487 			release_dma_lock(flags);
1488 			write_zsctrl(c, RES_EOM_L);
1489 			write_zsreg(c, R5, c->regs[R5]|TxENAB);
1490 		}
1491 		else
1492 		{
1493 
1494 			/* ABUNDER off */
1495 			write_zsreg(c, R10, c->regs[10]);
1496 			write_zsctrl(c, RES_Tx_CRC);
1497 
1498 			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1499 			{
1500 				write_zsreg(c, R8, *c->tx_ptr++);
1501 				c->txcount--;
1502 			}
1503 
1504 		}
1505 	}
1506 	/*
1507 	 *	Since we emptied tx_skb we can ask for more
1508 	 */
1509 	netif_wake_queue(c->netdevice);
1510 }
1511 
1512 /**
1513  *	z8530_tx_done - TX complete callback
1514  *	@c: The channel that completed a transmit.
1515  *
1516  *	This is called when we complete a packet send. We wake the queue,
1517  *	start the next packet going and then free the buffer of the existing
1518  *	packet. This code is fairly timing sensitive.
1519  *
1520  *	Called with the register lock held.
1521  */
1522 
z8530_tx_done(struct z8530_channel * c)1523 static void z8530_tx_done(struct z8530_channel *c)
1524 {
1525 	struct sk_buff *skb;
1526 
1527 	/* Actually this can happen.*/
1528 	if (c->tx_skb == NULL)
1529 		return;
1530 
1531 	skb = c->tx_skb;
1532 	c->tx_skb = NULL;
1533 	z8530_tx_begin(c);
1534 	c->netdevice->stats.tx_packets++;
1535 	c->netdevice->stats.tx_bytes += skb->len;
1536 	dev_consume_skb_irq(skb);
1537 }
1538 
1539 /**
1540  *	z8530_null_rx - Discard a packet
1541  *	@c: The channel the packet arrived on
1542  *	@skb: The buffer
1543  *
1544  *	We point the receive handler at this function when idle. Instead
1545  *	of processing the frames we get to throw them away.
1546  */
1547 
z8530_null_rx(struct z8530_channel * c,struct sk_buff * skb)1548 void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1549 {
1550 	dev_kfree_skb_any(skb);
1551 }
1552 
1553 EXPORT_SYMBOL(z8530_null_rx);
1554 
1555 /**
1556  *	z8530_rx_done - Receive completion callback
1557  *	@c: The channel that completed a receive
1558  *
1559  *	A new packet is complete. Our goal here is to get back into receive
1560  *	mode as fast as possible. On the Z85230 we could change to using
1561  *	ESCC mode, but on the older chips we have no choice. We flip to the
1562  *	new buffer immediately in DMA mode so that the DMA of the next
1563  *	frame can occur while we are copying the previous buffer to an sk_buff
1564  *
1565  *	Called with the lock held
1566  */
1567 
z8530_rx_done(struct z8530_channel * c)1568 static void z8530_rx_done(struct z8530_channel *c)
1569 {
1570 	struct sk_buff *skb;
1571 	int ct;
1572 
1573 	/*
1574 	 *	Is our receive engine in DMA mode
1575 	 */
1576 
1577 	if(c->rxdma_on)
1578 	{
1579 		/*
1580 		 *	Save the ready state and the buffer currently
1581 		 *	being used as the DMA target
1582 		 */
1583 
1584 		int ready=c->dma_ready;
1585 		unsigned char *rxb=c->rx_buf[c->dma_num];
1586 		unsigned long flags;
1587 
1588 		/*
1589 		 *	Complete this DMA. Necessary to find the length
1590 		 */
1591 
1592 		flags=claim_dma_lock();
1593 
1594 		disable_dma(c->rxdma);
1595 		clear_dma_ff(c->rxdma);
1596 		c->rxdma_on=0;
1597 		ct=c->mtu-get_dma_residue(c->rxdma);
1598 		if(ct<0)
1599 			ct=2;	/* Shit happens.. */
1600 		c->dma_ready=0;
1601 
1602 		/*
1603 		 *	Normal case: the other slot is free, start the next DMA
1604 		 *	into it immediately.
1605 		 */
1606 
1607 		if(ready)
1608 		{
1609 			c->dma_num^=1;
1610 			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1611 			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1612 			set_dma_count(c->rxdma, c->mtu);
1613 			c->rxdma_on = 1;
1614 			enable_dma(c->rxdma);
1615 			/* Stop any frames that we missed the head of
1616 			   from passing */
1617 			write_zsreg(c, R0, RES_Rx_CRC);
1618 		}
1619 		else
1620 			/* Can't occur as we dont reenable the DMA irq until
1621 			   after the flip is done */
1622 			netdev_warn(c->netdevice, "DMA flip overrun!\n");
1623 
1624 		release_dma_lock(flags);
1625 
1626 		/*
1627 		 *	Shove the old buffer into an sk_buff. We can't DMA
1628 		 *	directly into one on a PC - it might be above the 16Mb
1629 		 *	boundary. Optimisation - we could check to see if we
1630 		 *	can avoid the copy. Optimisation 2 - make the memcpy
1631 		 *	a copychecksum.
1632 		 */
1633 
1634 		skb = dev_alloc_skb(ct);
1635 		if (skb == NULL) {
1636 			c->netdevice->stats.rx_dropped++;
1637 			netdev_warn(c->netdevice, "Memory squeeze\n");
1638 		} else {
1639 			skb_put(skb, ct);
1640 			skb_copy_to_linear_data(skb, rxb, ct);
1641 			c->netdevice->stats.rx_packets++;
1642 			c->netdevice->stats.rx_bytes += ct;
1643 		}
1644 		c->dma_ready = 1;
1645 	} else {
1646 		RT_LOCK;
1647 		skb = c->skb;
1648 
1649 		/*
1650 		 *	The game we play for non DMA is similar. We want to
1651 		 *	get the controller set up for the next packet as fast
1652 		 *	as possible. We potentially only have one byte + the
1653 		 *	fifo length for this. Thus we want to flip to the new
1654 		 *	buffer and then mess around copying and allocating
1655 		 *	things. For the current case it doesn't matter but
1656 		 *	if you build a system where the sync irq isn't blocked
1657 		 *	by the kernel IRQ disable then you need only block the
1658 		 *	sync IRQ for the RT_LOCK area.
1659 		 *
1660 		 */
1661 		ct=c->count;
1662 
1663 		c->skb = c->skb2;
1664 		c->count = 0;
1665 		c->max = c->mtu;
1666 		if (c->skb) {
1667 			c->dptr = c->skb->data;
1668 			c->max = c->mtu;
1669 		} else {
1670 			c->count = 0;
1671 			c->max = 0;
1672 		}
1673 		RT_UNLOCK;
1674 
1675 		c->skb2 = dev_alloc_skb(c->mtu);
1676 		if (c->skb2 == NULL)
1677 			netdev_warn(c->netdevice, "memory squeeze\n");
1678 		else
1679 			skb_put(c->skb2, c->mtu);
1680 		c->netdevice->stats.rx_packets++;
1681 		c->netdevice->stats.rx_bytes += ct;
1682 	}
1683 	/*
1684 	 *	If we received a frame we must now process it.
1685 	 */
1686 	if (skb) {
1687 		skb_trim(skb, ct);
1688 		c->rx_function(c, skb);
1689 	} else {
1690 		c->netdevice->stats.rx_dropped++;
1691 		netdev_err(c->netdevice, "Lost a frame\n");
1692 	}
1693 }
1694 
1695 /**
1696  *	spans_boundary - Check a packet can be ISA DMA'd
1697  *	@skb: The buffer to check
1698  *
1699  *	Returns true if the buffer cross a DMA boundary on a PC. The poor
1700  *	thing can only DMA within a 64K block not across the edges of it.
1701  */
1702 
spans_boundary(struct sk_buff * skb)1703 static inline int spans_boundary(struct sk_buff *skb)
1704 {
1705 	unsigned long a=(unsigned long)skb->data;
1706 	a^=(a+skb->len);
1707 	if(a&0x00010000)	/* If the 64K bit is different.. */
1708 		return 1;
1709 	return 0;
1710 }
1711 
1712 /**
1713  *	z8530_queue_xmit - Queue a packet
1714  *	@c: The channel to use
1715  *	@skb: The packet to kick down the channel
1716  *
1717  *	Queue a packet for transmission. Because we have rather
1718  *	hard to hit interrupt latencies for the Z85230 per packet
1719  *	even in DMA mode we do the flip to DMA buffer if needed here
1720  *	not in the IRQ.
1721  *
1722  *	Called from the network code. The lock is not held at this
1723  *	point.
1724  */
1725 
z8530_queue_xmit(struct z8530_channel * c,struct sk_buff * skb)1726 netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1727 {
1728 	unsigned long flags;
1729 
1730 	netif_stop_queue(c->netdevice);
1731 	if(c->tx_next_skb)
1732 		return NETDEV_TX_BUSY;
1733 
1734 
1735 	/* PC SPECIFIC - DMA limits */
1736 
1737 	/*
1738 	 *	If we will DMA the transmit and its gone over the ISA bus
1739 	 *	limit, then copy to the flip buffer
1740 	 */
1741 
1742 	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1743 	{
1744 		/*
1745 		 *	Send the flip buffer, and flip the flippy bit.
1746 		 *	We don't care which is used when just so long as
1747 		 *	we never use the same buffer twice in a row. Since
1748 		 *	only one buffer can be going out at a time the other
1749 		 *	has to be safe.
1750 		 */
1751 		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1752 		c->tx_dma_used^=1;	/* Flip temp buffer */
1753 		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1754 	}
1755 	else
1756 		c->tx_next_ptr=skb->data;
1757 	RT_LOCK;
1758 	c->tx_next_skb=skb;
1759 	RT_UNLOCK;
1760 
1761 	spin_lock_irqsave(c->lock, flags);
1762 	z8530_tx_begin(c);
1763 	spin_unlock_irqrestore(c->lock, flags);
1764 
1765 	return NETDEV_TX_OK;
1766 }
1767 
1768 EXPORT_SYMBOL(z8530_queue_xmit);
1769 
1770 /*
1771  *	Module support
1772  */
1773 static const char banner[] __initconst =
1774 	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1775 
z85230_init_driver(void)1776 static int __init z85230_init_driver(void)
1777 {
1778 	printk(banner);
1779 	return 0;
1780 }
1781 module_init(z85230_init_driver);
1782 
z85230_cleanup_driver(void)1783 static void __exit z85230_cleanup_driver(void)
1784 {
1785 }
1786 module_exit(z85230_cleanup_driver);
1787 
1788 MODULE_AUTHOR("Red Hat Inc.");
1789 MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1790 MODULE_LICENSE("GPL");
1791