1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
3 * (c) Copyright 2000, 2001 Red Hat Inc
4 *
5 * Development of this driver was funded by Equiinet Ltd
6 * http://www.equiinet.com
7 *
8 * ChangeLog:
9 *
10 * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
11 * unification of all the Z85x30 asynchronous drivers for real.
12 *
13 * DMA now uses get_free_page as kmalloc buffers may span a 64K
14 * boundary.
15 *
16 * Modified for SMP safety and SMP locking by Alan Cox
17 * <alan@lxorguk.ukuu.org.uk>
18 *
19 * Performance
20 *
21 * Z85230:
22 * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
23 * X.25 is not unrealistic on all machines. DMA mode can in theory
24 * handle T1/E1 quite nicely. In practice the limit seems to be about
25 * 512Kbit->1Mbit depending on motherboard.
26 *
27 * Z85C30:
28 * 64K will take DMA, 9600 baud X.25 should be ok.
29 *
30 * Z8530:
31 * Synchronous mode without DMA is unlikely to pass about 2400 baud.
32 */
33
34 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/mm.h>
39 #include <linux/net.h>
40 #include <linux/skbuff.h>
41 #include <linux/netdevice.h>
42 #include <linux/if_arp.h>
43 #include <linux/delay.h>
44 #include <linux/hdlc.h>
45 #include <linux/ioport.h>
46 #include <linux/init.h>
47 #include <linux/gfp.h>
48 #include <asm/dma.h>
49 #include <asm/io.h>
50 #define RT_LOCK
51 #define RT_UNLOCK
52 #include <linux/spinlock.h>
53
54 #include "z85230.h"
55
56 /**
57 * z8530_read_port - Architecture specific interface function
58 * @p: port to read
59 *
60 * Provided port access methods. The Comtrol SV11 requires no delays
61 * between accesses and uses PC I/O. Some drivers may need a 5uS delay
62 *
63 * In the longer term this should become an architecture specific
64 * section so that this can become a generic driver interface for all
65 * platforms. For now we only handle PC I/O ports with or without the
66 * dread 5uS sanity delay.
67 *
68 * The caller must hold sufficient locks to avoid violating the horrible
69 * 5uS delay rule.
70 */
71
z8530_read_port(unsigned long p)72 static inline int z8530_read_port(unsigned long p)
73 {
74 u8 r = inb(Z8530_PORT_OF(p));
75
76 if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
77 udelay(5);
78 return r;
79 }
80
81 /**
82 * z8530_write_port - Architecture specific interface function
83 * @p: port to write
84 * @d: value to write
85 *
86 * Write a value to a port with delays if need be. Note that the
87 * caller must hold locks to avoid read/writes from other contexts
88 * violating the 5uS rule
89 *
90 * In the longer term this should become an architecture specific
91 * section so that this can become a generic driver interface for all
92 * platforms. For now we only handle PC I/O ports with or without the
93 * dread 5uS sanity delay.
94 */
95
static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d, Z8530_PORT_OF(p));

	/* Honour the chip's 5uS recovery time on boards that need it */
	if (p & Z8530_PORT_SLEEP)
		udelay(5);
}
102
103 static void z8530_rx_done(struct z8530_channel *c);
104 static void z8530_tx_done(struct z8530_channel *c);
105
106 /**
107 * read_zsreg - Read a register from a Z85230
108 * @c: Z8530 channel to read from (2 per chip)
109 * @reg: Register to read
110 * FIXME: Use a spinlock.
111 *
112 * Most of the Z8530 registers are indexed off the control registers.
113 * A read is done by writing to the control register and reading the
114 * register back. The caller must hold the lock
115 */
116
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	/* Register 0 is selected by default; any other register needs a
	 * pointer write to the control port first.
	 */
	if (reg != 0)
		z8530_write_port(c->ctrlio, reg);

	return z8530_read_port(c->ctrlio);
}
123
124 /**
125 * read_zsdata - Read the data port of a Z8530 channel
126 * @c: The Z8530 channel to read the data port from
127 *
128 * The data port provides fast access to some things. We still
129 * have all the 5uS delays to worry about.
130 */
131
read_zsdata(struct z8530_channel * c)132 static inline u8 read_zsdata(struct z8530_channel *c)
133 {
134 u8 r;
135
136 r = z8530_read_port(c->dataio);
137 return r;
138 }
139
140 /**
141 * write_zsreg - Write to a Z8530 channel register
142 * @c: The Z8530 channel
143 * @reg: Register number
144 * @val: Value to write
145 *
146 * Write a value to an indexed register. The caller must hold the lock
147 * to honour the irritating delay rules. We know about register 0
148 * being fast to access.
149 *
150 * Assumes c->lock is held.
151 */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	/* Register 0 is the default target; other registers require a
	 * pointer write before the value write.
	 */
	if (reg != 0)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);
}
158
159 /**
160 * write_zsctrl - Write to a Z8530 control register
161 * @c: The Z8530 channel
162 * @val: Value to write
163 *
164 * Write directly to the control register on the Z8530
165 */
166
static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	/* Direct control-port write; used for command codes (RES_H_IUS etc) */
	z8530_write_port(c->ctrlio, val);
}
171
/**
 * write_zsdata - Write to a Z8530 data register
 * @c: The Z8530 channel
 * @val: Value to write
 *
 * Write directly to the data register on the Z8530
 */
static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}
183
/* Register loading parameters for a dead port
 */

/* The tables below are (register, value) pairs ended by a 255 marker;
 * a lone 255 therefore programs nothing at all.
 */
u8 z8530_dead_port[] = {
	255
};
EXPORT_SYMBOL(z8530_dead_port);
191
192 /* Register loading parameters for currently supported circuit types
193 */
194
195 /* Data clocked by telco end. This is the correct data for the UK
196 * "kilostream" service, and most other similar services.
197 */
198
/* (register, value) pairs, terminated by 255 */
u8 z8530_hdlc_kilostream[] = {
	4, SYNC_ENAB | SDLC | X1CLK,
	2, 0, /* No vector */
	1, 0,
	3, ENT_HM | RxCRC_ENAB | Rx8,
	5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
	9, 0, /* Disable interrupts */
	6, 0xFF,
	7, FLAG,
	10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/
	11, TCTRxCP,
	14, DISDPLL,
	15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
	1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
	9, NV | MIE | NORESET,
	255	/* terminator */
};
EXPORT_SYMBOL(z8530_hdlc_kilostream);
217
218 /* As above but for enhanced chips.
219 */
220
/* (register, value) pairs, terminated by 255. Identical to the plain
 * kilostream table except for the extra WR23 write enabling the 85230's
 * extended mode features.
 */
u8 z8530_hdlc_kilostream_85230[] = {
	4, SYNC_ENAB | SDLC | X1CLK,
	2, 0, /* No vector */
	1, 0,
	3, ENT_HM | RxCRC_ENAB | Rx8,
	5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
	9, 0, /* Disable interrupts */
	6, 0xFF,
	7, FLAG,
	10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */
	11, TCTRxCP,
	14, DISDPLL,
	15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
	1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
	9, NV | MIE | NORESET,
	23, 3, /* Extended mode AUTO TX and EOM*/

	255	/* terminator */
};
EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
241
242 /**
243 * z8530_flush_fifo - Flush on chip RX FIFO
244 * @c: Channel to flush
245 *
246 * Flush the receive FIFO. There is no specific option for this, we
247 * blindly read bytes and discard them. Reading when there is no data
248 * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
249 *
250 * All locking is handled for the caller. On return data may still be
251 * present if it arrived during the flush.
252 */
253
z8530_flush_fifo(struct z8530_channel * c)254 static void z8530_flush_fifo(struct z8530_channel *c)
255 {
256 read_zsreg(c, R1);
257 read_zsreg(c, R1);
258 read_zsreg(c, R1);
259 read_zsreg(c, R1);
260 if (c->dev->type == Z85230) {
261 read_zsreg(c, R1);
262 read_zsreg(c, R1);
263 read_zsreg(c, R1);
264 read_zsreg(c, R1);
265 }
266 }
267
268 /**
269 * z8530_rtsdtr - Control the outgoing DTS/RTS line
270 * @c: The Z8530 channel to control;
271 * @set: 1 to set, 0 to clear
272 *
273 * Sets or clears DTR/RTS on the requested line. All locking is handled
274 * by the caller. For now we assume all boards use the actual RTS/DTR
275 * on the chip. Apparently one or two don't. We'll scream about them
276 * later.
277 */
278
z8530_rtsdtr(struct z8530_channel * c,int set)279 static void z8530_rtsdtr(struct z8530_channel *c, int set)
280 {
281 if (set)
282 c->regs[5] |= (RTS | DTR);
283 else
284 c->regs[5] &= ~(RTS | DTR);
285 write_zsreg(c, R5, c->regs[5]);
286 }
287
288 /**
289 * z8530_rx - Handle a PIO receive event
290 * @c: Z8530 channel to process
291 *
292 * Receive handler for receiving in PIO mode. This is much like the
293 * async one but not quite the same or as complex
294 *
295 * Note: Its intended that this handler can easily be separated from
296 * the main code to run realtime. That'll be needed for some machines
297 * (eg to ever clock 64kbits on a sparc ;)).
298 *
299 * The RT_LOCK macros don't do anything now. Keep the code covered
300 * by them as short as possible in all circumstances - clocks cost
301 * baud. The interrupt handler is assumed to be atomic w.r.t. to
302 * other code - this is true in the RT case too.
303 *
304 * We only cover the sync cases for this. If you want 2Mbit async
305 * do it yourself but consider medical assistance first. This non DMA
306 * synchronous mode is portable code. The DMA mode assumes PCI like
307 * ISA DMA
308 *
309 * Called with the device lock held
310 */
311
static void z8530_rx(struct z8530_channel *c)
{
	u8 ch, stat;

	while (1) {
		/* FIFO empty ? (bit 0 of RR0 is "Rx character available") */
		if (!(read_zsreg(c, R0) & 1))
			break;
		/* Pull the byte, then its matching status from RR1 */
		ch = read_zsdata(c);
		stat = read_zsreg(c, R1);

		/* Overrun ?
		 * Bytes beyond c->max are silently dropped; the frame will
		 * be caught by the error/length checks below.
		 */
		if (c->count < c->max) {
			*c->dptr++ = ch;
			c->count++;
		}

		if (stat & END_FR) {
			/* Error ?
			 */
			if (stat & (Rx_OVR | CRC_ERR)) {
				/* Rewind the buffer and return */
				if (c->skb)
					c->dptr = c->skb->data;
				c->count = 0;
				if (stat & Rx_OVR) {
					pr_warn("%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if (stat & CRC_ERR) {
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			} else {
				/* Drop the lock for RX processing, or
				 * there are deadlocks
				 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}
	/* Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}
361
362 /**
363 * z8530_tx - Handle a PIO transmit event
364 * @c: Z8530 channel to process
365 *
366 * Z8530 transmit interrupt handler for the PIO mode. The basic
367 * idea is to attempt to keep the FIFO fed. We fill as many bytes
 * in as possible, it's quite possible that we won't keep up with the
369 * data rate otherwise.
370 */
371
static void z8530_tx(struct z8530_channel *c)
{
	while (c->txcount) {
		/* FIFO full ? (bit 2 of RR0 signals Tx buffer space) */
		if (!(read_zsreg(c, R0) & 4))
			return;
		c->txcount--;
		/* Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow */
		if (c->txcount == 0) {
			/* Last byte queued: reset EOM latch and let the
			 * chip close the frame instead of aborting.
			 */
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
		}
	}

	/* End of frame TX - fire another one
	 */

	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}
398
399 /**
400 * z8530_status - Handle a PIO status exception
401 * @chan: Z8530 channel to process
402 *
403 * A status event occurred in PIO synchronous mode. There are several
404 * reasons the chip will bother us here. A transmit underrun means we
405 * failed to feed the chip fast enough and just broke a packet. A DCD
406 * change is a line up or down.
407 */
408
static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	/* Diff against the last snapshot so we only act on transitions */
	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;

	chan->status = status;

	if (status & TxEOM) {
		/* Transmit underran: we broke the frame in flight */
		/* printk("%s: Tx underrun.\n", chan->dev->name); */
		chan->netdevice->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	if (altered & chan->dcdcheck) {
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			/* Carrier up: re-enable the receiver */
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			/* Carrier down: stop RX and dump whatever is queued */
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}
442
/* Interrupt vector table for a channel running synchronous PIO */
struct z8530_irqhandler z8530_sync = {
	.rx = z8530_rx,
	.tx = z8530_tx,
	.status = z8530_status,
};
EXPORT_SYMBOL(z8530_sync);
449
450 /**
451 * z8530_dma_rx - Handle a DMA RX event
452 * @chan: Channel to handle
453 *
454 * Non bus mastering DMA interfaces for the Z8x30 devices. This
455 * is really pretty PC specific. The DMA mode means that most receive
456 * events are handled by the DMA hardware. We get a kick here only if
457 * a frame ended.
458 */
459
static void z8530_dma_rx(struct z8530_channel *chan)
{
	if (chan->rxdma_on) {
		/* Special condition check only */
		u8 status;

		/* NOTE(review): R7 and R6 are read purely for their side
		 * effect and the values discarded - presumably popping the
		 * frame byte-count registers; confirm against the SCC manual.
		 */
		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status = read_zsreg(chan, R1);

		if (status & END_FR)
			z8530_rx_done(chan); /* Fire up the next one */

		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	} else {
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}
481
482 /**
483 * z8530_dma_tx - Handle a DMA TX event
484 * @chan: The Z8530 channel to handle
485 *
486 * We have received an interrupt while doing DMA transmissions. It
487 * shouldn't happen. Scream loudly if it does.
488 */
z8530_dma_tx(struct z8530_channel * chan)489 static void z8530_dma_tx(struct z8530_channel *chan)
490 {
491 if (!chan->dma_tx) {
492 pr_warn("Hey who turned the DMA off?\n");
493 z8530_tx(chan);
494 return;
495 }
496 /* This shouldn't occur in DMA mode */
497 pr_err("DMA tx - bogus event!\n");
498 z8530_tx(chan);
499 }
500
501 /**
502 * z8530_dma_status - Handle a DMA status exception
503 * @chan: Z8530 channel to process
504 *
505 * A status event occurred on the Z8530. We receive these for two reasons
506 * when in DMA mode. Firstly if we finished a packet transfer we get one
507 * and kick the next packet out. Secondly we may see a DCD change.
508 *
509 */
static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	/* Compare against the previous snapshot to find what changed */
	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;

	chan->status = status;

	if (chan->dma_tx) {
		if (status & TxEOM) {
			unsigned long flags;

			/* Frame finished: shut the TX DMA channel down
			 * before kicking the next packet out.
			 */
			flags = claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on = 0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	if (altered & chan->dcdcheck) {
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}

	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}
550
/* Vector table for full DMA operation (RX and TX both DMA driven) */
static struct z8530_irqhandler z8530_dma_sync = {
	.rx = z8530_dma_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};

/* Vector table for split mode: PIO receive, DMA transmit */
static struct z8530_irqhandler z8530_txdma_sync = {
	.rx = z8530_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};
562
563 /**
564 * z8530_rx_clear - Handle RX events from a stopped chip
565 * @c: Z8530 channel to shut up
566 *
567 * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
568 * For machines with PCI Z85x30 cards, or level triggered interrupts
569 * (eg the MacII) we must clear the interrupt cause or die.
570 */
571
static void z8530_rx_clear(struct z8530_channel *c)
{
	/* Data and status bytes
	 */
	u8 stat;

	/* Discard the pending byte, then check its status */
	read_zsdata(c);
	stat = read_zsreg(c, R1);

	if (stat & END_FR)
		write_zsctrl(c, RES_Rx_CRC);
	/* Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}
588
589 /**
590 * z8530_tx_clear - Handle TX events from a stopped chip
591 * @c: Z8530 channel to shut up
592 *
593 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
594 * For machines with PCI Z85x30 cards, or level triggered interrupts
595 * (eg the MacII) we must clear the interrupt cause or die.
596 */
597
static void z8530_tx_clear(struct z8530_channel *c)
{
	/* Just acknowledge: reset the TX pending and IUS latches */
	write_zsctrl(c, RES_Tx_P);
	write_zsctrl(c, RES_H_IUS);
}
603
604 /**
605 * z8530_status_clear - Handle status events from a stopped chip
606 * @chan: Z8530 channel to shut up
607 *
608 * Status interrupt vectors for a Z8530 that is in 'parked' mode.
609 * For machines with PCI Z85x30 cards, or level triggered interrupts
610 * (eg the MacII) we must clear the interrupt cause or die.
611 */
612
static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status = read_zsreg(chan, R0);

	/* A TX underrun also latches an error condition - clear it too */
	if (status & TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}
622
/* Vector table for a parked channel: acknowledge and discard everything */
struct z8530_irqhandler z8530_nop = {
	.rx = z8530_rx_clear,
	.tx = z8530_tx_clear,
	.status = z8530_status_clear,
};
EXPORT_SYMBOL(z8530_nop);
629
630 /**
631 * z8530_interrupt - Handle an interrupt from a Z8530
632 * @irq: Interrupt number
633 * @dev_id: The Z8530 device that is interrupting.
634 *
635 * A Z85[2]30 device has stuck its hand in the air for attention.
636 * We scan both the channels on the chip for events and then call
637 * the channel specific call backs for each channel that has events.
638 * We have to use callback functions because the two channels can be
639 * in different modes.
640 *
641 * Locking is done for the handlers. Note that locking is done
642 * at the chip level (the 5uS delay issue is per chip not per
643 * channel). c->lock for both channels points to dev->lock
644 */
645
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
	struct z8530_dev *dev = dev_id;
	u8 intr;
	/* NOTE(review): a plain volatile int is not an atomic test-and-set;
	 * two CPUs could in principle both see locker == 0 before either
	 * writes it. Presumably tolerable because the real serialisation is
	 * dev->lock below and this is only a re-entry diagnostic - confirm.
	 */
	static volatile int locker=0;
	int work = 0;
	struct z8530_irqhandler *irqs;

	if (locker) {
		pr_err("IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker = 1;

	spin_lock(&dev->lock);

	/* Bounded loop: bail after 5000 passes in case the chip wedges
	 * with its interrupt line stuck.
	 */
	while (++work < 5000) {
		intr = read_zsreg(&dev->chanA, R3);
		if (!(intr &
		    (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
			break;

		/* This holds the IRQ status. On the 8530 you must read it
		 * from chan A even though it applies to the whole chip
		 */

		/* Now walk the chip and see what it is wanting - it may be
		 * an IRQ for someone else remember
		 */

		irqs = dev->chanA.irqs;

		if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
			if (intr & CHARxIP)
				irqs->rx(&dev->chanA);
			if (intr & CHATxIP)
				irqs->tx(&dev->chanA);
			if (intr & CHAEXT)
				irqs->status(&dev->chanA);
		}

		irqs = dev->chanB.irqs;

		if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
			if (intr & CHBRxIP)
				irqs->rx(&dev->chanB);
			if (intr & CHBTxIP)
				irqs->tx(&dev->chanB);
			if (intr & CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if (work == 5000)
		pr_err("%s: interrupt jammed - abort(0x%X)!\n",
		       dev->name, intr);
	/* Ok all done */
	locker = 0;
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(z8530_interrupt);
706 EXPORT_SYMBOL(z8530_interrupt);
707
/* Baseline shadow-register image copied into both channels at init time.
 * Everything is zero except WR12, which gets the 0x55 pattern also used
 * by the register probe in do_z8530_init().
 */
static const u8 reg_init[16] = {
	0, 0, 0, 0,
	0, 0, 0, 0,
	0, 0, 0, 0,
	0x55, 0, 0, 0
};
714
715 /**
716 * z8530_sync_open - Open a Z8530 channel for PIO
717 * @dev: The network interface we are using
718 * @c: The Z8530 channel to open in synchronous PIO mode
719 *
720 * Switch a Z8530 into synchronous mode without DMA assist. We
721 * raise the RTS/DTR and commence network operation.
722 */
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu + 64;	/* 64 bytes of headroom over the MTU -
				 * presumably framing/CRC slack, confirm */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;	/* switch to the PIO handlers */

	/* This loads the double buffer up */
	z8530_rx_done(c);	/* Load the frame ring */
	z8530_rx_done(c);	/* Load the backup frame */
	z8530_rtsdtr(c, 1);
	c->dma_tx = 0;
	c->regs[R1] |= TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	/* Receiver on last, once everything else is in place */
	write_zsreg(c, R3, c->regs[R3] | RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(z8530_sync_open);
748 EXPORT_SYMBOL(z8530_sync_open);
749
750 /**
751 * z8530_sync_close - Close a PIO Z8530 channel
752 * @dev: Network device to close
753 * @c: Z8530 channel to disassociate and move to idle
754 *
755 * Close down a Z8530 interface and switch its interrupt handlers
756 * to discard future events.
757 */
z8530_sync_close(struct net_device * dev,struct z8530_channel * c)758 int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
759 {
760 u8 chk;
761 unsigned long flags;
762
763 spin_lock_irqsave(c->lock, flags);
764 c->irqs = &z8530_nop;
765 c->max = 0;
766 c->sync = 0;
767
768 chk = read_zsreg(c, R0);
769 write_zsreg(c, R3, c->regs[R3]);
770 z8530_rtsdtr(c, 0);
771
772 spin_unlock_irqrestore(c->lock, flags);
773 return 0;
774 }
775 EXPORT_SYMBOL(z8530_sync_close);
776
777 /**
778 * z8530_sync_dma_open - Open a Z8530 for DMA I/O
779 * @dev: The network device to attach
780 * @c: The Z8530 channel to configure in sync DMA mode.
781 *
782 * Set up a Z85x30 device for synchronous DMA in both directions. Two
783 * ISA DMA channels must be available for this to work. We assume ISA
784 * DMA driven I/O and PC limits on access.
785 */
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu + 64;	/* headroom over the MTU - presumably
				 * framing/CRC slack, confirm */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/* Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/* Allocate the DMA flip buffers. Limit by page size.
	 * Everyone runs 1500 mtu or less on wan links so this
	 * should be fine.
	 */

	if (c->mtu > PAGE_SIZE / 2)
		return -EMSGSIZE;

	/* One page per direction, split into two half-page flip buffers.
	 * GFP_DMA keeps the memory within ISA DMA reach.
	 */
	c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!c->rx_buf[0])
		return -ENOBUFS;
	c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;

	c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!c->tx_dma_buf[0]) {
		/* Unwind the RX allocation on failure */
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0] = NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;

	c->tx_dma_used = 0;
	c->dma_tx = 1;
	c->dma_num = 0;
	c->dma_ready = 1;

	/* Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/* TX DMA via DIR/REQ
	 */

	c->regs[R14] |= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	/* TX interrupts off - the DMA engine does the byte shovelling */
	c->regs[R1] &= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/* RX DMA via W/Req
	 */

	c->regs[R1] |= WT_FN_RDYFN;
	c->regs[R1] |= WT_RDY_RT;
	c->regs[R1] |= INT_ERR_Rx;
	c->regs[R1] &= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	/* Ready-line enable goes in as a second, separate write */
	c->regs[R1] |= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/* DMA interrupts
	 */

	/* Set up the DMA configuration
	 */

	dflags = claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	/* NOTE(review): the 0x10 OR'd into the mode looks like the 8237
	 * autoinit bit - confirm against the DMA controller docs.
	 */
	set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	/* Left disabled: the TX channel is armed when a frame is queued */
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/* Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c, 1);
	write_zsreg(c, R3, c->regs[R3] | RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}
EXPORT_SYMBOL(z8530_sync_dma_open);
890
891 /**
892 * z8530_sync_dma_close - Close down DMA I/O
893 * @dev: Network device to detach
894 * @c: Z8530 channel to move into discard mode
895 *
896 * Shut down a DMA mode synchronous interface. Halt the DMA, and
897 * free the buffers.
898 */
z8530_sync_dma_close(struct net_device * dev,struct z8530_channel * c)899 int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
900 {
901 u8 chk;
902 unsigned long flags;
903
904 c->irqs = &z8530_nop;
905 c->max = 0;
906 c->sync = 0;
907
908 /* Disable the PC DMA channels
909 */
910
911 flags = claim_dma_lock();
912 disable_dma(c->rxdma);
913 clear_dma_ff(c->rxdma);
914
915 c->rxdma_on = 0;
916
917 disable_dma(c->txdma);
918 clear_dma_ff(c->txdma);
919 release_dma_lock(flags);
920
921 c->txdma_on = 0;
922 c->tx_dma_used = 0;
923
924 spin_lock_irqsave(c->lock, flags);
925
926 /* Disable DMA control mode
927 */
928
929 c->regs[R1] &= ~WT_RDY_ENAB;
930 write_zsreg(c, R1, c->regs[R1]);
931 c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
932 c->regs[R1] |= INT_ALL_Rx;
933 write_zsreg(c, R1, c->regs[R1]);
934 c->regs[R14] &= ~DTRREQ;
935 write_zsreg(c, R14, c->regs[R14]);
936
937 if (c->rx_buf[0]) {
938 free_page((unsigned long)c->rx_buf[0]);
939 c->rx_buf[0] = NULL;
940 }
941 if (c->tx_dma_buf[0]) {
942 free_page((unsigned long)c->tx_dma_buf[0]);
943 c->tx_dma_buf[0] = NULL;
944 }
945 chk = read_zsreg(c, R0);
946 write_zsreg(c, R3, c->regs[R3]);
947 z8530_rtsdtr(c, 0);
948
949 spin_unlock_irqrestore(c->lock, flags);
950
951 return 0;
952 }
953 EXPORT_SYMBOL(z8530_sync_dma_close);
954
955 /**
956 * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
957 * @dev: The network device to attach
958 * @c: The Z8530 channel to configure in sync DMA mode.
959 *
960 * Set up a Z85x30 device for synchronous DMA transmission. One
961 * ISA DMA channel must be available for this to work. The receive
962 * side is run in PIO mode, but then it has the bigger FIFO.
963 */
964
z8530_sync_txdma_open(struct net_device * dev,struct z8530_channel * c)965 int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
966 {
967 unsigned long cflags, dflags;
968
969 printk("Opening sync interface for TX-DMA\n");
970 c->sync = 1;
971 c->mtu = dev->mtu + 64;
972 c->count = 0;
973 c->skb = NULL;
974 c->skb2 = NULL;
975
976 /* Allocate the DMA flip buffers. Limit by page size.
977 * Everyone runs 1500 mtu or less on wan links so this
978 * should be fine.
979 */
980
981 if (c->mtu > PAGE_SIZE / 2)
982 return -EMSGSIZE;
983
984 c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
985 if (!c->tx_dma_buf[0])
986 return -ENOBUFS;
987
988 c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
989
990 spin_lock_irqsave(c->lock, cflags);
991
992 /* Load the PIO receive ring
993 */
994
995 z8530_rx_done(c);
996 z8530_rx_done(c);
997
998 /* Load the DMA interfaces up
999 */
1000
1001 c->rxdma_on = 0;
1002 c->txdma_on = 0;
1003
1004 c->tx_dma_used = 0;
1005 c->dma_num = 0;
1006 c->dma_ready = 1;
1007 c->dma_tx = 1;
1008
1009 /* Enable DMA control mode
1010 */
1011
1012 /* TX DMA via DIR/REQ
1013 */
1014 c->regs[R14] |= DTRREQ;
1015 write_zsreg(c, R14, c->regs[R14]);
1016
1017 c->regs[R1] &= ~TxINT_ENAB;
1018 write_zsreg(c, R1, c->regs[R1]);
1019
1020 /* Set up the DMA configuration
1021 */
1022
1023 dflags = claim_dma_lock();
1024
1025 disable_dma(c->txdma);
1026 clear_dma_ff(c->txdma);
1027 set_dma_mode(c->txdma, DMA_MODE_WRITE);
1028 disable_dma(c->txdma);
1029
1030 release_dma_lock(dflags);
1031
1032 /* Select the DMA interrupt handlers
1033 */
1034
1035 c->rxdma_on = 0;
1036 c->txdma_on = 1;
1037 c->tx_dma_used = 1;
1038
1039 c->irqs = &z8530_txdma_sync;
1040 z8530_rtsdtr(c, 1);
1041 write_zsreg(c, R3, c->regs[R3] | RxENABLE);
1042 spin_unlock_irqrestore(c->lock, cflags);
1043
1044 return 0;
1045 }
1046 EXPORT_SYMBOL(z8530_sync_txdma_open);
1047
1048 /**
1049 * z8530_sync_txdma_close - Close down a TX driven DMA channel
1050 * @dev: Network device to detach
1051 * @c: Z8530 channel to move into discard mode
1052 *
1053 * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1054 * and free the buffers.
1055 */
1056
z8530_sync_txdma_close(struct net_device * dev,struct z8530_channel * c)1057 int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1058 {
1059 unsigned long dflags, cflags;
1060 u8 chk;
1061
1062 spin_lock_irqsave(c->lock, cflags);
1063
1064 c->irqs = &z8530_nop;
1065 c->max = 0;
1066 c->sync = 0;
1067
1068 /* Disable the PC DMA channels
1069 */
1070
1071 dflags = claim_dma_lock();
1072
1073 disable_dma(c->txdma);
1074 clear_dma_ff(c->txdma);
1075 c->txdma_on = 0;
1076 c->tx_dma_used = 0;
1077
1078 release_dma_lock(dflags);
1079
1080 /* Disable DMA control mode
1081 */
1082
1083 c->regs[R1] &= ~WT_RDY_ENAB;
1084 write_zsreg(c, R1, c->regs[R1]);
1085 c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
1086 c->regs[R1] |= INT_ALL_Rx;
1087 write_zsreg(c, R1, c->regs[R1]);
1088 c->regs[R14] &= ~DTRREQ;
1089 write_zsreg(c, R14, c->regs[R14]);
1090
1091 if (c->tx_dma_buf[0]) {
1092 free_page((unsigned long)c->tx_dma_buf[0]);
1093 c->tx_dma_buf[0] = NULL;
1094 }
1095 chk = read_zsreg(c, R0);
1096 write_zsreg(c, R3, c->regs[R3]);
1097 z8530_rtsdtr(c, 0);
1098
1099 spin_unlock_irqrestore(c->lock, cflags);
1100 return 0;
1101 }
1102 EXPORT_SYMBOL(z8530_sync_txdma_close);
1103
1104 /* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1105 * it exists...
1106 */
/* Indexed by the Z8530/Z85C30/Z85230 values stored in dev->type */
static const char * const z8530_type_name[] = {
	"Z8530",
	"Z85C30",
	"Z85230"
};
1112
1113 /**
1114 * z8530_describe - Uniformly describe a Z8530 port
1115 * @dev: Z8530 device to describe
1116 * @mapping: string holding mapping type (eg "I/O" or "Mem")
1117 * @io: the port value in question
1118 *
1119 * Describe a Z8530 in a standard format. We must pass the I/O as
1120 * the port offset isn't predictable. The main reason for this function
1121 * is to try and get a common format of report.
1122 */
1123
void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
	/* Z8530_PORT_OF strips the driver's private flag bits so the raw
	 * port/address is what gets printed.
	 */
	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
		dev->name,
		z8530_type_name[dev->type],
		mapping,
		Z8530_PORT_OF(io),
		dev->irq);
}
EXPORT_SYMBOL(z8530_describe);
1134
1135 /* Locked operation part of the z8530 init code
1136 */
/* Locked operation part of the z8530 init code: reset the chip, verify
 * it responds, identify 8530 vs 85C30 vs 85230, and load the baseline
 * shadow registers. Returns 0 or -ENODEV if the probe fails.
 */
static inline int do_z8530_init(struct z8530_dev *dev)
{
	/* NOP the interrupt handlers first - we might get a
	 * floating IRQ transition when we reset the chip
	 */
	dev->chanA.irqs = &z8530_nop;
	dev->chanB.irqs = &z8530_nop;
	dev->chanA.dcdcheck = DCD;
	dev->chanB.dcdcheck = DCD;

	/* Reset the chip */
	write_zsreg(&dev->chanA, R9, 0xC0);
	udelay(200);
	/* Now check its valid: R12 must read back both test patterns */
	write_zsreg(&dev->chanA, R12, 0xAA);
	if (read_zsreg(&dev->chanA, R12) != 0xAA)
		return -ENODEV;
	write_zsreg(&dev->chanA, R12, 0x55);
	if (read_zsreg(&dev->chanA, R12) != 0x55)
		return -ENODEV;

	dev->type = Z8530;

	/* See the application note.
	 */

	write_zsreg(&dev->chanA, R15, 0x01);

	/* If we can set the low bit of R15 then
	 * the chip is enhanced.
	 */

	if (read_zsreg(&dev->chanA, R15) == 0x01) {
		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
		/* Put a char in the fifo */
		write_zsreg(&dev->chanA, R8, 0);
		if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
			dev->type = Z85230; /* Has a FIFO */
		else
			dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
	}

	/* The code assumes R7' and friends are
	 * off. Use write_zsext() for these and keep
	 * this bit clear.
	 */

	write_zsreg(&dev->chanA, R15, 0);

	/* At this point it looks like the chip is behaving
	 */

	memcpy(dev->chanA.regs, reg_init, 16);
	memcpy(dev->chanB.regs, reg_init, 16);

	return 0;
}
1194
1195 /**
1196 * z8530_init - Initialise a Z8530 device
1197 * @dev: Z8530 device to initialise.
1198 *
1199 * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1200 * is present, identify the type and then program it to hopefully
1201 * keep quite and behave. This matters a lot, a Z8530 in the wrong
1202 * state will sometimes get into stupid modes generating 10Khz
1203 * interrupt streams and the like.
1204 *
1205 * We set the interrupt handler up to discard any events, in case
1206 * we get them during reset or setp.
1207 *
1208 * Return 0 for success, or a negative value indicating the problem
1209 * in errno form.
1210 */
1211
z8530_init(struct z8530_dev * dev)1212 int z8530_init(struct z8530_dev *dev)
1213 {
1214 unsigned long flags;
1215 int ret;
1216
1217 /* Set up the chip level lock */
1218 spin_lock_init(&dev->lock);
1219 dev->chanA.lock = &dev->lock;
1220 dev->chanB.lock = &dev->lock;
1221
1222 spin_lock_irqsave(&dev->lock, flags);
1223 ret = do_z8530_init(dev);
1224 spin_unlock_irqrestore(&dev->lock, flags);
1225
1226 return ret;
1227 }
1228 EXPORT_SYMBOL(z8530_init);
1229
/**
 * z8530_shutdown - Shutdown a Z8530 device
 * @dev: The Z8530 chip to shutdown
 *
 * We set the interrupt handlers to silence any interrupts. We then
 * reset the chip and wait 100uS to be sure the reset completed. Just
 * in case the caller then tries to do stuff.
 *
 * This is called without the lock held
 *
 * Always returns 0.
 */
int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;
	/* Reset the chip */

	spin_lock_irqsave(&dev->lock, flags);
	/* Route any stray chip events to the discard handler first so a
	 * floating IRQ during the reset does nothing
	 */
	dev->chanA.irqs = &z8530_nop;
	dev->chanB.irqs = &z8530_nop;
	/* Same WR9 0xC0 reset command as do_z8530_init() issues */
	write_zsreg(&dev->chanA, R9, 0xC0);
	/* We must lock the udelay, the chip is offlimits here */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
EXPORT_SYMBOL(z8530_shutdown);
1255
/**
 * z8530_channel_load - Load channel data
 * @c: Z8530 channel to configure
 * @rtable: table of register, value pairs
 * FIXME: ioctl to allow user uploaded tables
 *
 * Load a Z8530 channel up from the system data. We use +16 to
 * indicate the "prime" registers. The value 255 terminates the
 * table.
 *
 * Always returns 0.
 */

int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while (*rtable != 255) {
		int reg = *rtable++;

		/* Table entries 16..31 address the "prime" registers,
		 * which are only reachable while bit 0 of R15 is set -
		 * so bracket those writes with a set/clear of that bit
		 */
		if (reg > 0x0F)
			write_zsreg(c, R15, c->regs[15] | 1);
		write_zsreg(c, reg & 0x0F, *rtable);
		if (reg > 0x0F)
			write_zsreg(c, R15, c->regs[15] & ~1);
		/* Keep the shadow copy in step with the hardware */
		c->regs[reg] = *rtable++;
	}
	/* Reset the software channel state: discard received frames and
	 * start with no buffers queued
	 */
	c->rx_function = z8530_null_rx;
	c->skb = NULL;
	c->tx_skb = NULL;
	c->tx_next_skb = NULL;
	c->mtu = 1500;
	c->max = 0;
	c->count = 0;
	/* Snapshot the status register, then enable the receiver */
	c->status = read_zsreg(c, R0);
	c->sync = 1;
	write_zsreg(c, R3, c->regs[R3] | RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(z8530_channel_load);
1298
/**
 * z8530_tx_begin - Begin packet transmission
 * @c: The Z8530 channel to kick
 *
 * This is the speed sensitive side of transmission. If we are called
 * and no buffer is being transmitted we commence the next buffer. If
 * nothing is queued we idle the sync.
 *
 * Note: We are handling this code path in the interrupt path, keep it
 * fast or bad things will happen.
 *
 * Called with the lock held.
 */

static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;

	/* A frame is already being transmitted: nothing to do yet */
	if (c->tx_skb)
		return;

	/* Promote the queued buffer (if any) to "in flight" */
	c->tx_skb = c->tx_next_skb;
	c->tx_next_skb = NULL;
	c->tx_ptr = c->tx_next_ptr;

	if (!c->tx_skb) {
		/* Idle on */
		if (c->dma_tx) {
			flags = claim_dma_lock();
			disable_dma(c->txdma);
			/* Check if we crapped out: a nonzero residue means
			 * the previous DMA did not move the whole frame
			 */
			if (get_dma_residue(c->txdma)) {
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount = 0;
	} else {
		c->txcount = c->tx_skb->len;

		if (c->dma_tx) {
			/* FIXME. DMA is broken for the original 8530,
			 * on the older parts we need to set a flag and
			 * wait for a further TX interrupt to fire this
			 * stage off
			 */

			flags = claim_dma_lock();
			disable_dma(c->txdma);

			/* These two are needed by the 8530/85C30
			 * and must be issued when idling.
			 */
			if (c->dev->type != Z85230) {
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			/* Clear the abort-on-underrun bit for DMA operation */
			write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
			/* Program and start the ISA DMA transfer of the frame */
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			/* Finally enable the transmitter */
			write_zsreg(c, R5, c->regs[R5] | TxENAB);
		} else {
			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			/* PIO: stuff bytes while the TX buffer has room;
			 * the remainder is presumably drained by the TX
			 * interrupt path - not visible here
			 */
			while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}
		}
	}
	/* Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}
1381
/**
 * z8530_tx_done - TX complete callback
 * @c: The channel that completed a transmit.
 *
 * This is called when we complete a packet send. We wake the queue,
 * start the next packet going and then free the buffer of the existing
 * packet. This code is fairly timing sensitive.
 *
 * Called with the register lock held.
 */

static void z8530_tx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;

	/* Actually this can happen.*/
	if (!c->tx_skb)
		return;

	/* Clear tx_skb BEFORE kicking the transmitter so that
	 * z8530_tx_begin() sees the channel as idle and starts the
	 * next queued frame immediately
	 */
	skb = c->tx_skb;
	c->tx_skb = NULL;
	z8530_tx_begin(c);
	c->netdevice->stats.tx_packets++;
	c->netdevice->stats.tx_bytes += skb->len;
	/* IRQ-safe consume - we are running in the interrupt path */
	dev_consume_skb_irq(skb);
}
1408
/**
 * z8530_null_rx - Discard a packet
 * @c: The channel the packet arrived on
 * @skb: The buffer
 *
 * We point the receive handler at this function when idle. Instead
 * of processing the frames we get to throw them away.
 */
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	/* _any variant: callable from both IRQ and process context */
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(z8530_null_rx);
1422
/**
 * z8530_rx_done - Receive completion callback
 * @c: The channel that completed a receive
 *
 * A new packet is complete. Our goal here is to get back into receive
 * mode as fast as possible. On the Z85230 we could change to using
 * ESCC mode, but on the older chips we have no choice. We flip to the
 * new buffer immediately in DMA mode so that the DMA of the next
 * frame can occur while we are copying the previous buffer to an sk_buff
 *
 * Called with the lock held
 */
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;		/* length in bytes of the completed frame */

	/* Is our receive engine in DMA mode
	 */
	if (c->rxdma_on) {
		/* Save the ready state and the buffer currently
		 * being used as the DMA target
		 */
		int ready = c->dma_ready;
		unsigned char *rxb = c->rx_buf[c->dma_num];
		unsigned long flags;

		/* Complete this DMA. Necessary to find the length
		 */
		flags = claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on = 0;
		/* Frame length = programmed count minus the unused residue */
		ct = c->mtu - get_dma_residue(c->rxdma);
		if (ct < 0)
			ct = 2;	/* Shit happens.. */
		c->dma_ready = 0;

		/* Normal case: the other slot is free, start the next DMA
		 * into it immediately.
		 */

		if (ready) {
			c->dma_num ^= 1;	/* flip to the other buffer */
			set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			 * from passing
			 */
			write_zsreg(c, R0, RES_Rx_CRC);
		} else {
			/* Can't occur as we dont reenable the DMA irq until
			 * after the flip is done
			 */
			netdev_warn(c->netdevice, "DMA flip overrun!\n");
		}

		release_dma_lock(flags);

		/* Shove the old buffer into an sk_buff. We can't DMA
		 * directly into one on a PC - it might be above the 16Mb
		 * boundary. Optimisation - we could check to see if we
		 * can avoid the copy. Optimisation 2 - make the memcpy
		 * a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (!skb) {
			c->netdevice->stats.rx_dropped++;
			netdev_warn(c->netdevice, "Memory squeeze\n");
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;	/* frame just completed; may be NULL */

		/* The game we play for non DMA is similar. We want to
		 * get the controller set up for the next packet as fast
		 * as possible. We potentially only have one byte + the
		 * fifo length for this. Thus we want to flip to the new
		 * buffer and then mess around copying and allocating
		 * things. For the current case it doesn't matter but
		 * if you build a system where the sync irq isn't blocked
		 * by the kernel IRQ disable then you need only block the
		 * sync IRQ for the RT_LOCK area.
		 *
		 */
		ct = c->count;

		/* Flip to the spare buffer so reception restarts at once */
		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			/* No spare buffer: receive nothing until we get one */
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		/* Refill the spare slot outside the critical region */
		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2)
			skb_put(c->skb2, c->mtu);

		/* NOTE(review): these counters are bumped even when skb is
		 * NULL and the frame is then counted as dropped below -
		 * looks like double accounting; confirm intended
		 */
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/* If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);	/* clip to the real frame length */
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		netdev_err(c->netdevice, "Lost a frame\n");
	}
}
1550
1551 /**
1552 * spans_boundary - Check a packet can be ISA DMA'd
1553 * @skb: The buffer to check
1554 *
1555 * Returns true if the buffer cross a DMA boundary on a PC. The poor
1556 * thing can only DMA within a 64K block not across the edges of it.
1557 */
1558
spans_boundary(struct sk_buff * skb)1559 static inline int spans_boundary(struct sk_buff *skb)
1560 {
1561 unsigned long a = (unsigned long)skb->data;
1562
1563 a ^= (a + skb->len);
1564 if (a & 0x00010000) /* If the 64K bit is different.. */
1565 return 1;
1566 return 0;
1567 }
1568
/**
 * z8530_queue_xmit - Queue a packet
 * @c: The channel to use
 * @skb: The packet to kick down the channel
 *
 * Queue a packet for transmission. Because we have rather
 * hard to hit interrupt latencies for the Z85230 per packet
 * even in DMA mode we do the flip to DMA buffer if needed here
 * not in the IRQ.
 *
 * Called from the network code. The lock is not held at this
 * point.
 *
 * Returns NETDEV_TX_OK when the packet is accepted, or
 * NETDEV_TX_BUSY when the single next-packet slot is still full.
 */
netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	/* Only one queued packet at a time: stop the stack until the
	 * slot drains
	 */
	netif_stop_queue(c->netdevice);
	if (c->tx_next_skb)
		return NETDEV_TX_BUSY;

	/* PC SPECIFIC - DMA limits */
	/* If we will DMA the transmit and its gone over the ISA bus
	 * limit (16MB) or crosses a 64K page, then copy to the flip
	 * buffer
	 */

	if (c->dma_tx &&
	    ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
	    16 * 1024 * 1024 || spans_boundary(skb))) {
		/* Send the flip buffer, and flip the flippy bit.
		 * We don't care which is used when just so long as
		 * we never use the same buffer twice in a row. Since
		 * only one buffer can be going out at a time the other
		 * has to be safe.
		 */
		c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used ^= 1;	/* Flip temp buffer */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	} else {
		c->tx_next_ptr = skb->data;
	}
	RT_LOCK;
	c->tx_next_skb = skb;
	RT_UNLOCK;

	/* Kick transmission now if the channel is idle */
	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);
	spin_unlock_irqrestore(c->lock, flags);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL(z8530_queue_xmit);
1621
/* Module support
 */
/* __initconst: the banner is only needed during module init */
static const char banner[] __initconst =
	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1626
z85230_init_driver(void)1627 static int __init z85230_init_driver(void)
1628 {
1629 printk(banner);
1630 return 0;
1631 }
1632 module_init(z85230_init_driver);
1633
z85230_cleanup_driver(void)1634 static void __exit z85230_cleanup_driver(void)
1635 {
1636 }
1637 module_exit(z85230_cleanup_driver);
1638
/* Module identity */
MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");