• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Driver for high-speed SCC boards (those with DMA support)
3  * Copyright (C) 1997-2000 Klaus Kudielka
4  *
5  * S5SCC/DMA support by Janko Koleznik S52HI
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21 
22 
23 #include <linux/module.h>
24 #include <linux/bitops.h>
25 #include <linux/delay.h>
26 #include <linux/errno.h>
27 #include <linux/if_arp.h>
28 #include <linux/in.h>
29 #include <linux/init.h>
30 #include <linux/interrupt.h>
31 #include <linux/ioport.h>
32 #include <linux/kernel.h>
33 #include <linux/mm.h>
34 #include <linux/netdevice.h>
35 #include <linux/slab.h>
36 #include <linux/rtnetlink.h>
37 #include <linux/sockios.h>
38 #include <linux/workqueue.h>
39 #include <linux/atomic.h>
40 #include <asm/dma.h>
41 #include <asm/io.h>
42 #include <asm/irq.h>
43 #include <asm/uaccess.h>
44 #include <net/ax25.h>
45 #include "z8530.h"
46 
47 
48 /* Number of buffers per channel */
49 
50 #define NUM_TX_BUF      2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
51 #define NUM_RX_BUF      6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
52 #define BUF_SIZE        1576	/* BUF_SIZE >= mtu + hard_header_len */
53 
54 
55 /* Cards supported */
56 
57 #define HW_PI           { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
58                             0, 8, 1843200, 3686400 }
59 #define HW_PI2          { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
60 			    0, 8, 3686400, 7372800 }
61 #define HW_TWIN         { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
62 			    0, 4, 6144000, 6144000 }
63 #define HW_S5           { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
64                           0, 8, 4915200, 9830400 }
65 
66 #define HARDWARE        { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
67 
68 #define TMR_0_HZ        25600	/* Frequency of timer 0 */
69 
70 #define TYPE_PI         0
71 #define TYPE_PI2        1
72 #define TYPE_TWIN       2
73 #define TYPE_S5         3
74 #define NUM_TYPES       4
75 
76 #define MAX_NUM_DEVS    32
77 
78 
79 /* SCC chips supported */
80 
81 #define Z8530           0
82 #define Z85C30          1
83 #define Z85230          2
84 
85 #define CHIPNAMES       { "Z8530", "Z85C30", "Z85230" }
86 
87 
88 /* I/O registers */
89 
90 /* 8530 registers relative to card base */
91 #define SCCB_CMD        0x00
92 #define SCCB_DATA       0x01
93 #define SCCA_CMD        0x02
94 #define SCCA_DATA       0x03
95 
96 /* 8253/8254 registers relative to card base */
97 #define TMR_CNT0        0x00
98 #define TMR_CNT1        0x01
99 #define TMR_CNT2        0x02
100 #define TMR_CTRL        0x03
101 
102 /* Additional PI/PI2 registers relative to card base */
103 #define PI_DREQ_MASK    0x04
104 
105 /* Additional PackeTwin registers relative to card base */
106 #define TWIN_INT_REG    0x08
107 #define TWIN_CLR_TMR1   0x09
108 #define TWIN_CLR_TMR2   0x0a
109 #define TWIN_SPARE_1    0x0b
110 #define TWIN_DMA_CFG    0x08
111 #define TWIN_SERIAL_CFG 0x09
112 #define TWIN_DMA_CLR_FF 0x0a
113 #define TWIN_SPARE_2    0x0b
114 
115 
116 /* PackeTwin I/O register values */
117 
118 /* INT_REG */
119 #define TWIN_SCC_MSK       0x01
120 #define TWIN_TMR1_MSK      0x02
121 #define TWIN_TMR2_MSK      0x04
122 #define TWIN_INT_MSK       0x07
123 
124 /* SERIAL_CFG */
125 #define TWIN_DTRA_ON       0x01
126 #define TWIN_DTRB_ON       0x02
127 #define TWIN_EXTCLKA       0x04
128 #define TWIN_EXTCLKB       0x08
129 #define TWIN_LOOPA_ON      0x10
130 #define TWIN_LOOPB_ON      0x20
131 #define TWIN_EI            0x80
132 
133 /* DMA_CFG */
134 #define TWIN_DMA_HDX_T1    0x08
135 #define TWIN_DMA_HDX_R1    0x0a
136 #define TWIN_DMA_HDX_T3    0x14
137 #define TWIN_DMA_HDX_R3    0x16
138 #define TWIN_DMA_FDX_T3R1  0x1b
139 #define TWIN_DMA_FDX_T1R3  0x1d
140 
141 
142 /* Status values */
143 
144 #define IDLE      0
145 #define TX_HEAD   1
146 #define TX_DATA   2
147 #define TX_PAUSE  3
148 #define TX_TAIL   4
149 #define RTS_OFF   5
150 #define WAIT      6
151 #define DCD_ON    7
152 #define RX_ON     8
153 #define DCD_OFF   9
154 
155 
156 /* Ioctls */
157 
158 #define SIOCGSCCPARAM SIOCDEVPRIVATE
159 #define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
160 
161 
162 /* Data types */
163 
164 struct scc_param {
165 	int pclk_hz;		/* frequency of BRG input (don't change) */
166 	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
167 	int nrzi;		/* 0 (nrz), 1 (nrzi) */
168 	int clocks;		/* see dmascc_cfg documentation */
169 	int txdelay;		/* [1/TMR_0_HZ] */
170 	int txtimeout;		/* [1/HZ] */
171 	int txtail;		/* [1/TMR_0_HZ] */
172 	int waittime;		/* [1/TMR_0_HZ] */
173 	int slottime;		/* [1/TMR_0_HZ] */
174 	int persist;		/* 1 ... 256 */
175 	int dma;		/* -1 (disable), 0, 1, 3 */
176 	int txpause;		/* [1/TMR_0_HZ] */
177 	int rtsoff;		/* [1/TMR_0_HZ] */
178 	int dcdon;		/* [1/TMR_0_HZ] */
179 	int dcdoff;		/* [1/TMR_0_HZ] */
180 };
181 
/* Static description of one supported card type (see the HARDWARE table) */
struct scc_hardware {
	char *name;		/* human-readable card name */
	int io_region;		/* base address of the first candidate I/O region */
	int io_delta;		/* spacing between candidate I/O regions */
	int io_size;		/* size of each I/O region */
	int num_devs;		/* maximum number of cards of this type */
	int scc_offset;		/* SCC registers relative to card base */
	int tmr_offset;		/* 8253/8254 timer registers relative to card base */
	int tmr_hz;		/* timer input clock frequency [Hz] */
	int pclk_hz;		/* SCC PCLK (BRG input) frequency [Hz] */
};
193 
/* Per-channel state; two instances per board, embedded in struct scc_info */
struct scc_priv {
	int type;		/* card type: TYPE_PI ... TYPE_S5 */
	int chip;		/* SCC generation: Z8530, Z85C30 or Z85230 */
	struct net_device *dev;	/* back pointer to our net device */
	struct scc_info *info;	/* back pointer to the board */

	int channel;		/* 0 = channel A, 1 = channel B */
	int card_base, scc_cmd, scc_data;	/* I/O port addresses */
	int tmr_cnt, tmr_ctrl, tmr_mode;	/* timer count/control ports, mode byte */
	struct scc_param param;	/* user-tunable link parameters (ioctl) */
	char rx_buf[NUM_RX_BUF][BUF_SIZE];	/* RX ring of DMA-capable buffers */
	int rx_len[NUM_RX_BUF];	/* length of each received frame */
	int rx_ptr;		/* offset into the RX buffer currently being filled */
	struct work_struct rx_work;	/* bottom half (rx_bh) delivering RX frames */
	int rx_head, rx_tail, rx_count;	/* RX ring indices and fill count */
	int rx_over;		/* nonzero after an RX overrun (cleared in rx_on) */
	char tx_buf[NUM_TX_BUF][BUF_SIZE];	/* TX ring of DMA-capable buffers */
	int tx_len[NUM_TX_BUF];	/* length of each queued frame */
	int tx_ptr;		/* offset into the TX buffer currently being sent */
	int tx_head, tx_tail, tx_count;	/* TX ring indices and fill count */
	int state;		/* channel state machine (IDLE ... DCD_OFF) */
	unsigned long tx_start;	/* jiffies when the current transmission started */
	int rr0;		/* cached copy of SCC read register 0 */
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;	/* protects the TX/RX ring bookkeeping */
};
220 
/* Per-board state: two channels plus resources shared between them */
struct scc_info {
	int irq_used;		/* number of open channels sharing the IRQ */
	int twin_serial_cfg;	/* shadow of the write-only PackeTwin SERIAL_CFG reg */
	struct net_device *dev[2];	/* one net device per channel */
	struct scc_priv priv[2];	/* one private area per channel */
	struct scc_info *next;	/* link in the global 'first' board list */
	spinlock_t register_lock;	/* Per device register lock */
};
229 
230 
231 /* Function declarations */
232 static int setup_adapter(int card_base, int type, int n) __init;
233 
234 static void write_scc(struct scc_priv *priv, int reg, int val);
235 static void write_scc_data(struct scc_priv *priv, int val, int fast);
236 static int read_scc(struct scc_priv *priv, int reg);
237 static int read_scc_data(struct scc_priv *priv);
238 
239 static int scc_open(struct net_device *dev);
240 static int scc_close(struct net_device *dev);
241 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
242 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
243 static int scc_set_mac_address(struct net_device *dev, void *sa);
244 
245 static inline void tx_on(struct scc_priv *priv);
246 static inline void rx_on(struct scc_priv *priv);
247 static inline void rx_off(struct scc_priv *priv);
248 static void start_timer(struct scc_priv *priv, int t, int r15);
249 static inline unsigned char random(void);
250 
251 static inline void z8530_isr(struct scc_info *info);
252 static irqreturn_t scc_isr(int irq, void *dev_id);
253 static void rx_isr(struct scc_priv *priv);
254 static void special_condition(struct scc_priv *priv, int rc);
255 static void rx_bh(struct work_struct *);
256 static void tx_isr(struct scc_priv *priv);
257 static void es_isr(struct scc_priv *priv);
258 static void tm_isr(struct scc_priv *priv);
259 
260 
261 /* Initialization variables */
262 
263 static int io[MAX_NUM_DEVS] __initdata = { 0, };
264 
265 /* Beware! hw[] is also used in dmascc_exit(). */
266 static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
267 
268 
269 /* Global variables */
270 
271 static struct scc_info *first;
272 static unsigned long rand;
273 
274 
275 MODULE_AUTHOR("Klaus Kudielka");
276 MODULE_DESCRIPTION("Driver for high-speed SCC boards");
277 module_param_array(io, int, NULL, 0);
278 MODULE_LICENSE("GPL");
279 
/*
 * Module unload: tear down every board on the global 'first' list.
 * For each board: unregister both net devices, reset the hardware
 * (drop the PackeTwin config register first, then force a SCC hardware
 * reset via WR9/FHWRES), release the I/O region, and free the devices
 * and the scc_info itself.
 */
static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		/* Free memory */
		first = info->next;
		kfree(info);
	}
}
307 
/*
 * Module load: probe for supported boards.
 *
 * For each card type, candidate I/O regions come either from the io[]
 * module parameter or from the type's defaults.  Each claimed region's
 * 8253/8254 timer 1 is programmed for a 10-jiffy countdown; a region
 * whose counter expires after 9..11 jiffies is assumed to hold a real
 * adapter and is handed to setup_adapter().  Returns 0 if at least one
 * adapter was set up, -EIO otherwise.
 */
static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				/* Accept io[i] only if it lands exactly on a
				   valid slot for this card type. */
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs &&
				    hw[h].io_region +
				    j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					/* Cache the timer port addresses */
					tcmd[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		/* Timing loop */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val =
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0 ||
					    t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}
429 
430 module_init(dmascc_init);
431 module_exit(dmascc_exit);
432 
dev_setup(struct net_device * dev)433 static void __init dev_setup(struct net_device *dev)
434 {
435 	dev->type = ARPHRD_AX25;
436 	dev->hard_header_len = AX25_MAX_HEADER_LEN;
437 	dev->mtu = 1500;
438 	dev->addr_len = AX25_ADDR_LEN;
439 	dev->tx_queue_len = 64;
440 	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
441 	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
442 }
443 
/* net_device callbacks shared by both channels of every board */
static const struct net_device_ops scc_netdev_ops = {
	.ndo_open = scc_open,
	.ndo_stop = scc_close,
	.ndo_start_xmit = scc_send_packet,
	.ndo_do_ioctl = scc_ioctl,
	.ndo_set_mac_address = scc_set_mac_address,
};
451 
/*
 * Initialize one detected adapter at 'card_base' of type 'type'.
 * 'n' is the zero-based card index, used only for interface naming
 * ("dmascc<2n>" / "dmascc<2n+1>").
 *
 * Allocates the scc_info and both net devices, identifies the SCC chip
 * generation (Z8530 / Z85C30 / Z85230), auto-detects the IRQ by firing
 * the on-board timer, fills in both channels' private state and
 * registers the two network interfaces.  On success the board is linked
 * into the global 'first' list and 0 is returned; on failure everything
 * is rolled back (goto chain) and -1 is returned.
 */
static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info)
		goto out;

	info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	/* Minimal channel-A setup so write_scc()/read_scc() work below */
	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		goto out3;
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->ml_priv = priv;
		sprintf(dev->name, "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->netdev_ops = &scc_netdev_ops;
		dev->header_ops = &ax25_header_ops;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		goto out4;
	}


	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

      out4:
	unregister_netdev(info->dev[0]);
      out3:
	/* Quiesce the hardware before freeing anything */
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
      out2:
	free_netdev(info->dev[0]);
      out1:
	kfree(info);
      out:
	return -1;
}
617 
618 
619 /* Driver functions */
620 
/*
 * Write 'val' to SCC register 'reg' on this channel.  The Z8530 register
 * interface is two-step: for reg != 0 the register number is written to
 * the command port first, then the value (reg 0 needs no pointer write).
 *
 * On PI/PI2 cards (the default case) the DMA request line must be masked
 * via PI_DREQ_MASK around the access, under the board's register lock,
 * so a concurrent DMA transfer cannot collide with the programmed I/O.
 */
static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}
646 
647 
/*
 * Write one byte to the SCC data port.  'fast' (PI/PI2 only) skips the
 * DREQ masking and register lock; callers passing fast != 0 are expected
 * to already guard the access themselves (see tx_on()).
 */
static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}
671 
672 
/*
 * Read SCC register 'reg' on this channel (two-step pointer/value access,
 * as in write_scc()).  On PI/PI2 cards the DMA request line is masked and
 * the register lock held for the duration of the access.
 */
static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
697 
698 
/*
 * Read one byte from the SCC data port.  On PI/PI2 cards the DMA request
 * line is masked and the register lock held around the access.
 */
static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
717 
718 
/*
 * Bring one channel up (ndo_open).
 *
 * Requests the board IRQ on first open of either channel, optionally
 * claims the configured DMA channel, resets the TX/RX ring state, and
 * programs the SCC for SDLC operation according to priv->param.
 * Returns 0 on success, -EAGAIN if the IRQ or DMA channel is busy.
 */
static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			/* Undo the IRQ reference taken above */
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		   This bit should be ignored in DMA mode (according to the
		   documentation), but actually isn't. The receiver doesn't work if
		   it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		   compatibility).
		   b) If cleared, DMA requests may follow each other very quickly,
		   filling up the TX FIFO.
		   Advantage: TX works even in case of high bus latency.
		   Disadvantage: Edge-triggered DMA request circuitry may miss
		   a request. No more data is delivered, resulting
		   in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}
859 
860 
/*
 * Take one channel down (ndo_stop): stop the TX queue, drop DTR on the
 * PackeTwin, reset the SCC channel, release the DMA channel if one was
 * claimed, and free the IRQ once the last open channel closes.
 */
static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}
888 
889 
scc_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)890 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
891 {
892 	struct scc_priv *priv = dev->ml_priv;
893 
894 	switch (cmd) {
895 	case SIOCGSCCPARAM:
896 		if (copy_to_user
897 		    (ifr->ifr_data, &priv->param,
898 		     sizeof(struct scc_param)))
899 			return -EFAULT;
900 		return 0;
901 	case SIOCSSCCPARAM:
902 		if (!capable(CAP_NET_ADMIN))
903 			return -EPERM;
904 		if (netif_running(dev))
905 			return -EAGAIN;
906 		if (copy_from_user
907 		    (&priv->param, ifr->ifr_data,
908 		     sizeof(struct scc_param)))
909 			return -EFAULT;
910 		return 0;
911 	default:
912 		return -EINVAL;
913 	}
914 }
915 
916 
/*
 * Queue one frame for transmission (ndo_start_xmit).
 *
 * The payload is copied into the next free TX ring buffer; if the ring
 * is then full the queue stays stopped until a buffer frees up.  If the
 * channel is idle, RTS is asserted and the txdelay timer started to kick
 * off the transmit state machine.
 */
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	unsigned long flags;
	int i;

	if (skb->protocol == htons(ETH_P_IP))
		return ax25_ip_xmit(skb);

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer */
	/* NOTE(review): byte 0 of the skb is skipped — presumably a KISS
	   command byte prepended by the AX.25 layer; confirm against caller. */
	i = priv->tx_head;
	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
963 
964 
scc_set_mac_address(struct net_device * dev,void * sa)965 static int scc_set_mac_address(struct net_device *dev, void *sa)
966 {
967 	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
968 	       dev->addr_len);
969 	return 0;
970 }
971 
972 
/*
 * Start transmitting the frame at the TX ring tail.
 *
 * In DMA mode the first n bytes (3 for the Z85230's deep FIFO, 1
 * otherwise) are written by programmed I/O to prime the transmitter,
 * and the DMA controller is set up to deliver the remainder.  In PIO
 * mode the TX interrupt is enabled and tx_isr() feeds the FIFO.
 */
static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		/* Bytes written by hand before DMA takes over */
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     (int) priv->tx_buf[priv->tx_tail] + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}
1016 
1017 
/*
 * Enable the receiver.  Drains any stale bytes from the RX FIFO, then
 * either programs the DMA controller to fill the buffer at the RX ring
 * head (DMA mode) or arms per-character RX interrupts (PIO mode), and
 * finally enables the receiver with CRC checking.
 */
static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     (int) priv->rx_buf[priv->rx_head]);
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}
1054 
1055 
/* Disable the receiver: counterpart of rx_on(). Stops the Z8530 RX
   engine, removes its interrupt/DREQ source, and halts the DMA channel. */
static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt (PackeTwin routes DREQ through a
	   board register; other cards use the SCC's own WR1 bits) */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}
1069 
1070 
/* Arm the board timer with t ticks. t == 0 invokes tm_isr() immediately
   instead of programming the hardware; a negative t leaves the counter
   unprogrammed. On non-PackeTwin cards the timer expiry is observed as a
   CTS transition (handled in es_isr()), so CTS interrupts are enabled and
   the cached RR0 value is updated accordingly. r15 holds additional WR15
   interrupt-enable bits to keep set while waiting. */
static void start_timer(struct scc_priv *priv, int t, int r15)
{
	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		/* Counter is loaded LSB first, then MSB */
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
	}
}
1085 
1086 
random(void)1087 static inline unsigned char random(void)
1088 {
1089 	/* See "Numerical Recipes in C", second edition, p. 284 */
1090 	rand = rand * 1664525L + 1013904223L;
1091 	return (unsigned char) (rand >> 24);
1092 }
1093 
z8530_isr(struct scc_info * info)1094 static inline void z8530_isr(struct scc_info *info)
1095 {
1096 	int is, i = 100;
1097 
1098 	while ((is = read_scc(&info->priv[0], R3)) && i--) {
1099 		if (is & CHARxIP) {
1100 			rx_isr(&info->priv[0]);
1101 		} else if (is & CHATxIP) {
1102 			tx_isr(&info->priv[0]);
1103 		} else if (is & CHAEXT) {
1104 			es_isr(&info->priv[0]);
1105 		} else if (is & CHBRxIP) {
1106 			rx_isr(&info->priv[1]);
1107 		} else if (is & CHBTxIP) {
1108 			tx_isr(&info->priv[1]);
1109 		} else {
1110 			es_isr(&info->priv[1]);
1111 		}
1112 		write_scc(&info->priv[0], R0, RES_H_IUS);
1113 		i++;
1114 	}
1115 	if (i < 0) {
1116 		printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
1117 		       is);
1118 	}
1119 	/* Ok, no interrupts pending from this 8530. The INT line should
1120 	   be inactive now. */
1121 }
1122 
1123 
/* Shared IRQ handler for one board (both SCC channels). Takes the
   register lock so ISR register accesses do not interleave with the
   driver's process-context register programming. */
static irqreturn_t scc_isr(int irq, void *dev_id)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under service
	   is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		/* PackeTwin: a board register multiplexes the SCC and the
		   two timer interrupt sources (bits read back inverted);
		   poll until all sources are quiet */
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				/* Reading the clear port acknowledges timer 1 */
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}
1160 
1161 
/* RX interrupt. In DMA mode the data path is handled by the DMA
   controller and only special conditions (overrun / end of frame) reach
   us; in PIO mode every received character is moved into the current
   head buffer here. */
static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not necessary.
		   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->
							    rx_ptr++] =
				    read_scc_data(priv);
			else {
				/* Frame longer than the buffer: flag length
				   overflow but keep draining the FIFO */
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}
1186 
1187 
/* Handle an RX special condition. rc is the RR1 status value read for
   the offending character. Records overruns; on end of frame, updates
   error statistics or queues the good frame for rx_bh(), then re-arms
   the receive path for the next frame. */
static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count (received length minus the
		   two trailing CRC bytes) */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
			    2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}
		if (priv->rx_over) {
			/* We had an overrun: 2 = frame too long, 1 = FIFO overrun */
			priv->dev->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->dev->stats.rx_length_errors++;
			else
				priv->dev->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum */
			if (cb >= 15) {
				priv->dev->stats.rx_errors++;
				priv->dev->stats.rx_crc_errors++;
			}
		} else {
			if (cb >= 15) {
				if (priv->rx_count < NUM_RX_BUF - 1) {
					/* Put good frame in FIFO and hand
					   delivery to the bottom half */
					priv->rx_len[priv->rx_head] = cb;
					priv->rx_head =
					    (priv->rx_head +
					     1) % NUM_RX_BUF;
					priv->rx_count++;
					schedule_work(&priv->rx_work);
				} else {
					/* Ring full: drop the frame */
					priv->dev->stats.rx_errors++;
					priv->dev->stats.rx_over_errors++;
				}
			}
		}
		/* Get ready for new frame */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     (int) priv->rx_buf[priv->rx_head]);
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}
1252 
1253 
/* Bottom half (workqueue handler): drain the RX ring and deliver each
   frame to the network stack. The ring lock is deliberately dropped
   around allocation/delivery so the ISR can keep enqueueing new frames
   meanwhile; only the tail/count bookkeeping is done under the lock. */
static void rx_bh(struct work_struct *ugli_api)
{
	struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
	int i = priv->rx_tail;
	int cb;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&priv->ring_lock, flags);
	while (priv->rx_count) {
		spin_unlock_irqrestore(&priv->ring_lock, flags);
		cb = priv->rx_len[i];
		/* Allocate buffer (one extra byte for the leading zero) */
		skb = dev_alloc_skb(cb + 1);
		if (skb == NULL) {
			/* Drop packet */
			priv->dev->stats.rx_dropped++;
		} else {
			/* Fill buffer: a zero byte, then the frame data */
			data = skb_put(skb, cb + 1);
			data[0] = 0;
			memcpy(&data[1], priv->rx_buf[i], cb);
			skb->protocol = ax25_type_trans(skb, priv->dev);
			netif_rx(skb);
			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += cb;
		}
		spin_lock_irqsave(&priv->ring_lock, flags);
		/* Move tail */
		priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
		priv->rx_count--;
	}
	spin_unlock_irqrestore(&priv->ring_lock, flags);
}
1289 
1290 
/* TX interrupt (PIO mode): feed the Z8530 transmit FIFO from the frame
   at the ring tail, or park the TX interrupt when the current frame has
   been fully written. */
static void tx_isr(struct scc_priv *priv)
{
	int i = priv->tx_tail, p = priv->tx_ptr;

	/* Suspend TX interrupts if we don't want to send anything.
	   See Figure 2-22. */
	if (p == priv->tx_len[i]) {
		write_scc(priv, R0, RES_Tx_P);
		return;
	}

	/* Write characters while the FIFO accepts them and data remains */
	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
		write_scc_data(priv, priv->tx_buf[i][p++], 0);
	}

	/* Reset EOM latch of Z8530 (only on the pass that wrote the first
	   byte of the frame, and only on chips without AUTOEOM) */
	if (!priv->tx_ptr && p && priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);

	priv->tx_ptr = p;
}
1313 
1314 
/* External/status interrupt: handles the TX underrun that marks the end
   of a transmitted frame, DCD transitions, and (on non-PackeTwin cards)
   timer expiry, which arrives as a CTS transition (see start_timer()). */
static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	/* drr0 holds the bits that changed since the last snapshot */
	drr0 = priv->rr0 ^ rr0;
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes of the current frame */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			res = priv->tx_len[i] - priv->tx_ptr;
			priv->tx_ptr = 0;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Bytes were left over: the frame was cut short by
			   a real underrun. Update packet statistics */
			priv->dev->stats.tx_errors++;
			priv->dev->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Frame completed. Update packet statistics */
			priv->dev->stats.tx_packets++;
			priv->dev->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		/* Switch state: pause before the next queued frame unless
		   the TX timeout has been exceeded, else send the tail */
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			/* Carrier appeared: debounce via the DCD-on timer
			   (other states ignore the transition) */
			switch (priv->state) {
			case IDLE:
			case WAIT:
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			/* Carrier dropped: debounce via the DCD-off timer */
			switch (priv->state) {
			case RX_ON:
				rx_off(priv);
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition: on non-PackeTwin cards a falling CTS means the
	   board timer expired, so run the timer state machine */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);

}
1400 
1401 
/* Timer interrupt: advance the half-duplex channel-access state machine.
   Each case ends the wait period started by the previous state and arms
   the next one via start_timer(); states not listed ignore the tick. */
static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		/* Key-up delay or inter-frame pause elapsed: send data */
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		/* Tail time over: stop transmitter, then wait out RTS-off */
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		/* Re-enable DCD interrupts and sample the current carrier */
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			/* Someone else keyed up meanwhile: count a collision */
			priv->dev->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			/* Queued frames: key up and start the TX-delay timer */
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			priv->state = IDLE;
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		/* DCD debounce period over: re-check the carrier */
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			/* Channel free: p-persistence backoff, scaled by
			   a random byte, before trying to transmit */
			priv->state = WAIT;
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);
		}
		break;
	}
}
1459