1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Driver for high-speed SCC boards (those with DMA support)
4 * Copyright (C) 1997-2000 Klaus Kudielka
5 *
6 * S5SCC/DMA support by Janko Koleznik S52HI
7 */
8
9
10 #include <linux/module.h>
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/if_arp.h>
15 #include <linux/in.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/ioport.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/netdevice.h>
22 #include <linux/slab.h>
23 #include <linux/rtnetlink.h>
24 #include <linux/sockios.h>
25 #include <linux/workqueue.h>
26 #include <linux/atomic.h>
27 #include <asm/dma.h>
28 #include <asm/io.h>
29 #include <asm/irq.h>
30 #include <linux/uaccess.h>
31 #include <net/ax25.h>
32 #include "z8530.h"
33
34
35 /* Number of buffers per channel */
36
37 #define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
38 #define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
39 #define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
40
41
42 /* Cards supported */
43
44 #define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
45 0, 8, 1843200, 3686400 }
46 #define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
47 0, 8, 3686400, 7372800 }
48 #define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
49 0, 4, 6144000, 6144000 }
50 #define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
51 0, 8, 4915200, 9830400 }
52
53 #define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
54
55 #define TMR_0_HZ 25600 /* Frequency of timer 0 */
56
57 #define TYPE_PI 0
58 #define TYPE_PI2 1
59 #define TYPE_TWIN 2
60 #define TYPE_S5 3
61 #define NUM_TYPES 4
62
63 #define MAX_NUM_DEVS 32
64
65
66 /* SCC chips supported */
67
68 #define Z8530 0
69 #define Z85C30 1
70 #define Z85230 2
71
72 #define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
73
74
75 /* I/O registers */
76
77 /* 8530 registers relative to card base */
78 #define SCCB_CMD 0x00
79 #define SCCB_DATA 0x01
80 #define SCCA_CMD 0x02
81 #define SCCA_DATA 0x03
82
83 /* 8253/8254 registers relative to card base */
84 #define TMR_CNT0 0x00
85 #define TMR_CNT1 0x01
86 #define TMR_CNT2 0x02
87 #define TMR_CTRL 0x03
88
89 /* Additional PI/PI2 registers relative to card base */
90 #define PI_DREQ_MASK 0x04
91
92 /* Additional PackeTwin registers relative to card base */
93 #define TWIN_INT_REG 0x08
94 #define TWIN_CLR_TMR1 0x09
95 #define TWIN_CLR_TMR2 0x0a
96 #define TWIN_SPARE_1 0x0b
97 #define TWIN_DMA_CFG 0x08
98 #define TWIN_SERIAL_CFG 0x09
99 #define TWIN_DMA_CLR_FF 0x0a
100 #define TWIN_SPARE_2 0x0b
101
102
103 /* PackeTwin I/O register values */
104
105 /* INT_REG */
106 #define TWIN_SCC_MSK 0x01
107 #define TWIN_TMR1_MSK 0x02
108 #define TWIN_TMR2_MSK 0x04
109 #define TWIN_INT_MSK 0x07
110
111 /* SERIAL_CFG */
112 #define TWIN_DTRA_ON 0x01
113 #define TWIN_DTRB_ON 0x02
114 #define TWIN_EXTCLKA 0x04
115 #define TWIN_EXTCLKB 0x08
116 #define TWIN_LOOPA_ON 0x10
117 #define TWIN_LOOPB_ON 0x20
118 #define TWIN_EI 0x80
119
120 /* DMA_CFG */
121 #define TWIN_DMA_HDX_T1 0x08
122 #define TWIN_DMA_HDX_R1 0x0a
123 #define TWIN_DMA_HDX_T3 0x14
124 #define TWIN_DMA_HDX_R3 0x16
125 #define TWIN_DMA_FDX_T3R1 0x1b
126 #define TWIN_DMA_FDX_T1R3 0x1d
127
128
129 /* Status values */
130
131 #define IDLE 0
132 #define TX_HEAD 1
133 #define TX_DATA 2
134 #define TX_PAUSE 3
135 #define TX_TAIL 4
136 #define RTS_OFF 5
137 #define WAIT 6
138 #define DCD_ON 7
139 #define RX_ON 8
140 #define DCD_OFF 9
141
142
143 /* Ioctls */
144
145 #define SIOCGSCCPARAM SIOCDEVPRIVATE
146 #define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
147
148
149 /* Data types */
150
151 struct scc_param {
152 int pclk_hz; /* frequency of BRG input (don't change) */
153 int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
154 int nrzi; /* 0 (nrz), 1 (nrzi) */
155 int clocks; /* see dmascc_cfg documentation */
156 int txdelay; /* [1/TMR_0_HZ] */
157 int txtimeout; /* [1/HZ] */
158 int txtail; /* [1/TMR_0_HZ] */
159 int waittime; /* [1/TMR_0_HZ] */
160 int slottime; /* [1/TMR_0_HZ] */
161 int persist; /* 1 ... 256 */
162 int dma; /* -1 (disable), 0, 1, 3 */
163 int txpause; /* [1/TMR_0_HZ] */
164 int rtsoff; /* [1/TMR_0_HZ] */
165 int dcdon; /* [1/TMR_0_HZ] */
166 int dcdoff; /* [1/TMR_0_HZ] */
167 };
168
/* Static description of one supported card type (see the HARDWARE table). */
struct scc_hardware {
	char *name;		/* board name, used in log messages */
	int io_region;		/* lowest possible I/O base address */
	int io_delta;		/* spacing between consecutive boards */
	int io_size;		/* size of the I/O region to request */
	int num_devs;		/* max number of boards of this type */
	int scc_offset;		/* offset of the 8530 registers from base */
	int tmr_offset;		/* offset of the 8253/8254 registers from base */
	int tmr_hz;		/* input clock of the timer chip */
	int pclk_hz;		/* SCC PCLK frequency */
};
180
/* Per-channel driver state; one instance per SCC channel (A/B). */
struct scc_priv {
	int type;		/* card type (TYPE_*) */
	int chip;		/* SCC variant (Z8530/Z85C30/Z85230) */
	struct net_device *dev;	/* back pointer to our net device */
	struct scc_info *info;	/* back pointer to the board */

	int channel;		/* 0 = channel A, 1 = channel B */
	int card_base, scc_cmd, scc_data;	/* I/O port addresses */
	int tmr_cnt, tmr_ctrl, tmr_mode;	/* timer count/control ports, mode byte */
	struct scc_param param;	/* user-tunable channel parameters */
	char rx_buf[NUM_RX_BUF][BUF_SIZE];	/* RX ring of DMA-able buffers */
	int rx_len[NUM_RX_BUF];	/* length of each received frame */
	int rx_ptr;		/* write offset into the current RX buffer */
	struct work_struct rx_work;	/* bottom half delivering RX frames */
	int rx_head, rx_tail, rx_count;	/* RX ring indices and occupancy */
	int rx_over;		/* RX overrun indicator */
	char tx_buf[NUM_TX_BUF][BUF_SIZE];	/* TX ring of DMA-able buffers */
	int tx_len[NUM_TX_BUF];	/* length of each queued frame */
	int tx_ptr;		/* read offset into the current TX buffer */
	int tx_head, tx_tail, tx_count;	/* TX ring indices and occupancy */
	int state;		/* channel state machine (IDLE, TX_*, ...) */
	unsigned long tx_start;	/* jiffies when transmission started */
	int rr0;		/* last value read from SCC RR0 */
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;	/* protects the TX/RX ring indices */
};
207
/* Per-board state: two channels sharing one IRQ and one register lock. */
struct scc_info {
	int irq_used;		/* number of open channels holding the IRQ */
	int twin_serial_cfg;	/* shadow of the PackeTwin SERIAL_CFG register */
	struct net_device *dev[2];	/* one net device per channel */
	struct scc_priv priv[2];	/* per-channel state */
	struct scc_info *next;	/* next board in the global list (see 'first') */
	spinlock_t register_lock;	/* Per device register lock */
};
216
217
218 /* Function declarations */
219 static int setup_adapter(int card_base, int type, int n) __init;
220
221 static void write_scc(struct scc_priv *priv, int reg, int val);
222 static void write_scc_data(struct scc_priv *priv, int val, int fast);
223 static int read_scc(struct scc_priv *priv, int reg);
224 static int read_scc_data(struct scc_priv *priv);
225
226 static int scc_open(struct net_device *dev);
227 static int scc_close(struct net_device *dev);
228 static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
229 void __user *data, int cmd);
230 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
231 static int scc_set_mac_address(struct net_device *dev, void *sa);
232
233 static inline void tx_on(struct scc_priv *priv);
234 static inline void rx_on(struct scc_priv *priv);
235 static inline void rx_off(struct scc_priv *priv);
236 static void start_timer(struct scc_priv *priv, int t, int r15);
237 static inline unsigned char random(void);
238
239 static inline void z8530_isr(struct scc_info *info);
240 static irqreturn_t scc_isr(int irq, void *dev_id);
241 static void rx_isr(struct scc_priv *priv);
242 static void special_condition(struct scc_priv *priv, int rc);
243 static void rx_bh(struct work_struct *);
244 static void tx_isr(struct scc_priv *priv);
245 static void es_isr(struct scc_priv *priv);
246 static void tm_isr(struct scc_priv *priv);
247
248
249 /* Initialization variables */
250
251 static int io[MAX_NUM_DEVS] __initdata = { 0, };
252
253 /* Beware! hw[] is also used in dmascc_exit(). */
254 static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
255
256
257 /* Global variables */
258
259 static struct scc_info *first;
260 static unsigned long rand;
261
262
263 MODULE_AUTHOR("Klaus Kudielka");
264 MODULE_DESCRIPTION("Driver for high-speed SCC boards");
265 module_param_hw_array(io, int, ioport, NULL, 0);
266 MODULE_LICENSE("GPL");
267
/* Module unload: tear down every board on the global list.
 * Devices are unregistered before the hardware is reset so no traffic
 * can arrive while the board is being shut down.
 */
static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		/* Free memory */
		first = info->next;
		kfree(info);
	}
}
295
/* Module init: autodetect boards of every supported type.
 * Detection programs timer 1 of the on-board 8253/8254 to count down
 * for about one second (TMR_0_HZ/HZ*10 ticks at HZ/10) and measures the
 * elapsed jiffies; only real hardware produces the expected 9-11 jiffy
 * delay.  Each detected board is handed to setup_adapter().
 */
static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions; accept only
			   addresses that fall exactly on this type's grid */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs &&
				    hw[h].io_region +
				    j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					/* Precompute timer port addresses */
					tcmd[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		/* Timing loop: poll each candidate until its counter
		   reaches zero (or wraps), recording the elapsed jiffies */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val =
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0 ||
					    t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}
417
418 module_init(dmascc_init);
419 module_exit(dmascc_exit);
420
dev_setup(struct net_device * dev)421 static void __init dev_setup(struct net_device *dev)
422 {
423 dev->type = ARPHRD_AX25;
424 dev->hard_header_len = AX25_MAX_HEADER_LEN;
425 dev->mtu = 1500;
426 dev->addr_len = AX25_ADDR_LEN;
427 dev->tx_queue_len = 64;
428 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
429 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
430 }
431
/* Driver entry points invoked by the networking core. */
static const struct net_device_ops scc_netdev_ops = {
	.ndo_open = scc_open,
	.ndo_stop = scc_close,
	.ndo_start_xmit = scc_send_packet,
	.ndo_siocdevprivate = scc_siocdevprivate,
	.ndo_set_mac_address = scc_set_mac_address,
};
439
/* Probe and register one adapter at @card_base of type @type.
 * @n is the number of adapters registered so far (used to number the
 * "dmasccN" device names).  Identifies the SCC variant via the WR7'
 * feature register, auto-detects the IRQ by firing timer 1 once, and
 * registers both channels as network devices.
 * Returns 0 on success or a negative errno; the I/O region itself is
 * released by the caller (dmascc_init) on failure.
 */
static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip, err;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info) {
		err = -ENOMEM;
		goto out;
	}

	info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		err = -ENOMEM;
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		err = -ENOMEM;
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	/* Minimal channel-A state so the register accessors work */
	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer: one-shot count of 1 fires almost immediately */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		err = -ENODEV;
		goto out3;
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->ml_priv = priv;
		snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->netdev_ops = &scc_netdev_ops;
		dev->header_ops = &ax25_header_ops;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		err = -ENODEV;
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		err = -ENODEV;
		goto out4;
	}


	/* Link the new board into the global list */
	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

      out4:
	unregister_netdev(info->dev[0]);
      out3:
	/* Reset the board before bailing out */
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
      out2:
	free_netdev(info->dev[0]);
      out1:
	kfree(info);
      out:
	return err;
}
612
613
614 /* Driver functions */
615
/* Write @val to SCC register @reg of @priv's channel.
 * A non-zero @reg is first written to the command port to select the
 * register; register 0 needs no selection.  On PI/PI2 cards (the
 * default case) the DMA request must be masked via PI_DREQ_MASK around
 * the access, under the per-board register lock, so the two-cycle
 * select/write sequence cannot be interleaved.
 */
static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}
641
642
/* Write one byte to the channel's data register.
 * @fast skips the PI/PI2 DREQ masking and locking; callers may only set
 * it when DREQ is already masked and the register lock is held.
 */
static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}
666
667
/* Read SCC register @reg of @priv's channel (see write_scc for the
 * register-selection and PI/PI2 DREQ-masking rules). */
static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
692
693
/* Read one byte from the channel's data register, masking DREQ on
 * PI/PI2 cards as in read_scc/write_scc. */
static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
712
713
/* ndo_open: claim IRQ (shared between both channels) and DMA, then
 * program the SCC channel into an idle, DCD-interrupt-armed state.
 * The section numbers in the comments refer to the Zilog SCC/ESCC
 * user manual.  Returns 0 or -EAGAIN if IRQ/DMA cannot be obtained.
 */
static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			/* Undo the IRQ refcount taken above */
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		   This bit should be ignored in DMA mode (according to the
		   documentation), but actually isn't. The receiver doesn't work if
		   it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		   compatibility).
		   b) If cleared, DMA requests may follow each other very quickly,
		   filling up the TX FIFO.
		   Advantage: TX works even in case of high bus latency.
		   Disadvantage: Edge-triggered DMA request circuitry may miss
		   a request. No more data is delivered, resulting
		   in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}
854
855
/* ndo_stop: stop the queue, drop DTR (PackeTwin), reset the channel,
 * and release the DMA channel; the IRQ is freed only when the sibling
 * channel is closed as well (irq_used refcount).
 */
static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}
883
884
/* Private ioctls: read (SIOCGSCCPARAM) or replace (SIOCSSCCPARAM) the
 * channel parameter block.  Setting requires CAP_NET_ADMIN and a
 * closed interface, since the parameters are applied at open time.
 */
static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
	struct scc_priv *priv = dev->ml_priv;

	if (cmd == SIOCGSCCPARAM) {
		if (copy_to_user(data, &priv->param,
				 sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	}

	if (cmd == SIOCSSCCPARAM) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (netif_running(dev))
			return -EAGAIN;
		if (copy_from_user(&priv->param, data,
				   sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	}

	return -EOPNOTSUPP;
}
907
908
scc_send_packet(struct sk_buff * skb,struct net_device * dev)909 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
910 {
911 struct scc_priv *priv = dev->ml_priv;
912 unsigned long flags;
913 int i;
914
915 if (skb->protocol == htons(ETH_P_IP))
916 return ax25_ip_xmit(skb);
917
918 /* Temporarily stop the scheduler feeding us packets */
919 netif_stop_queue(dev);
920
921 /* Transfer data to DMA buffer */
922 i = priv->tx_head;
923 skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
924 priv->tx_len[i] = skb->len - 1;
925
926 /* Clear interrupts while we touch our circular buffers */
927
928 spin_lock_irqsave(&priv->ring_lock, flags);
929 /* Move the ring buffer's head */
930 priv->tx_head = (i + 1) % NUM_TX_BUF;
931 priv->tx_count++;
932
933 /* If we just filled up the last buffer, leave queue stopped.
934 The higher layers must wait until we have a DMA buffer
935 to accept the data. */
936 if (priv->tx_count < NUM_TX_BUF)
937 netif_wake_queue(dev);
938
939 /* Set new TX state */
940 if (priv->state == IDLE) {
941 /* Assert RTS, start timer */
942 priv->state = TX_HEAD;
943 priv->tx_start = jiffies;
944 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
945 write_scc(priv, R15, 0);
946 start_timer(priv, priv->param.txdelay, 0);
947 }
948
949 /* Turn interrupts back on and free buffer */
950 spin_unlock_irqrestore(&priv->ring_lock, flags);
951 dev_kfree_skb(skb);
952
953 return NETDEV_TX_OK;
954 }
955
956
/* ndo_set_mac_address: install a new AX.25 callsign as device address.
 * NOTE(review): copies without validating sa_family or locking against
 * concurrent readers of dev_addr - presumably acceptable because AX.25
 * addresses change only during configuration; confirm before reuse.
 */
static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
	       dev->addr_len);
	return 0;
}
963
964
/* Start transmitting the frame at tx_tail.
 * In DMA mode the first n bytes (3 on a Z85230 with its deep FIFO, 1
 * otherwise) are written by hand to prime the TX FIFO before the DMA
 * controller takes over; in interrupt mode tx_isr is kicked directly.
 */
static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}
1008
1009
/* Enable the receiver, either DMA-driven into rx_buf[rx_head] or
 * interrupt-per-character, after draining any stale FIFO contents. */
static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     virt_to_bus(priv->rx_buf[priv->rx_head]));
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}
1046
1047
/* Disable the receiver and stop any RX DMA / RX interrupts. */
static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}
1061
1062
/* Arm the channel's 8253/8254 timer with count @t (1/TMR_0_HZ units).
 * t == 0 runs the timer routine immediately; t < 0 starts nothing.
 * On non-PackeTwin cards the timer output is sensed via the SCC CTS
 * pin, so CTSIE (plus the extra R15 bits in @r15) is enabled and rr0
 * is updated to reflect the now-asserted CTS.
 */
static void start_timer(struct scc_priv *priv, int t, int r15)
{
	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
	}
}
1077
1078
random(void)1079 static inline unsigned char random(void)
1080 {
1081 /* See "Numerical Recipes in C", second edition, p. 284 */
1082 rand = rand * 1664525L + 1013904223L;
1083 return (unsigned char) (rand >> 24);
1084 }
1085
z8530_isr(struct scc_info * info)1086 static inline void z8530_isr(struct scc_info *info)
1087 {
1088 int is, i = 100;
1089
1090 while ((is = read_scc(&info->priv[0], R3)) && i--) {
1091 if (is & CHARxIP) {
1092 rx_isr(&info->priv[0]);
1093 } else if (is & CHATxIP) {
1094 tx_isr(&info->priv[0]);
1095 } else if (is & CHAEXT) {
1096 es_isr(&info->priv[0]);
1097 } else if (is & CHBRxIP) {
1098 rx_isr(&info->priv[1]);
1099 } else if (is & CHBTxIP) {
1100 tx_isr(&info->priv[1]);
1101 } else {
1102 es_isr(&info->priv[1]);
1103 }
1104 write_scc(&info->priv[0], R0, RES_H_IUS);
1105 i++;
1106 }
1107 if (i < 0) {
1108 printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
1109 is);
1110 }
1111 /* Ok, no interrupts pending from this 8530. The INT line should
1112 be inactive now. */
1113 }
1114
1115
/* Board-level interrupt handler (shared by both channels).
 * Runs with the board register lock held so register access sequences
 * in the per-source service routines cannot interleave with accesses
 * from process context.
 */
static irqreturn_t scc_isr(int irq, void *dev_id)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under service
	   is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		int is, card_base = info->priv[0].card_base;
		/* PackeTwin: dispatch on the (active-low) interrupt register */
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}
1152
1153
rx_isr(struct scc_priv * priv)1154 static void rx_isr(struct scc_priv *priv)
1155 {
1156 if (priv->param.dma >= 0) {
1157 /* Check special condition and perform error reset. See 2.4.7.5. */
1158 special_condition(priv, read_scc(priv, R1));
1159 write_scc(priv, R0, ERR_RES);
1160 } else {
1161 /* Check special condition for each character. Error reset not necessary.
1162 Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
1163 int rc;
1164 while (read_scc(priv, R0) & Rx_CH_AV) {
1165 rc = read_scc(priv, R1);
1166 if (priv->rx_ptr < BUF_SIZE)
1167 priv->rx_buf[priv->rx_head][priv->
1168 rx_ptr++] =
1169 read_scc_data(priv);
1170 else {
1171 priv->rx_over = 2;
1172 read_scc_data(priv);
1173 }
1174 special_condition(priv, rc);
1175 }
1176 }
1177 }
1178
1179
/* Handle a receive special condition reported in RR1 (rc): receiver
 * overrun or end of frame.
 *
 * On end of frame the completed buffer is either queued for rx_bh()
 * (good frame, ring not full) or counted as an error, and the channel
 * (DMA transfer or PIO write pointer) is re-armed for the next frame.
 * Runs in interrupt context, called from rx_isr().
 */
static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun; 1 = FIFO overrun (rx_isr sets 2 for
		   buffer overflow) */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count; the two trailing bytes
		   (presumably the FCS/CRC — confirm against the SCC
		   manual) are excluded. */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
			    2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}
		if (priv->rx_over) {
			/* We had an overrun: frame is unusable */
			priv->dev->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->dev->stats.rx_length_errors++;
			else
				priv->dev->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum
			   (15 looks like the minimum valid AX.25 frame —
			   verify; shorter junk is silently ignored) */
			if (cb >= 15) {
				priv->dev->stats.rx_errors++;
				priv->dev->stats.rx_crc_errors++;
			}
		} else {
			if (cb >= 15) {
				if (priv->rx_count < NUM_RX_BUF - 1) {
					/* Put good frame in FIFO and let the
					   bottom half deliver it */
					priv->rx_len[priv->rx_head] = cb;
					priv->rx_head =
					    (priv->rx_head +
					     1) % NUM_RX_BUF;
					priv->rx_count++;
					schedule_work(&priv->rx_work);
				} else {
					/* Ring full: drop the frame */
					priv->dev->stats.rx_errors++;
					priv->dev->stats.rx_over_errors++;
				}
			}
		}
		/* Get ready for new frame: re-arm the DMA transfer or
		   reset the PIO write pointer */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     virt_to_bus(priv->rx_buf[priv->rx_head]));
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}
1244
1245
rx_bh(struct work_struct * ugli_api)1246 static void rx_bh(struct work_struct *ugli_api)
1247 {
1248 struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
1249 int i = priv->rx_tail;
1250 int cb;
1251 unsigned long flags;
1252 struct sk_buff *skb;
1253 unsigned char *data;
1254
1255 spin_lock_irqsave(&priv->ring_lock, flags);
1256 while (priv->rx_count) {
1257 spin_unlock_irqrestore(&priv->ring_lock, flags);
1258 cb = priv->rx_len[i];
1259 /* Allocate buffer */
1260 skb = dev_alloc_skb(cb + 1);
1261 if (skb == NULL) {
1262 /* Drop packet */
1263 priv->dev->stats.rx_dropped++;
1264 } else {
1265 /* Fill buffer */
1266 data = skb_put(skb, cb + 1);
1267 data[0] = 0;
1268 memcpy(&data[1], priv->rx_buf[i], cb);
1269 skb->protocol = ax25_type_trans(skb, priv->dev);
1270 netif_rx(skb);
1271 priv->dev->stats.rx_packets++;
1272 priv->dev->stats.rx_bytes += cb;
1273 }
1274 spin_lock_irqsave(&priv->ring_lock, flags);
1275 /* Move tail */
1276 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1277 priv->rx_count--;
1278 }
1279 spin_unlock_irqrestore(&priv->ring_lock, flags);
1280 }
1281
1282
tx_isr(struct scc_priv * priv)1283 static void tx_isr(struct scc_priv *priv)
1284 {
1285 int i = priv->tx_tail, p = priv->tx_ptr;
1286
1287 /* Suspend TX interrupts if we don't want to send anything.
1288 See Figure 2-22. */
1289 if (p == priv->tx_len[i]) {
1290 write_scc(priv, R0, RES_Tx_P);
1291 return;
1292 }
1293
1294 /* Write characters */
1295 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1296 write_scc_data(priv, priv->tx_buf[i][p++], 0);
1297 }
1298
1299 /* Reset EOM latch of Z8530 */
1300 if (!priv->tx_ptr && p && priv->chip == Z8530)
1301 write_scc(priv, R0, RES_EOM_L);
1302
1303 priv->tx_ptr = p;
1304 }
1305
1306
/* External/status interrupt: transmit underrun/EOM, DCD and CTS
 * transitions (SCC external/status sources, see 2.4.9).
 *
 * Compares the fresh RR0 against the cached copy (priv->rr0) to find
 * which status bits changed, finishes or aborts the frame in flight,
 * and drives the channel-access state machine accordingly.
 */
static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	drr0 = priv->rr0 ^ rr0;	/* bits that changed since last read */
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			res = priv->tx_len[i] - priv->tx_ptr;
			priv->tx_ptr = 0;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Bytes were left over: the frame was cut short —
			   a real underrun.  Update packet statistics */
			priv->dev->stats.tx_errors++;
			priv->dev->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Frame completed normally (AUTOEOM underrun at end
			   of frame).  Update packet statistics */
			priv->dev->stats.tx_packets++;
			priv->dev->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		/* Switch state: keep the channel if more frames are queued
		   and the TX timeout hasn't expired, else send the tail */
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			/* Carrier appeared; other states deliberately
			   ignored (no default case) */
			switch (priv->state) {
			case IDLE:
			case WAIT:
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			/* Carrier dropped */
			switch (priv->state) {
			case RX_ON:
				rx_off(priv);
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition: on non-TWIN boards the timer output is wired to
	   CTS (see start_timer), so a falling CTS edge means timer expiry */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);

}
1392
1393
/* Timer expiry handler: advances the CSMA/TX state machine.  Called
 * from the board timer interrupt (TWIN), from es_isr() via the CTS
 * line on other boards, or directly from start_timer() when t == 0.
 * States not listed are deliberately ignored (no default case).
 */
static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		/* TX delay / pause elapsed: start sending data */
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		/* Tail time over: disable the transmitter, keep RTS a
		   little longer */
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		/* RTS released: re-enable DCD interrupts and sample
		   carrier */
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			/* Somebody else was transmitting meanwhile */
			priv->dev->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			/* Queued frames: raise RTS and start the TX delay */
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			priv->state = IDLE;
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		/* DCD debounce time elapsed: sample carrier again */
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			/* p-persistence backoff: wait a random number of
			   slot times before probing the channel again */
			priv->state = WAIT;
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);
		}
		break;
	}
}
1451