/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

static char version[] __initdata =
	"82596.c $Revision: 1.5 $\n";

#define DRV_NAME	"82596"

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x,y)	if (i596_debug & (x)) y
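
/* Usage sketch (illustrative, not part of the original driver text): each
 * DEB() call is gated by one of the mask bits above, so
 *
 *	DEB(DEB_INIT, printk(KERN_DEBUG "init\n"));
 *
 * emits its printk only when DEB_INIT is set in i596_debug, e.g. by
 * loading the module with i596_debug=0x0001.
 */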


#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
#define ENABLE_MVME16x_NET
#endif
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
#define ENABLE_BVME6000_NET
#endif
#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
#define ENABLE_APRICOT
#endif

#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
#endif
#ifdef ENABLE_BVME6000_NET
#include <asm/bvme6000hw.h>
#endif

/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __mc68000__
#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY	0x00010000
#define MACH_IS_APRICOT	0
#else
#define WSWAPrfd(x)     ((struct i596_rfd *)(x))
#define WSWAPrbd(x)     ((struct i596_rbd *)(x))
#define WSWAPiscp(x)    ((struct i596_iscp *)(x))
#define WSWAPscb(x)     ((struct i596_scb *)(x))
#define WSWAPcmd(x)     ((struct i596_cmd *)(x))
#define WSWAPtbd(x)     ((struct i596_tbd *)(x))
#define WSWAPchar(x)    ((char *)(x))
#define ISCP_BUSY	0x0001
#define MACH_IS_APRICOT	1
#endif
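
/* Worked example (assuming the 680x0 case above): the i596 expects the two
 * 16-bit halves of a 32-bit bus address exchanged, so a structure at bus
 * address 0x12345678 is stored in shared memory as
 * WSWAPcmd(0x12345678) == (struct i596_cmd *) 0x56781234.  In the Apricot
 * (Intel) build the macros reduce to plain casts and nothing is swapped.
 */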

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
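
/* Example (taken from init_i596_mem() below): a port command is formed by
 * ORing the command code into the argument address, then writing the
 * 32-bit result as two 16-bit halves, most significant word first:
 *
 *	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
 */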
146 
147 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
148 
149 MODULE_AUTHOR("Richard Hirst");
150 MODULE_DESCRIPTION("i82596 driver");
151 MODULE_LICENSE("GPL");
152 
153 module_param(i596_debug, int, 0);
154 MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
155 
156 
157 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
158  * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
159  */
160 static int rx_copybreak = 100;
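
/* For example (illustrative sizes): with rx_copybreak == 100, a 60-byte
 * ARP frame is copied into a fresh pkt_len + 2 byte skb and the ring
 * buffer is reused, while a 1500-byte frame is passed up in the ring's own
 * skb and replaced by a new PKT_BUF_SZ buffer; see i596_rx() below.
 */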

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_TOTAL_SIZE 17

#define I596_NULL ((void *)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define	 CUC_START	0x0100
#define	 CUC_RESUME	0x0200
#define	 CUC_SUSPEND    0x0300
#define	 CUC_ABORT	0x0400
#define	 RX_START	0x0010
#define	 RX_RESUME	0x0020
#define	 RX_SUSPEND	0x0030
#define	 RX_ABORT	0x0040

#define TX_TIMEOUT	5


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	unsigned long ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	struct i596_tbd *next;
	char *data;
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596,
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPU's viewpoint */
	unsigned short status;
	unsigned short command;
	struct i596_cmd *b_next;	/* Address from i596 viewpoint */
};
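
/* Illustration (lifted from i596_add_cmd() below): when a command is
 * chained, the two links are set separately, and the chip-side pointer is
 * aimed at the status field and word-swapped:
 *
 *	lp->cmd_tail->v_next = cmd;
 *	lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
 */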

struct tx_cmd {
	struct i596_cmd cmd;
	struct i596_tbd *tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;	/* So we can free it after tx */
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	struct i596_rfd *b_next;	/* Address from i596 viewpoint */
	struct i596_rbd *rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPU's viewpoint */
	struct i596_rfd *v_prev;
};

struct i596_rbd {
    unsigned short count;
    unsigned short zero1;
    struct i596_rbd *b_next;
    unsigned char *b_data;		/* Address from i596 viewpoint */
    unsigned short size;
    unsigned short zero2;
    struct sk_buff *skb;
    struct i596_rbd *v_next;
    struct i596_rbd *b_addr;		/* This rbd addr from i596 view */
    unsigned char *v_data;		/* Address from CPU's viewpoint */
};

#define TX_RING_SIZE 64
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	unsigned long crc_err;
	unsigned long align_err;
	unsigned long resource_err;
	unsigned long over_err;
	unsigned long rcvdt_err;
	unsigned long short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	unsigned long stat;
	struct i596_scb *scb;
};

struct i596_scp {
	unsigned long sysbus;
	unsigned long pad;
	struct i596_iscp *iscp;
};
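
/* How the chip finds these structures (lifted from init_i596_mem() below):
 * the SCP points at the ISCP, which points at the SCB, each link stored as
 * a word-swapped bus address:
 *
 *	lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
 *	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
 */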

struct i596_private {
	volatile struct i596_scp scp;
	volatile struct i596_iscp iscp;
	volatile struct i596_scb scb;
	struct sa_cmd sa_cmd;
	struct cf_cmd cf_cmd;
	struct tdr_cmd tdr_cmd;
	struct mc_cmd mc_cmd;
	unsigned long stat;
	int last_restart __attribute__((aligned(4)));
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	unsigned long last_cmd;
	struct i596_rfd rfds[RX_RING_SIZE];
	struct i596_rbd rbds[RX_RING_SIZE];
	struct tx_cmd tx_cmds[TX_RING_SIZE];
	struct i596_tbd tbds[TX_RING_SIZE];
	int next_tx_cmd;
	spinlock_t lock;
};

static char init_setup[] =
{
	0x8E,			/* length, prefetch on */
	0xC8,			/* fifo to 8, monitor off */
#ifdef CONFIG_VME
	0xc0,			/* don't save bad frames */
#else
	0x80,			/* don't save bad frames */
#endif
	0x2E,			/* No source address insertion, 8 byte preamble */
	0x00,			/* priority and backoff defaults */
	0x60,			/* interframe spacing */
	0x00,			/* slot time LSB */
	0xf2,			/* slot time and retries */
	0x00,			/* promiscuous mode */
	0x00,			/* collision detect */
	0x40,			/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };
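
/* Note (an observation, not from the original comments): init_i596_mem()
 * copies only the first 14 of these bytes into cf_cmd.i596_config before
 * queuing CmdConfigure, which appears to match the byte count encoded in
 * the low bits of the leading 0x8E length byte.
 */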

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 25;
static int max_cmd_backlog = TX_RING_SIZE-1;


static inline void CA(struct net_device *dev)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		((struct i596_reg *) dev->base_addr)->ca = 1;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile u32 i;

		i = *(volatile u32 *) (dev->base_addr);
	}
#endif
#ifdef ENABLE_APRICOT
	if (MACH_IS_APRICOT) {
		outw(0, (short) (dev->base_addr) + 4);
	}
#endif
}


static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
		p->porthi = ((c) | (u32) (x)) & 0xffff;
		p->portlo = ((c) | (u32) (x)) >> 16;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		u32 v = (u32) (c) | (u32) (x);
		v = ((u32) (v) << 16) | ((u32) (v) >> 16);
		*(volatile u32 *) dev->base_addr = v;
		udelay(1);
		*(volatile u32 *) dev->base_addr = v;
	}
#endif
}


static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->iscp.stat)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->scb.command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
{
	volatile struct i596_cmd *c = cmd;

	while (--delcnt && c->command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s.\n", dev->name, str);
		return -1;
	}
	else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
		" .cmd = %p, .rfd = %p\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
		lp->scb.cmd, lp->scb.rfd);
	printk(KERN_ERR "   errors: crc %lx, align %lx, resource %lx,"
               " over %lx, rcvdt %lx, short %lx\n",
		lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
		lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	cmd = lp->cmd_head;
	while (cmd != I596_NULL) {
		printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
		  cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_ERR "rfd_head = %p\n", rfd);
	do {
		printk(KERN_ERR "   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
                        " count %04x\n",
			rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
			rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_ERR "rbd_head = %p\n", rbd);
	do {
		printk(KERN_ERR "   %p .count %04x, b_next %p, b_data %p, size %04x\n",
			rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
}


#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static irqreturn_t i596_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x28] = 1;
		pcc2[0x2b] = 0x1d;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
	printk(KERN_ERR "%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
	return IRQ_HANDLED;
}
#endif

static inline void init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);

		if (skb == NULL)
			panic("82596: alloc_skb() failed");
		skb->dev = dev;
		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
		rbd->size = PKT_BUF_SZ;
#ifdef __mc68000__
		cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
#endif
	}
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = lp->rbd_head;
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	}
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
	short ioaddr = dev->base_addr;
#endif
	unsigned long flags;

	MPU_PORT(dev, PORT_RESET, NULL);

	udelay(100);		/* Wait 100us - seems to help */

#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints for now */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x48;
		/* Following disables snooping.  Snooping is not required
		 * as we make appropriate use of non-cached pages for
		 * shared data, and cache_push/cache_clear.
		 */
		pcc2[0x2b] = 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

	/* change the scp address */

	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));

#elif defined(ENABLE_APRICOT)

	{
		u32 scp = virt_to_bus(&lp->scp);

		/* change the scp address */
		outw(0, ioaddr);
		outw(0, ioaddr);
		outb(4, ioaddr + 0xf);
		outw(scp | 2, ioaddr);
		outw(scp >> 16, ioaddr);
	}
#endif

	lp->last_cmd = jiffies;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x)
		lp->scp.sysbus = 0x00000054;
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000)
		lp->scp.sysbus = 0x0000004c;
#endif
#ifdef ENABLE_APRICOT
	if (MACH_IS_APRICOT)
		lp->scp.sysbus = 0x00440000;
#endif

	lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = lp->scb.cmd = I596_NULL;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		lp->scb.t_on  = 7 * 25;
		lp->scb.t_off = 1 * 25;
	}
#endif

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

#if defined(ENABLE_APRICOT)
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);

	if (wait_istat(dev,lp,1000,"initialization timed out"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);
	lp->scb.command = 0;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Enable ints, etc. now */
		pcc2[0x2a] = 0x55;	/* Edge sensitive */
		pcc2[0x2b] = 0x15;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 3;
	}
#endif


	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
	lp->sa_cmd.cmd.command = CmdSASetup;
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	CA(dev);

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"RX_START not processed"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
	return 0;

failed:
	printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, NULL);
	return -1;
}

static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;		/* Ref next frame to check */

	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = I596_NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr)
			rbd = lp->rbd_head;
		else {
			printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = I596_NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG "  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;

				/* Get fresh skbuff to replace filled one. */
				newskb = dev_alloc_skb(PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				newskb->dev = dev;
				rbd->v_data = newskb->data;
				rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
#ifdef __mc68000__
				cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
			}
			else
				skb = dev_alloc_skb(pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
				dev->stats.rx_dropped++;
			}
			else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					skb_reserve(skb, 2);
					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
				}
				skb->protocol=eth_type_trans(skb,dev);
				skb->len = pkt_len;
#ifdef __mc68000__
				cache_clear(virt_to_phys(rbd->skb->data),
						pkt_len);
#endif
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes+=pkt_len;
			}
		}
		else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				dev->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				dev->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				dev->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				dev->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				dev->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				dev->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != I596_NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		rfd = lp->rfd_head;
	}

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != I596_NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = ptr->b_next = I596_NULL;
		}
	}

	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
}

static void i596_reset(struct net_device *dev, struct i596_private *lp,
			int ioaddr)
{
	unsigned long flags;

	DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}

static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = cmd->b_next = I596_NULL;

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != I596_NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
		lp->scb.command = CUC_START;
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);

		i596_reset(dev, lp, ioaddr);
	}
}

static int i596_open(struct net_device *dev)
{
	int res = 0;

	DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		return -EAGAIN;
	}
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (request_irq(0x56, i596_error, 0, "i82596_error", dev))
			return -EAGAIN;
	}
#endif
	init_rx_bufs(dev);

	netif_start_queue(dev);

	/* Initialize the 82596 memory */
	if (init_i596_mem(dev)) {
		res = -EAGAIN;
		free_irq(dev->irq, dev);
	}

	return res;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
			dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp, ioaddr);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CA (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;
	dev->trans_start = jiffies;

	DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return 0;
		length = ETH_ZLEN;
	}
	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
				dev->name);
		dev->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tbd->data = WSWAPchar(virt_to_bus(skb->data));

#ifdef __mc68000__
		cache_push(virt_to_phys(skb->data), length);
#endif
		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return 0;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static int io = 0x300;
static int irq = 10;

struct net_device * __init i82596_probe(int unit)
{
	struct net_device *dev;
	int i;
	struct i596_private *lp;
	char eth_addr[8];
	static int probed;
	int err;

	if (probed)
		return ERR_PTR(-ENODEV);
	probed++;

	dev = alloc_etherdev(0);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	} else {
		dev->base_addr = io;
		dev->irq = irq;
	}

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
			printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
			err = -ENODEV;
			goto out;
		}
		memcpy(eth_addr, (void *) 0xfffc1f2c, 6);	/* YUCK! Get addr from NOVRAM */
		dev->base_addr = MVME_I596_BASE;
		dev->irq = (unsigned) MVME16x_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
		unsigned char msr = rtc[3];
		int i;

		rtc[3] |= 0x80;
		for (i = 0; i < 6; i++)
			eth_addr[i] = rtc[i * 4 + 7];	/* Stored in RTC RAM at offset 1 */
		rtc[3] = msr;
		dev->base_addr = BVME_I596_BASE;
		dev->irq = (unsigned) BVME_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_APRICOT
	{
		int checksum = 0;
		int ioaddr = 0x300;

		/* this is easy, the ethernet interface can only be at 0x300 */
		/* first check nothing is already registered here */

		if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
			printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
			err = -EBUSY;
			goto out;
		}

		dev->base_addr = ioaddr;

		for (i = 0; i < 8; i++) {
			eth_addr[i] = inb(ioaddr + 8 + i);
			checksum += eth_addr[i];
		}

		/* checksum is a multiple of 0x100, got this wrong first time
		   some machines have 0x100, some 0x200. The DOS driver doesn't
		   even bother with the checksum.
		   Some other boards trip the checksum.. but then appear as
		   ether address 0. Trap these - AC */

		if ((checksum % 0x100) ||
		    (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
			err = -ENODEV;
			goto out1;
		}

		dev->irq = 10;
		goto found;
	}
#endif
	err = -ENODEV;
	goto out;

found:
	dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
	if (!dev->mem_start) {
		err = -ENOMEM;
		goto out1;
	}

	DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));

	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE,printk(KERN_INFO "%s", version));

	/* The 82596-specific entries in the device structure. */
	dev->open = i596_open;
	dev->stop = i596_close;
	dev->hard_start_xmit = i596_start_xmit;
	dev->set_multicast_list = set_multicast_list;
	dev->tx_timeout = i596_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ml_priv = (void *)(dev->mem_start);

	lp = dev->ml_priv;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
			"lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset((void *) lp, 0, sizeof(struct i596_private));

#ifdef __mc68000__
	cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
	cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
	kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
#endif
	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	err = register_netdev(dev);
	if (err)
		goto out2;
	return dev;
out2:
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */
	kernel_set_cachemode((void *)(dev->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev->mem_start));
out1:
#ifdef ENABLE_APRICOT
	release_region(dev->base_addr, I596_TOTAL_SIZE);
#endif
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	short ioaddr;
	unsigned short status, ack_cmd = 0;
	int handled = 0;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
			i596_error(irq, dev_id);
			return IRQ_HANDLED;
		}
	}
#endif
	if (dev == NULL) {
		printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->lock);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		handled = 1;
		if ((status & 0x8000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
			ptr = lp->cmd_head;

			DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
				       lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						dev->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						dev->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						dev->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						dev->stats.collisions++;
					if ((ptr->status) & 0x1000)
						dev->stats.tx_aborted_errors++;
				}

				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk(KERN_ERR "%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk(KERN_ERR "%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
			case CmdMulticastList:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = ptr->b_next = I596_NULL;
			lp->last_cmd = jiffies;
		}

		ptr = lp->cmd_head;
		while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
		}

		if ((lp->cmd_head != I596_NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		/* Ack the interrupt */

		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x2a] |= 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
#ifdef ENABLE_APRICOT
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_RETVAL(handled);
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	wait_cmd(dev,lp,100,"close2 timed out");

	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x40;
		pcc2[0x2b] = 0x40;	/* Set snooping bits now! */
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int config = 0, cnt;

	DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		dev->name, dev->mc_count,
		dev->flags & IFF_PROMISC  ? "ON" : "OFF",
		dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
		return;

	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		lp->cf_cmd.cmd.command = CmdConfigure;
		i596_add_cmd(dev, &lp->cf_cmd.cmd);
	}

	cnt = dev->mc_count;
	if (cnt > MAX_MC_CNT)
	{
		cnt = MAX_MC_CNT;
		printk(KERN_ERR "%s: Only %d multicast addresses supported",
			dev->name, cnt);
	}

	if (dev->mc_count > 0) {
		struct dev_mc_list *dmi;
		unsigned char *cp;
		struct mc_cmd *cmd;

		if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
			return;
		cmd = &lp->mc_cmd;
		cmd->cmd.command = CmdMulticastList;
		cmd->mc_cnt = dev->mc_count * 6;
		cp = cmd->mc_addrs;
		for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
			memcpy(cp, dmi->dmi_addr, 6);
			if (i596_debug > 1)
				DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
						dev->name, cp));
		}
		i596_add_cmd(dev, &cmd->cmd);
	}
}

#ifdef MODULE
static struct net_device *dev_82596;

#ifdef ENABLE_APRICOT
module_param(irq, int, 0);
MODULE_PARM_DESC(irq, "Apricot IRQ number");
#endif

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");

int __init init_module(void)
{
	if (debug >= 0)
		i596_debug = debug;
	dev_82596 = i82596_probe(-1);
	if (IS_ERR(dev_82596))
		return PTR_ERR(dev_82596);
	return 0;
}

void __exit cleanup_module(void)
{
	unregister_netdev(dev_82596);
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */

	kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev_82596->mem_start));
#ifdef ENABLE_APRICOT
	/* If we don't do this, we can't re-insmod it later. */
	release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
#endif
	free_netdev(dev_82596);
}

#endif				/* MODULE */