/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */
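
/* Illustrative sketch (an assumption for clarity, not part of the driver):
 * the 'big-endian' quirk described above swaps the two 16-bit halves of a
 * 32-bit value, so 0x12345678 ends up stored as 0x56781234.  A hypothetical
 * helper performing just that transformation would be:
 *
 *	static inline u32 i596_wordswap32(u32 x)
 *	{
 *		return (x << 16) | (x >> 16);
 *	}
 *
 * The SWAP16()/SWAP32() macros used throughout this file are not defined
 * here; they are expected to be supplied by the platform-specific wrapper
 * that includes this code.
 */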

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x, y)	if (i596_debug & (x)) { y; }
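
/* Usage sketch for DEB(): the first argument is one of the DEB_* category
 * flags above (or several OR-ed together) and the second is the statement
 * to execute when that category is enabled in i596_debug, e.g.
 *
 *	DEB(DEB_PROBE, printk(KERN_DEBUG "%s: probing\n", dev->name));
 *
 * runs the printk only when DEB_PROBE is set in i596_debug.
 */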


/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
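
/* For example, init_i596_mem() below uses these to reset the chip and then
 * hand it the system configuration pointer:
 *
 *	mpu_port(dev, PORT_RESET, 0);
 *	...
 *	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
 *	ca(dev);
 *
 * mpu_port() itself is provided by the platform wrapper that includes this
 * file, which is also where the word-swapped write ordering noted above is
 * handled for the boards that need it.
 */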

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define ISCP_BUSY	0x0001

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define	 CUC_START	0x0100
#define	 CUC_RESUME	0x0200
#define	 CUC_SUSPEND    0x0300
#define	 CUC_ABORT	0x0400
#define	 RX_START	0x0010
#define	 RX_RESUME	0x0020
#define	 RX_SUSPEND	0x0030
#define	 RX_ABORT	0x0040

#define TX_TIMEOUT	5


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32            ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	u32            next;
	u32            data;
	u32 cache_pad[5];		/* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	u32            b_next;	/* Address from i596 viewpoint */
};
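
/* A sketch of how the two 'next' pointers described above are linked when a
 * command is appended (see i596_add_cmd() below): the CPU-side pointer gets
 * the virtual address of the new command, while the chip-side pointer gets
 * the bus address of its status field:
 *
 *	lp->cmd_tail->v_next = cmd;
 *	lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
 *	DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
 */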

struct tx_cmd {
	struct i596_cmd cmd;
	u32            tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
#ifdef __LP64__
	u32 cache_pad[6];		/* Total 64 bytes... */
#else
	u32 cache_pad[1];		/* Total 32 bytes... */
#endif
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	u32            b_next;	/* Address from i596 viewpoint */
	u32            rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];		/* Total 32 bytes... */
#endif
};

struct i596_rbd {
	/* hardware data */
	unsigned short count;
	unsigned short zero1;
	u32            b_next;
	u32            b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	/* driver data */
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	u32            b_addr;		/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
#ifdef __LP64__
	u32 cache_pad[4];
#endif
};
/* These values are chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	u32           cmd;
	u32           rfd;
	u32           crc_err;
	u32           align_err;
	u32           resource_err;
	u32           over_err;
	u32           rcvdt_err;
	u32           short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	u32 stat;
	u32 scb;
};

struct i596_scp {
	u32 sysbus;
	u32 pad;
	u32 iscp;
};

struct i596_dma {
	struct i596_scp scp			__attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
};

struct i596_private {
	struct i596_dma *dma;
	u32    stat;
	int last_restart;
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	u32    last_cmd;
	int next_tx_cmd;
	int options;
	spinlock_t lock;       /* serialize access to chip */
	dma_addr_t dma_addr;
	void __iomem *mpu_port;
	void __iomem *ca;
};

static const char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif


static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	while (--delcnt && dma->iscp.stat) {
		udelay(10);
		DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
		     dev->name, str, SWAP16(dma->iscp.stat));
		return -1;
	} else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	while (--delcnt && dma->scb.command) {
		udelay(10);
		DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str,
		       SWAP16(dma->scb.status),
		       SWAP16(dma->scb.command));
		return -1;
	} else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
		" .cmd = %08x, .rfd = %08x\n",
	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
		SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
	       " over %x, rcvdt %x, short %x\n",
	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
	cmd = lp->cmd_head;
	while (cmd != NULL) {
		printk(KERN_DEBUG
		       "cmd at %p, .status = %04x, .command = %04x,"
		       " .b_next = %08x\n",
		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
		       SWAP32(cmd->b_next));
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
	do {
		printk(KERN_DEBUG
		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
		       " count %04x\n",
		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
		       SWAP16(rfd->count));
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
	do {
		printk(KERN_DEBUG
		       "   %p .count %04x, b_next %08x, b_data %08x,"
		       " size %04x\n",
			rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
		       SWAP32(rbd->b_data), SWAP16(rbd->size));
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	DMA_INV(dev, dma, sizeof(struct i596_dma));
}


#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
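
/* virt_to_dma() converts a CPU pointer that lies inside the single i596_dma
 * block into the bus address the 82596 must be given, by adding the pointer's
 * offset within the block to the block's base DMA address.  For example, the
 * receive frame list is handed to the chip as:
 *
 *	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
 */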

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);

		if (skb == NULL)
			return -1;
		skb_reserve(skb, 2);
		dma_addr = dma_map_single(dev->dev.parent, skb->data,
					  PKT_BUF_SZ, DMA_FROM_DEVICE);
		rbd->v_next = rbd+1;
		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = SWAP32(dma_addr);
		rbd->size = SWAP16(PKT_BUF_SZ);
	}
	lp->rbd_head = dma->rbds;
	rbd = dma->rbds + rx_ring_size - 1;
	rbd->v_next = dma->rbds;
	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
		rfd->cmd = SWAP16(CMD_FLEX);
	}
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd = dma->rfds;
	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
	rfd->v_prev = dma->rfds + rx_ring_size - 1;
	rfd = dma->rfds + rx_ring_size - 1;
	rfd->v_next = dma->rfds;
	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
	return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(dev->dev.parent,
				 (dma_addr_t)SWAP32(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		dma->rfds[i].rbd = I596_NULL;
		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
	}
	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	lp->rbd_head = dma->rbds;
	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
	DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore (&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}


static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			DMA_INV(dev, rbd, sizeof(struct i596_rbd));
		} else {
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
				      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
				      rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			int pkt_len = SWAP16(rbd->count) & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				skb_reserve(newskb, 2);

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
			} else
				skb = netdev_alloc_skb(dev, pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk(KERN_ERR
				       "%s: i596_rx Memory squeeze, dropping packet.\n",
				       dev->name);
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_reserve(skb, 2);
					memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}

static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk(KERN_DEBUG
			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (init_rx_bufs(dev)) {
		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
		return -EAGAIN;
	}
	if (init_i596_mem(dev)) {
		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}
	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	return -EAGAIN;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk(KERN_DEBUG
			       "%s: transmit timed out, status resetting.\n",
			       dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
		DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
		ca (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;
	dev->trans_start = jiffies;

	DEB(DEB_STARTTX, printk(KERN_DEBUG
				"%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return 0;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
	tbd = lp->dma->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: xmit ring full, dropping packet.\n",
				       dev->name));
		dev->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = SWAP16(EOF | length);

		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
						  skb->len, DMA_TO_DEVICE);
		tbd->data = SWAP32(tx_cmd->dma_addr);

		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
		DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return 0;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static int __devinit i82596_probe(struct net_device *dev)
{
	int i;
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma;

	/* This is to ensure things have been cache line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
		sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
	if (!dma) {
		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
		return -ENOMEM;
	}

	/* The 82596-specific entries in the device structure. */
	dev->open = i596_open;
	dev->stop = i596_close;
	dev->hard_start_xmit = i596_start_xmit;
	dev->set_multicast_list = set_multicast_list;
	dev->tx_timeout = i596_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = i596_poll_controller;
#endif

	memset(dma, 0, sizeof(struct i596_dma));
	lp->dma = dma;

	dma->scb.command = 0;
	dma->scb.cmd = I596_NULL;
	dma->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

	i = register_netdev(dev);
	if (i) {
		DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
				    (void *)dma, lp->dma_addr);
		return i;
	}

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
			      dev->name, dev->base_addr));
	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
	DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO
			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
			     dev->name, dma, (int)sizeof(struct i596_dma),
			     &dma->scb));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	i596_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	struct i596_dma *dma;
	unsigned short status, ack_cmd = 0;

	lp = netdev_priv(dev);
	dma = lp->dma;

	spin_lock (&lp->lock);

	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	status = SWAP16(dma->scb.status);

	DEB(DEB_INTS, printk(KERN_DEBUG
			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, dev->irq, status));

	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: interrupt with no events\n",
				       dev->name));
		spin_unlock (&lp->lock);
		return IRQ_NONE;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt completed command.\n",
				   dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt command unit inactive %x.\n",
				   dev->name, status & 0x0700));

		while (lp->cmd_head != NULL) {
			DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,
			    printk(KERN_DEBUG
				   "cmd_head->status = %04x, ->command = %04x\n",
				   SWAP16(lp->cmd_head->status),
				   SWAP16(lp->cmd_head->command)));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch (SWAP16(ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if (ptr->status & SWAP16(STAT_OK)) {
					DEB(DEB_TXADDR,
					    print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if (ptr->status & SWAP16(0x0020))
						dev->stats.collisions++;
					if (!(ptr->status & SWAP16(0x0040)))
						dev->stats.tx_heartbeat_errors++;
					if (ptr->status & SWAP16(0x0400))
						dev->stats.tx_carrier_errors++;
					if (ptr->status & SWAP16(0x0800))
						dev->stats.collisions++;
					if (ptr->status & SWAP16(0x1000))
						dev->stats.tx_aborted_errors++;
				}
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

				if (status & 0x8000) {
					DEB(DEB_ANY,
					    printk(KERN_DEBUG "%s: link ok.\n",
						   dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR
						       "%s: Transceiver problem.\n",
						       dev->name);
					if (status & 0x2000)
						printk(KERN_ERR
						       "%s: Termination problem.\n",
						       dev->name);
					if (status & 0x1000)
						printk(KERN_ERR
						       "%s: Short circuit.\n",
						       dev->name);

					DEB(DEB_TDR,
					    printk(KERN_DEBUG "%s: Time %d.\n",
						   dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/*
				 * Zap command so set_multicast_list() knows
				 * it is free
				 */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
			DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= SWAP16(0x1fff);
			ptr = ptr->v_next;
			DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
		}

		if (lp->cmd_head != NULL)
			ack_cmd |= CUC_START;
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
		DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt received a frame.\n",
				   dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,
				    printk(KERN_DEBUG
					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
					   dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	dma->scb.command = SWAP16(ack_cmd);
	DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	 acknowledgement aside from acking the 82596 might be needed
	 here...  but it's running acceptably without */

	ca(dev);

	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,
	    printk(KERN_DEBUG
		   "%s: Shutting down ethercard, status was %4.4x.\n",
		   dev->name, SWAP16(lp->dma->scb.status)));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "close1 timed out");
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

	ca(dev);

	wait_cmd(dev, lp->dma, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int config = 0, cnt;

	DEB(DEB_MULTI,
	    printk(KERN_DEBUG
		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		   dev->name, dev->mc_count,
		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) &&
	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) &&
	    (dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) &&
	    (dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) &&
	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (dma->cf_cmd.cmd.command)
			printk(KERN_INFO
			       "%s: config change request already queued\n",
			       dev->name);
		else {
			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
			DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &dma->cf_cmd.cmd);
		}
	}

	cnt = dev->mc_count;
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
			dev->name, cnt);
	}

	if (dev->mc_count > 0) {
		struct dev_mc_list *dmi;
		unsigned char *cp;
		struct mc_cmd *cmd;

		cmd = &dma->mc_cmd;
		cmd->cmd.command = SWAP16(CmdMulticastList);
		cmd->mc_cnt = SWAP16(dev->mc_count * 6);
		cp = cmd->mc_addrs;
		for (dmi = dev->mc_list;
		     cnt && dmi != NULL;
		     dmi = dmi->next, cnt--, cp += 6) {
			memcpy(cp, dmi->dmi_addr, 6);
			if (i596_debug > 1)
				DEB(DEB_MULTI,
				    printk(KERN_DEBUG
					   "%s: Adding address %pM\n",
					   dev->name, cp));
		}
		DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
	}
}