1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4 /*
5  * Authors:	Donald Becker <becker@scyld.com>
6  *		Tommy Thorn <thorn@daimi.aau.dk>
7  *		Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8  *		Alan Cox <gw4pts@gw4pts.ampr.org>
9  *		Peter Bauer <100136.3530@compuserve.com>
10  *		Niibe Yutaka <gniibe@mri.co.jp>
11  *		Nimrod Zimerman <zimerman@mailandnews.com>
12  *
13  * Enhancements:
14  *		Modularization and ifreq/ifmap support by Alan Cox.
15  *		Rewritten by Niibe Yutaka.
16  *		parport-sharing awareness code by Philip Blundell.
17  *		SMP locking by Niibe Yutaka.
18  *		Support for parallel ports with no IRQ (poll mode),
19  *		Modifications to use the parallel port API
20  *		by Nimrod Zimerman.
21  *
22  * Fixes:
23  *		Niibe Yutaka
24  *		  - Module initialization.
25  *		  - MTU fix.
26  *		  - Make sure other end is OK, before sending a packet.
27  *		  - Fix immediate timer problem.
28  *
29  *		Al Viro
30  *		  - Changed {enable,disable}_irq handling to make it work
31  *		    with new ("stack") semantics.
32  *
33  *		This program is free software; you can redistribute it and/or
34  *		modify it under the terms of the GNU General Public License
35  *		as published by the Free Software Foundation; either version
36  *		2 of the License, or (at your option) any later version.
37  */
38 
39 /*
40  * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
41  * inspired by Russ Nelson's parallel port packet driver.
42  *
43  * NOTE:
44  *     Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45  *     Because of the necessity to communicate to DOS machines with the
46  *     Crynwr packet driver, Peter Bauer changed the protocol again
47  *     back to original protocol.
48  *
49  *     This version follows original PLIP protocol.
50  *     So, this PLIP can't communicate the PLIP of Linux v1.0.
51  */
52 
53 /*
54  *     To use with DOS box, please do (Turn on ARP switch):
55  *	# ifconfig plip[0-2] arp
56  */
/* Driver version banner string. */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
58 
59 /*
60   Sources:
61 	Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 	"parallel.asm" parallel port packet driver.
63 
64   The "Crynwr" parallel port standard specifies the following protocol:
65     Trigger by sending nibble '0x8' (this causes interrupt on other end)
66     count-low octet
67     count-high octet
68     ... data octets
69     checksum octet
70   Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 			<wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
72 
73   The packet is encapsulated as if it were ethernet.
74 
75   The cable used is a de facto standard parallel null cable -- sold as
76   a "LapLink" cable by various places.  You'll need a 12-conductor cable to
77   make one yourself.  The wiring is:
78     SLCTIN	17 - 17
79     GROUND	25 - 25
80     D0->ERROR	2 - 15		15 - 2
81     D1->SLCT	3 - 13		13 - 3
82     D2->PAPOUT	4 - 12		12 - 4
83     D3->ACK	5 - 10		10 - 5
84     D4->BUSY	6 - 11		11 - 6
85   Do not connect the other pins.  They are
86     D5,D6,D7 are 7,8,9
87     STROBE is 1, FEED is 14, INIT is 16
88     extra grounds are 18,19,20,21,22,23,24
89 */
90 
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/string.h>
97 #include <linux/if_ether.h>
98 #include <linux/in.h>
99 #include <linux/errno.h>
100 #include <linux/delay.h>
101 #include <linux/init.h>
102 #include <linux/netdevice.h>
103 #include <linux/etherdevice.h>
104 #include <linux/inetdevice.h>
105 #include <linux/skbuff.h>
106 #include <linux/if_plip.h>
107 #include <linux/workqueue.h>
108 #include <linux/spinlock.h>
109 #include <linux/completion.h>
110 #include <linux/parport.h>
111 #include <linux/bitops.h>
112 
113 #include <net/neighbour.h>
114 
115 #include <asm/system.h>
116 #include <asm/irq.h>
117 #include <asm/byteorder.h>
118 
/* Maximum number of devices to support. */
#define PLIP_MAX  8

/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;

/* Enable/disable the machine IRQ, but only when the port actually has
   one (dev->irq == -1 means the port is driven in polled mode). */
#define ENABLE(irq)  if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)

/* In micro second */
#define PLIP_DELAY_UNIT		   1

/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT	 500

/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT        3000
139 
140 /* Bottom halves */
141 static void plip_kick_bh(struct work_struct *work);
142 static void plip_bh(struct work_struct *work);
143 static void plip_timer_bh(struct work_struct *work);
144 
145 /* Interrupt handler */
146 static void plip_interrupt(void *dev_id);
147 
148 /* Functions for DEV methods */
149 static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
150 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
151                             unsigned short type, const void *daddr,
152 			    const void *saddr, unsigned len);
153 static int plip_hard_header_cache(const struct neighbour *neigh,
154                                   struct hh_cache *hh);
155 static int plip_open(struct net_device *dev);
156 static int plip_close(struct net_device *dev);
157 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
158 static int plip_preempt(void *handle);
159 static void plip_wakeup(void *handle);
160 
/* Overall state of the link: who (if anyone) is driving the wire. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle */
	PLIP_CN_RECEIVE,	/* incoming packet in progress */
	PLIP_CN_SEND,		/* outgoing packet in progress */
	PLIP_CN_CLOSING,	/* transfer finished, closing handshake */
	PLIP_CN_ERROR		/* error recovery: wait for peer to settle */
};
168 
/* Progress through one packet, mirroring the Crynwr on-wire framing:
   trigger, length (LSB then MSB), data octets, checksum octet. */
enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};
177 
/* Which half of the two-nibble per-byte handshake we are in. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};
183 
/* Per-direction transfer state; one instance each for send and receive
   lives in struct net_local. */
struct plip_local {
	enum plip_packet_state state;	/* where we are in the packet */
	enum plip_nibble_state nibble;	/* where we are in the current byte */
	union {
		/* Length is transferred LSB first on the wire, but used as
		   a host-order short (h); the struct keeps b.lsb/b.msb
		   addressable independently of endianness. */
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;
	unsigned short byte;		/* index of the byte in flight */
	unsigned char  checksum;	/* running 8-bit sum over the data */
	unsigned char  data;		/* scratch byte (e.g. received checksum) */
	struct sk_buff *skb;		/* the packet being transferred */
};
206 
/* Per-device private data (netdev_priv). */
struct net_local {
	struct net_device *dev;		/* back pointer to our netdev */
	struct work_struct immediate;	/* runs plip_bh (the state machine) */
	struct delayed_work deferred;	/* runs plip_kick_bh for retries */
	struct delayed_work timer;	/* polling work for IRQ-less ports */
	struct plip_local snd_data;	/* transmit-side state */
	struct plip_local rcv_data;	/* receive-side state */
	struct pardevice *pardev;	/* our parport registration */
	unsigned long  trigger;		/* handshake timeout (PLIP_TRIGGER_WAIT) */
	unsigned long  nibble;		/* per-nibble timeout (PLIP_NIBBLE_WAIT) */
	enum plip_connection_state connection;	/* current link state */
	unsigned short timeout_count;	/* consecutive timeouts so far */
	int is_deferred;		/* a retry is queued via `deferred` */
	int port_owner;			/* we currently own the parport */
	int should_relinquish;		/* release the port once idle */
	spinlock_t lock;		/* protects connection/state fields */
	atomic_t kill_timer;		/* request the polling work to stop */
	struct completion killed_timer_cmp;	/* polling work has stopped */
};
226 
enable_parport_interrupts(struct net_device * dev)227 static inline void enable_parport_interrupts (struct net_device *dev)
228 {
229 	if (dev->irq != -1)
230 	{
231 		struct parport *port =
232 		   ((struct net_local *)netdev_priv(dev))->pardev->port;
233 		port->ops->enable_irq (port);
234 	}
235 }
236 
disable_parport_interrupts(struct net_device * dev)237 static inline void disable_parport_interrupts (struct net_device *dev)
238 {
239 	if (dev->irq != -1)
240 	{
241 		struct parport *port =
242 		   ((struct net_local *)netdev_priv(dev))->pardev->port;
243 		port->ops->disable_irq (port);
244 	}
245 }
246 
write_data(struct net_device * dev,unsigned char data)247 static inline void write_data (struct net_device *dev, unsigned char data)
248 {
249 	struct parport *port =
250 	   ((struct net_local *)netdev_priv(dev))->pardev->port;
251 
252 	port->ops->write_data (port, data);
253 }
254 
read_status(struct net_device * dev)255 static inline unsigned char read_status (struct net_device *dev)
256 {
257 	struct parport *port =
258 	   ((struct net_local *)netdev_priv(dev))->pardev->port;
259 
260 	return port->ops->read_status (port);
261 }
262 
/* Ethernet-style header ops, with PLIP's point-to-point address rewrite
   applied on top (see plip_hard_header / plip_hard_header_cache). */
static const struct header_ops plip_header_ops = {
	.create	= plip_hard_header,
	.cache  = plip_hard_header_cache,
};
267 
/* Network-device method table for a PLIP interface. */
static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_do_ioctl		 = plip_ioctl,
};
274 
275 /* Entry point of PLIP driver.
276    Probe the hardware, and register/initialize the driver.
277 
278    PLIP is rather weird, because of the way it interacts with the parport
279    system.  It is _not_ initialised from Space.c.  Instead, plip_init()
280    is called, and that function makes up a "struct net_device" for each port, and
281    then calls us here.
282 
283    */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	/* PLIP "MAC" addresses are the constant fc:fc:fc:fc:fc:fc. */
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops          = &plip_header_ops;


	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* Ports without an IRQ are serviced by a polling work item. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
313 
314 /* Bottom half handler for the delayed request.
315    This routine is kicked by do_timer().
316    Request `plip_bh' to be invoked. */
317 static void
plip_kick_bh(struct work_struct * work)318 plip_kick_bh(struct work_struct *work)
319 {
320 	struct net_local *nl =
321 		container_of(work, struct net_local, deferred.work);
322 
323 	if (nl->is_deferred)
324 		schedule_work(&nl->immediate);
325 }
326 
327 /* Forward declarations of internal routines */
328 static int plip_none(struct net_device *, struct net_local *,
329 		     struct plip_local *, struct plip_local *);
330 static int plip_receive_packet(struct net_device *, struct net_local *,
331 			       struct plip_local *, struct plip_local *);
332 static int plip_send_packet(struct net_device *, struct net_local *,
333 			    struct plip_local *, struct plip_local *);
334 static int plip_connection_close(struct net_device *, struct net_local *,
335 				 struct plip_local *, struct plip_local *);
336 static int plip_error(struct net_device *, struct net_local *,
337 		      struct plip_local *, struct plip_local *);
338 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
339 				 struct plip_local *snd,
340 				 struct plip_local *rcv,
341 				 int error);
342 
/* Return codes of the per-state handlers below. */
#define OK        0
#define TIMEOUT   1
#define ERROR     2
#define HS_TIMEOUT	3

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

/* One handler per plip_connection_state; indexed by nl->connection. */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
359 
360 /* Bottom half handler of PLIP. */
361 static void
plip_bh(struct work_struct * work)362 plip_bh(struct work_struct *work)
363 {
364 	struct net_local *nl = container_of(work, struct net_local, immediate);
365 	struct plip_local *snd = &nl->snd_data;
366 	struct plip_local *rcv = &nl->rcv_data;
367 	plip_func f;
368 	int r;
369 
370 	nl->is_deferred = 0;
371 	f = connection_state_table[nl->connection];
372 	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
373 	    && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
374 		nl->is_deferred = 1;
375 		schedule_delayed_work(&nl->deferred, 1);
376 	}
377 }
378 
379 static void
plip_timer_bh(struct work_struct * work)380 plip_timer_bh(struct work_struct *work)
381 {
382 	struct net_local *nl =
383 		container_of(work, struct net_local, timer.work);
384 
385 	if (!(atomic_read (&nl->kill_timer))) {
386 		plip_interrupt (nl->dev);
387 
388 		schedule_delayed_work(&nl->timer, 1);
389 	}
390 	else {
391 		complete(&nl->killed_timer_cmp);
392 	}
393 }
394 
/* Handle a non-OK result from a state handler: count/retry timeouts,
   and once retries are exhausted abort the transfer, drop both skbs,
   quiesce the port and enter PLIP_CN_ERROR for plip_error to recover. */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get more retries (10) than
			   mid-transfer ones (3). */
			if ((error == HS_TIMEOUT
			     && nl->timeout_count <= 10)
			    || nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			/* ERROR from send still has the IRQ enabled; relabel
			   so the HS_TIMEOUT branch below balances it. */
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Retries exhausted (or hard error): tear the transfer down. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		/* IRQ is still enabled in this case -- balance it here. */
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
470 
/* PLIP_CN_NONE handler: the link is idle, nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
477 
/* PLIP_RECEIVE --- receive a byte(two nibbles)
   Returns OK on success, TIMEOUT on timeout.
   *ns_p tracks the handshake phase so a timed-out call can be resumed;
   the assembled byte accumulates in *data_p across the two phases. */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Wait for the low nibble: status bit 0x80 low, read twice
		   to make sure the lines have settled. */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		/* Status bits 3..6 carry the nibble. */
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Wait for the high nibble: status bit 0x80 high. */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */
	case PLIP_NB_2:
		break;
	}
	return OK;
}
526 
527 /*
528  *	Determine the packet's protocol ID. The rule here is that we
529  *	assume 802.3 if the type field is short enough to be a length.
530  *	This is normal practice and works for any 'now in use' protocol.
531  *
532  *	PLIP is ethernet ish but the daddr might not be valid if unicast.
533  *	PLIP fortunately has no bus architecture (its Point-to-point).
534  *
535  *	We can't fix the daddr thing as that quirk (more bug) is embedded
536  *	in far too many old systems not all even running Linux.
537  */
538 
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	/* Group bit set in the destination: broadcast or multicast. */
	if(*eth->h_dest&1)
	{
		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	/* h_proto >= 1536: a true ethertype; smaller values are an
	   802.3 length field. */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
580 
/* PLIP_RECEIVE_PACKET --- receive a packet.
   Resumable state machine: each case falls through to the next; a
   TIMEOUT return leaves rcv->state/rcv->nibble so a later call resumes
   exactly where it stopped. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a packet to send: use the short
			   trigger timeout so a collision resolves quickly. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len
		    || rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Walk the buffer backwards to build the 8-bit checksum. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is pending: go straight to sending. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
696 
/* PLIP_SEND --- send a byte (two nibbles)
   Returns OK on success, TIMEOUT when timeout.
   *ns_p tracks the handshake phase so a timed-out call can resume.
   Per the Crynwr protocol (see the file header): low nibble is sent with
   bit 0x10 raised, high nibble with 0x10 dropped, each after waiting
   for the peer's matching status transition. */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		/* Wait for the peer's ACK: status bit 0x80 goes low. */
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		/* Wait for the peer's ACK: status bit 0x80 goes high. */
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
741 
/* PLIP_SEND_PACKET --- send a packet.
   Resumable state machine (cases fall through); a TIMEOUT return leaves
   snd->state/snd->nibble so a later call resumes where it stopped. */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Make sure the peer is idle before triggering. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				/* Peer acknowledged the trigger. */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled.    */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		/* Walk the buffer backwards to build the 8-bit checksum. */
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
857 
858 static int
plip_connection_close(struct net_device * dev,struct net_local * nl,struct plip_local * snd,struct plip_local * rcv)859 plip_connection_close(struct net_device *dev, struct net_local *nl,
860 		      struct plip_local *snd, struct plip_local *rcv)
861 {
862 	spin_lock_irq(&nl->lock);
863 	if (nl->connection == PLIP_CN_CLOSING) {
864 		nl->connection = PLIP_CN_NONE;
865 		netif_wake_queue (dev);
866 	}
867 	spin_unlock_irq(&nl->lock);
868 	if (nl->should_relinquish) {
869 		nl->should_relinquish = nl->port_owner = 0;
870 		parport_release(nl->pardev);
871 	}
872 	return OK;
873 }
874 
875 /* PLIP_ERROR --- wait till other end settled */
876 static int
plip_error(struct net_device * dev,struct net_local * nl,struct plip_local * snd,struct plip_local * rcv)877 plip_error(struct net_device *dev, struct net_local *nl,
878 	   struct plip_local *snd, struct plip_local *rcv)
879 {
880 	unsigned char status;
881 
882 	status = read_status(dev);
883 	if ((status & 0xf8) == 0x80) {
884 		if (net_debug > 2)
885 			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
886 		nl->connection = PLIP_CN_NONE;
887 		nl->should_relinquish = 0;
888 		netif_start_queue (dev);
889 		enable_parport_interrupts (dev);
890 		ENABLE(dev->irq);
891 		netif_wake_queue (dev);
892 	} else {
893 		nl->is_deferred = 1;
894 		schedule_delayed_work(&nl->deferred, 1);
895 	}
896 
897 	return OK;
898 }
899 
/* Handle the parallel port interrupts.
   Also invoked directly by plip_timer_bh when polling an IRQ-less port. */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	/* A real rx trigger reads 0xc0 on the status lines; anything else
	   is a spurious interrupt (or an idle poll). */
	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through: start receiving like the idle states */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
950 
/* ndo_start_xmit: queue one skb for transmission.
   Returns 0 on acceptance, 1 (busy) when the queue is stopped, the
   parport cannot be claimed, or the packet exceeds the MTU. */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return 1;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return 1;
		nl->port_owner = 1;
	}

	/* Only one packet in flight: stop the queue until send completes. */
	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return 1;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	/* Hand the skb to the state machine and kick plip_bh. */
	spin_lock_irq(&nl->lock);
	dev->trans_start = jiffies;
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return 0;
}
992 
993 static void
plip_rewrite_address(const struct net_device * dev,struct ethhdr * eth)994 plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
995 {
996 	const struct in_device *in_dev = dev->ip_ptr;
997 
998 	if (in_dev) {
999 		/* Any address will do - we take the first */
1000 		const struct in_ifaddr *ifa = in_dev->ifa_list;
1001 		if (ifa) {
1002 			memcpy(eth->h_source, dev->dev_addr, 6);
1003 			memset(eth->h_dest, 0xfc, 2);
1004 			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1005 		}
1006 	}
1007 }
1008 
1009 static int
plip_hard_header(struct sk_buff * skb,struct net_device * dev,unsigned short type,const void * daddr,const void * saddr,unsigned len)1010 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1011 		 unsigned short type, const void *daddr,
1012 		 const void *saddr, unsigned len)
1013 {
1014 	int ret;
1015 
1016 	ret = eth_header(skb, dev, type, daddr, saddr, len);
1017 	if (ret >= 0)
1018 		plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1019 
1020 	return ret;
1021 }
1022 
plip_hard_header_cache(const struct neighbour * neigh,struct hh_cache * hh)1023 static int plip_hard_header_cache(const struct neighbour *neigh,
1024 				  struct hh_cache *hh)
1025 {
1026 	int ret;
1027 
1028 	ret = eth_header_cache(neigh, hh);
1029 	if (ret == 0) {
1030 		struct ethhdr *eth;
1031 
1032 		eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1033 				       HH_DATA_OFF(sizeof(*eth)));
1034 		plip_rewrite_address (neigh->dev, eth);
1035 	}
1036 
1037 	return ret;
1038 }
1039 
/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* No IRQ line: fall back to polling via the delayed work
		   item; plip_close() sets kill_timer to stop it again. */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it).             */

	if ((in_dev=dev->ip_ptr) != NULL) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	/* Let the stack start handing us packets to transmit. */
	netif_start_queue (dev);

	return 0;
}
1103 
/* The inverse routine to plip_open (). */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	/* Stop transmissions, then make sure no interrupt handler is
	   still running before we start tearing state down. */
	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Poll mode: ask the timer work to exit and wait until
		   it has confirmed via the completion. */
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	/* Give the parallel port back to the parport layer. */
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Drop any half-finished send/receive buffers. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1150 
1151 static int
plip_preempt(void * handle)1152 plip_preempt(void *handle)
1153 {
1154 	struct net_device *dev = (struct net_device *)handle;
1155 	struct net_local *nl = netdev_priv(dev);
1156 
1157 	/* Stand our ground if a datagram is on the wire */
1158 	if (nl->connection != PLIP_CN_NONE) {
1159 		nl->should_relinquish = 1;
1160 		return 1;
1161 	}
1162 
1163 	nl->port_owner = 0;	/* Remember that we released the bus */
1164 	return 0;
1165 }
1166 
1167 static void
plip_wakeup(void * handle)1168 plip_wakeup(void *handle)
1169 {
1170 	struct net_device *dev = (struct net_device *)handle;
1171 	struct net_local *nl = netdev_priv(dev);
1172 
1173 	if (nl->port_owner) {
1174 		/* Why are we being woken up? */
1175 		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1176 		if (!parport_claim(nl->pardev))
1177 			/* bus_owner is already set (but why?) */
1178 			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1179 		else
1180 			return;
1181 	}
1182 
1183 	if (!(dev->flags & IFF_UP))
1184 		/* Don't need the port when the interface is down */
1185 		return;
1186 
1187 	if (!parport_claim(nl->pardev)) {
1188 		nl->port_owner = 1;
1189 		/* Clear the data port. */
1190 		write_data (dev, 0x00);
1191 	}
1192 
1193 	return;
1194 }
1195 
1196 static int
plip_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)1197 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1198 {
1199 	struct net_local *nl = netdev_priv(dev);
1200 	struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1201 
1202 	if (cmd != SIOCDEVPLIP)
1203 		return -EOPNOTSUPP;
1204 
1205 	switch(pc->pcmd) {
1206 	case PLIP_GET_TIMEOUT:
1207 		pc->trigger = nl->trigger;
1208 		pc->nibble  = nl->nibble;
1209 		break;
1210 	case PLIP_SET_TIMEOUT:
1211 		if(!capable(CAP_NET_ADMIN))
1212 			return -EPERM;
1213 		nl->trigger = pc->trigger;
1214 		nl->nibble  = pc->nibble;
1215 		break;
1216 	default:
1217 		return -EOPNOTSUPP;
1218 	}
1219 	return 0;
1220 }
1221 
1222 static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1223 static int timid;
1224 
1225 module_param_array(parport, int, NULL, 0);
1226 module_param(timid, int, 0);
1227 MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
1228 
1229 static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1230 
1231 static inline int
plip_searchfor(int list[],int a)1232 plip_searchfor(int list[], int a)
1233 {
1234 	int i;
1235 	for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1236 		if (list[i] == a) return 1;
1237 	}
1238 	return 0;
1239 }
1240 
1241 /* plip_attach() is called (by the parport code) when a port is
1242  * available to use. */
plip_attach(struct parport * port)1243 static void plip_attach (struct parport *port)
1244 {
1245 	static int unit;
1246 	struct net_device *dev;
1247 	struct net_local *nl;
1248 	char name[IFNAMSIZ];
1249 
1250 	if ((parport[0] == -1 && (!timid || !port->devices)) ||
1251 	    plip_searchfor(parport, port->number)) {
1252 		if (unit == PLIP_MAX) {
1253 			printk(KERN_ERR "plip: too many devices\n");
1254 			return;
1255 		}
1256 
1257 		sprintf(name, "plip%d", unit);
1258 		dev = alloc_etherdev(sizeof(struct net_local));
1259 		if (!dev) {
1260 			printk(KERN_ERR "plip: memory squeeze\n");
1261 			return;
1262 		}
1263 
1264 		strcpy(dev->name, name);
1265 
1266 		dev->irq = port->irq;
1267 		dev->base_addr = port->base;
1268 		if (port->irq == -1) {
1269 			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1270 		                 "which is fairly inefficient!\n", port->name);
1271 		}
1272 
1273 		nl = netdev_priv(dev);
1274 		nl->dev = dev;
1275 		nl->pardev = parport_register_device(port, dev->name, plip_preempt,
1276 						 plip_wakeup, plip_interrupt,
1277 						 0, dev);
1278 
1279 		if (!nl->pardev) {
1280 			printk(KERN_ERR "%s: parport_register failed\n", name);
1281 			goto err_free_dev;
1282 			return;
1283 		}
1284 
1285 		plip_init_netdev(dev);
1286 
1287 		if (register_netdev(dev)) {
1288 			printk(KERN_ERR "%s: network register failed\n", name);
1289 			goto err_parport_unregister;
1290 		}
1291 
1292 		printk(KERN_INFO "%s", version);
1293 		if (dev->irq != -1)
1294 			printk(KERN_INFO "%s: Parallel port at %#3lx, "
1295 					 "using IRQ %d.\n",
1296 				         dev->name, dev->base_addr, dev->irq);
1297 		else
1298 			printk(KERN_INFO "%s: Parallel port at %#3lx, "
1299 					 "not using IRQ.\n",
1300 					 dev->name, dev->base_addr);
1301 		dev_plip[unit++] = dev;
1302 	}
1303 	return;
1304 
1305 err_parport_unregister:
1306 	parport_unregister_device(nl->pardev);
1307 err_free_dev:
1308 	free_netdev(dev);
1309 	return;
1310 }
1311 
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do: registered devices are torn down in
	   plip_cleanup_module() instead. */
}
1318 
/* Hooks handed to the parport layer: attach/detach run as parallel
   ports become available or go away. */
static struct parport_driver plip_driver = {
	.name	= "plip",
	.attach = plip_attach,
	.detach = plip_detach
};
1324 
plip_cleanup_module(void)1325 static void __exit plip_cleanup_module (void)
1326 {
1327 	struct net_device *dev;
1328 	int i;
1329 
1330 	parport_unregister_driver (&plip_driver);
1331 
1332 	for (i=0; i < PLIP_MAX; i++) {
1333 		if ((dev = dev_plip[i])) {
1334 			struct net_local *nl = netdev_priv(dev);
1335 			unregister_netdev(dev);
1336 			if (nl->port_owner)
1337 				parport_release(nl->pardev);
1338 			parport_unregister_device(nl->pardev);
1339 			free_netdev(dev);
1340 			dev_plip[i] = NULL;
1341 		}
1342 	}
1343 }
1344 
#ifndef MODULE

/* Next free slot in parport[] for "plip=parportN" boot options. */
static int parport_ptr;

/* Parse the "plip=..." kernel command line option.  Recognized forms:
   "plip=parportN" adds port N to the list of ports to use,
   "plip=timid" enables timid mode, and "plip=" or "plip=0"
   disables the driver entirely (signalled via parport[0] == -2,
   which plip_init() checks). */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);
1378 
1379 #endif /* !MODULE */
1380 
plip_init(void)1381 static int __init plip_init (void)
1382 {
1383 	if (parport[0] == -2)
1384 		return 0;
1385 
1386 	if (parport[0] != -1 && timid) {
1387 		printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1388 		timid = 0;
1389 	}
1390 
1391 	if (parport_register_driver (&plip_driver)) {
1392 		printk (KERN_WARNING "plip: couldn't register driver\n");
1393 		return 1;
1394 	}
1395 
1396 	return 0;
1397 }
1398 
/* Module entry/exit points and license declaration. */
module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");
1402