/* ppa.c   --  low level driver for the IOMEGA PPA3
 * parallel port SCSI host adapter.
 *
 * (The PPA3 is the embedded controller in the ZIP drive.)
 *
 * (c) 1995,1996 Grant R. Guenther, grant@torque.net,
 * under the terms of the GNU General Public License.
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/parport.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>


static void ppa_reset_pulse(unsigned int base);

typedef struct {
	struct pardevice *dev;	/* Parport device entry         */
	int base;		/* Actual port address          */
	int mode;		/* Transfer mode                */
	struct scsi_cmnd *cur_cmd;	/* Current queued command       */
	struct delayed_work ppa_tq;	/* Polling interrupt stuff      */
	unsigned long jstart;	/* Jiffies at start             */
	unsigned long recon_tmo;	/* How many usecs to wait for reconnection (6th bit) */
	unsigned int failed:1;	/* Failure flag                 */
	unsigned wanted:1;	/* Parport sharing busy flag    */
	unsigned int dev_no;	/* Device number		*/
	wait_queue_head_t *waiting;
	struct Scsi_Host *host;
	struct list_head list;
} ppa_struct;

#include  "ppa.h"

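/*
 * scsi_host_alloc() is asked for just enough hostdata to hold one pointer
 * (__ppa_attach() below stores the ppa_struct pointer there), so this
 * accessor simply reads that pointer back out of host->hostdata.
 */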
static inline ppa_struct *ppa_dev(struct Scsi_Host *host)
{
	return *(ppa_struct **)&host->hostdata;
}

static DEFINE_SPINLOCK(arbitration_lock);

static void got_it(ppa_struct *dev)
{
	dev->base = dev->dev->port->base;
	if (dev->cur_cmd)
		dev->cur_cmd->SCp.phase = 1;
	else
		wake_up(dev->waiting);
}

static void ppa_wakeup(void *ref)
{
	ppa_struct *dev = (ppa_struct *) ref;
	unsigned long flags;

	spin_lock_irqsave(&arbitration_lock, flags);
	if (dev->wanted) {
		parport_claim(dev->dev);
		got_it(dev);
		dev->wanted = 0;
	}
	spin_unlock_irqrestore(&arbitration_lock, flags);
	return;
}

static int ppa_pb_claim(ppa_struct *dev)
{
	unsigned long flags;
	int res = 1;
	spin_lock_irqsave(&arbitration_lock, flags);
	if (parport_claim(dev->dev) == 0) {
		got_it(dev);
		res = 0;
	}
	dev->wanted = res;
	spin_unlock_irqrestore(&arbitration_lock, flags);
	return res;
}

static void ppa_pb_dismiss(ppa_struct *dev)
{
	unsigned long flags;
	int wanted;
	spin_lock_irqsave(&arbitration_lock, flags);
	wanted = dev->wanted;
	dev->wanted = 0;
	spin_unlock_irqrestore(&arbitration_lock, flags);
	if (!wanted)
		parport_release(dev->dev);
}

static inline void ppa_pb_release(ppa_struct *dev)
{
	parport_release(dev->dev);
}

/*
 * Start of Chipset kludges
 */

/* This is to give the ppa driver a way to modify the timings (and other
 * parameters) by writing to the /proc/scsi/ppa/0 file.
 * Very simple method really... (Too simple -- no error checking :( )
 * Reason: Kernel hackers HATE having to unload and reload modules for
 * testing...
 * Also gives a method to use a script to obtain optimum timings (TODO)
 */

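/*
 * Example usage (the mode value corresponds to the PPA_MODE_* constants
 * from ppa.h, recon_tmo is in microseconds):
 *
 *   echo "mode=3"        > /proc/scsi/ppa/0
 *   echo "recon_tmo=500" > /proc/scsi/ppa/0
 */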
static inline int ppa_write_info(struct Scsi_Host *host, char *buffer, int length)
{
	ppa_struct *dev = ppa_dev(host);
	unsigned long x;

	if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) {
		x = simple_strtoul(buffer + 5, NULL, 0);
		dev->mode = x;
		return length;
	}
	if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) {
		x = simple_strtoul(buffer + 10, NULL, 0);
		dev->recon_tmo = x;
		printk(KERN_INFO "ppa: recon_tmo set to %ld\n", x);
		return length;
	}
	printk(KERN_WARNING "ppa /proc: invalid variable\n");
	return -EINVAL;
}

static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	ppa_struct *dev = ppa_dev(host);

	seq_printf(m, "Version : %s\n", PPA_VERSION);
	seq_printf(m, "Parport : %s\n", dev->dev->port->name);
	seq_printf(m, "Mode    : %s\n", PPA_MODE_STRING[dev->mode]);
#if PPA_DEBUG > 0
	seq_printf(m, "recon_tmo : %lu\n", dev->recon_tmo);
#endif
	return 0;
}

static int device_check(ppa_struct *dev);

#if PPA_DEBUG > 0
#define ppa_fail(x, y) do {					\
	printk("ppa: ppa_fail(%i) from %s at line %d\n",	\
	       y, __func__, __LINE__);				\
	ppa_fail_func(x, y);					\
} while (0)
static inline void ppa_fail_func(ppa_struct *dev, int error_code)
#else
static inline void ppa_fail(ppa_struct *dev, int error_code)
#endif
{
	/* If we fail a device then we trash status / message bytes */
	if (dev->cur_cmd) {
		dev->cur_cmd->result = error_code << 16;
		dev->failed = 1;
	}
}

/*
 * Wait for the high bit to be set.
 *
 * In principle, this could be tied to an interrupt, but the adapter
 * doesn't appear to be designed to support interrupts.  We spin on
 * the 0x80 ready bit.
 */
static unsigned char ppa_wait(ppa_struct *dev)
{
	int k;
	unsigned short ppb = dev->base;
	unsigned char r;

	k = PPA_SPIN_TMO;
	/* Wait for bits 6 and 7 - PJC */
	for (r = r_str(ppb); ((r & 0xc0) != 0xc0) && (k); k--) {
		udelay(1);
		r = r_str(ppb);
	}

	/*
	 * return some status information.
	 * Semantics: 0xc0 = ZIP wants more data
	 *            0xd0 = ZIP wants to send more data
	 *            0xe0 = ZIP is expecting SCSI command data
	 *            0xf0 = end of transfer, ZIP is sending status
	 */
	if (k)
		return (r & 0xf0);

	/* Counter expired - Time out occurred */
	ppa_fail(dev, DID_TIME_OUT);
	printk(KERN_WARNING "ppa timeout in ppa_wait\n");
	return 0;		/* command timed out */
}

/*
 * Clear EPP Timeout Bit
 */
static inline void epp_reset(unsigned short ppb)
{
	int i;

	i = r_str(ppb);
	w_str(ppb, i);
	w_str(ppb, i & 0xfe);
}

/*
 * Wait for empty ECP fifo (if we are in ECP fifo mode only)
 */
static inline void ecp_sync(ppa_struct *dev)
{
	int i, ppb_hi = dev->dev->port->base_hi;

	if (ppb_hi == 0)
		return;

	if ((r_ecr(ppb_hi) & 0xe0) == 0x60) {	/* mode 011 == ECP fifo mode */
		for (i = 0; i < 100; i++) {
			if (r_ecr(ppb_hi) & 0x01)
				return;
			udelay(5);
		}
		printk(KERN_WARNING "ppa: ECP sync failed as data still present in FIFO.\n");
	}
}

static int ppa_byte_out(unsigned short base, const char *buffer, int len)
{
	int i;

	for (i = len; i; i--) {
		w_dtr(base, *buffer++);
		w_ctr(base, 0xe);
		w_ctr(base, 0xc);
	}
	return 1;		/* All went well - we hope! */
}

static int ppa_byte_in(unsigned short base, char *buffer, int len)
{
	int i;

	for (i = len; i; i--) {
		*buffer++ = r_dtr(base);
		w_ctr(base, 0x27);
		w_ctr(base, 0x25);
	}
	return 1;		/* All went well - we hope! */
}

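/*
 * Nibble mode read: each byte is assembled from two reads of the status
 * register, with the peripheral presenting the high nibble first and then
 * the low nibble on the top four status lines.
 */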
static int ppa_nibble_in(unsigned short base, char *buffer, int len)
{
	for (; len; len--) {
		unsigned char h;

		w_ctr(base, 0x4);
		h = r_str(base) & 0xf0;
		w_ctr(base, 0x6);
		*buffer++ = h | ((r_str(base) & 0xf0) >> 4);
	}
	return 1;		/* All went well - we hope! */
}

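/*
 * ppa_out() and ppa_in() below dispatch on dev->mode: NIBBLE and PS/2 modes
 * use the software byte/nibble loops above, while the EPP modes stream data
 * through the EPP data port at base + 4, using 32-bit (or 16-bit when
 * CONFIG_SCSI_IZIP_EPP16 is set) transfers if the buffer address and length
 * are suitably aligned, and falling back to byte transfers otherwise.
 */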
static int ppa_out(ppa_struct *dev, char *buffer, int len)
{
	int r;
	unsigned short ppb = dev->base;

	r = ppa_wait(dev);

	if ((r & 0x50) != 0x40) {
		ppa_fail(dev, DID_ERROR);
		return 0;
	}
	switch (dev->mode) {
	case PPA_NIBBLE:
	case PPA_PS2:
		/* 8 bit output, with a loop */
		r = ppa_byte_out(ppb, buffer, len);
		break;

	case PPA_EPP_32:
	case PPA_EPP_16:
	case PPA_EPP_8:
		epp_reset(ppb);
		w_ctr(ppb, 0x4);
#ifdef CONFIG_SCSI_IZIP_EPP16
		if (!(((long) buffer | len) & 0x01))
			outsw(ppb + 4, buffer, len >> 1);
#else
		if (!(((long) buffer | len) & 0x03))
			outsl(ppb + 4, buffer, len >> 2);
#endif
		else
			outsb(ppb + 4, buffer, len);
		w_ctr(ppb, 0xc);
		r = !(r_str(ppb) & 0x01);
		w_ctr(ppb, 0xc);
		ecp_sync(dev);
		break;

	default:
		printk(KERN_ERR "PPA: bug in ppa_out()\n");
		r = 0;
	}
	return r;
}

static int ppa_in(ppa_struct *dev, char *buffer, int len)
{
	int r;
	unsigned short ppb = dev->base;

	r = ppa_wait(dev);

	if ((r & 0x50) != 0x50) {
		ppa_fail(dev, DID_ERROR);
		return 0;
	}
	switch (dev->mode) {
	case PPA_NIBBLE:
		/* 4 bit input, with a loop */
		r = ppa_nibble_in(ppb, buffer, len);
		w_ctr(ppb, 0xc);
		break;

	case PPA_PS2:
		/* 8 bit input, with a loop */
		w_ctr(ppb, 0x25);
		r = ppa_byte_in(ppb, buffer, len);
		w_ctr(ppb, 0x4);
		w_ctr(ppb, 0xc);
		break;

	case PPA_EPP_32:
	case PPA_EPP_16:
	case PPA_EPP_8:
		epp_reset(ppb);
		w_ctr(ppb, 0x24);
#ifdef CONFIG_SCSI_IZIP_EPP16
		if (!(((long) buffer | len) & 0x01))
			insw(ppb + 4, buffer, len >> 1);
#else
		if (!(((long) buffer | len) & 0x03))
			insl(ppb + 4, buffer, len >> 2);
#endif
		else
			insb(ppb + 4, buffer, len);
		w_ctr(ppb, 0x2c);
		r = !(r_str(ppb) & 0x01);
		w_ctr(ppb, 0x2c);
		ecp_sync(dev);
		break;

	default:
		printk(KERN_ERR "PPA: bug in ppa_in()\n");
		r = 0;
		break;
	}
	return r;
}

/* end of ppa_io.h */
static inline void ppa_d_pulse(unsigned short ppb, unsigned char b)
{
	w_dtr(ppb, b);
	w_ctr(ppb, 0xc);
	w_ctr(ppb, 0xe);
	w_ctr(ppb, 0xc);
	w_ctr(ppb, 0x4);
	w_ctr(ppb, 0xc);
}

static void ppa_disconnect(ppa_struct *dev)
{
	unsigned short ppb = dev->base;

	ppa_d_pulse(ppb, 0);
	ppa_d_pulse(ppb, 0x3c);
	ppa_d_pulse(ppb, 0x20);
	ppa_d_pulse(ppb, 0xf);
}

static inline void ppa_c_pulse(unsigned short ppb, unsigned char b)
{
	w_dtr(ppb, b);
	w_ctr(ppb, 0x4);
	w_ctr(ppb, 0x6);
	w_ctr(ppb, 0x4);
	w_ctr(ppb, 0xc);
}

static inline void ppa_connect(ppa_struct *dev, int flag)
{
	unsigned short ppb = dev->base;

	ppa_c_pulse(ppb, 0);
	ppa_c_pulse(ppb, 0x3c);
	ppa_c_pulse(ppb, 0x20);
	if ((flag == CONNECT_EPP_MAYBE) && IN_EPP_MODE(dev->mode))
		ppa_c_pulse(ppb, 0xcf);
	else
		ppa_c_pulse(ppb, 0x8f);
}

static int ppa_select(ppa_struct *dev, int target)
{
	int k;
	unsigned short ppb = dev->base;

	/*
	 * Bit 6 (0x40) is the device selected bit.
	 * First we must wait till the current device goes off line...
	 */
	k = PPA_SELECT_TMO;
	do {
		k--;
		udelay(1);
	} while ((r_str(ppb) & 0x40) && (k));
	if (!k)
		return 0;

	w_dtr(ppb, (1 << target));
	w_ctr(ppb, 0xe);
	w_ctr(ppb, 0xc);
	w_dtr(ppb, 0x80);	/* This is NOT the initiator */
	w_ctr(ppb, 0x8);

	k = PPA_SELECT_TMO;
	do {
		k--;
		udelay(1);
	} while (!(r_str(ppb) & 0x40) && (k));
	if (!k)
		return 0;

	return 1;
}

/*
 * This is based on a trace of what the Iomega DOS 'guest' driver does.
 * I've tried several different kinds of parallel ports with guest and
 * coded this to react in the same ways that it does.
 *
 * The return value from this function is just a hint about where the
 * handshaking failed.
 *
 */
static int ppa_init(ppa_struct *dev)
{
	int retv;
	unsigned short ppb = dev->base;

	ppa_disconnect(dev);
	ppa_connect(dev, CONNECT_NORMAL);

	retv = 2;		/* Failed */

	w_ctr(ppb, 0xe);
	if ((r_str(ppb) & 0x08) == 0x08)
		retv--;

	w_ctr(ppb, 0xc);
	if ((r_str(ppb) & 0x08) == 0x00)
		retv--;

	if (!retv)
		ppa_reset_pulse(ppb);
	udelay(1000);		/* Allow devices to settle down */
	ppa_disconnect(dev);
	udelay(1000);		/* Another delay to allow devices to settle */

	if (retv)
		return -EIO;

	return device_check(dev);
}

static inline int ppa_send_command(struct scsi_cmnd *cmd)
{
	ppa_struct *dev = ppa_dev(cmd->device->host);
	int k;

	w_ctr(dev->base, 0x0c);

	for (k = 0; k < cmd->cmd_len; k++)
		if (!ppa_out(dev, &cmd->cmnd[k], 1))
			return 0;
	return 1;
}

/*
 * The bulk flag enables some optimisations in the data transfer loops,
 * it should be true for any command that transfers data in integral
 * numbers of sectors.
 *
 * The driver appears to remain stable if we speed up the parallel port
 * i/o in this function, but not elsewhere.
 */
static int ppa_completion(struct scsi_cmnd *cmd)
{
	/* Return codes:
	 * -1     Error
	 *  0     Told to schedule
	 *  1     Finished data transfer
	 */
	ppa_struct *dev = ppa_dev(cmd->device->host);
	unsigned short ppb = dev->base;
	unsigned long start_jiffies = jiffies;

	unsigned char r, v;
	int fast, bulk, status;

	v = cmd->cmnd[0];
	bulk = ((v == READ_6) ||
		(v == READ_10) || (v == WRITE_6) || (v == WRITE_10));

	/*
	 * We only get here if the drive is ready to communicate,
	 * hence no need for a full ppa_wait.
	 */
	r = (r_str(ppb) & 0xf0);

	while (r != (unsigned char) 0xf0) {
		/*
		 * If we have been running for more than a full timer tick
		 * then take a rest.
		 */
		if (time_after(jiffies, start_jiffies + 1))
			return 0;

		if (cmd->SCp.this_residual <= 0) {
			ppa_fail(dev, DID_ERROR);
			return -1;	/* ERROR_RETURN */
		}

		/* On some hardware we have SCSI disconnected (6th bit low)
		 * for about 100usecs. It is too expensive to wait a
		 * tick on every loop so we busy wait for no more than
		 * 500usecs to give the drive a chance first. We do not
		 * change things for "normal" hardware since generally
		 * the 6th bit is always high.
		 * This makes the CPU load higher on some hardware
		 * but otherwise we can not get more than 50K/secs
		 * on this problem hardware.
		 */
		if ((r & 0xc0) != 0xc0) {
			/* Wait for reconnection should be no more than
			 * jiffy/2 = 5ms = 5000 loops
			 */
			unsigned long k = dev->recon_tmo;
			for (; k && ((r = (r_str(ppb) & 0xf0)) & 0xc0) != 0xc0;
			     k--)
				udelay(1);

			if (!k)
				return 0;
		}

		/* determine if we should use burst I/O */
		fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
		    ? PPA_BURST_SIZE : 1;

		if (r == (unsigned char) 0xc0)
			status = ppa_out(dev, cmd->SCp.ptr, fast);
		else
			status = ppa_in(dev, cmd->SCp.ptr, fast);

		cmd->SCp.ptr += fast;
		cmd->SCp.this_residual -= fast;

		if (!status) {
			ppa_fail(dev, DID_BUS_BUSY);
			return -1;	/* ERROR_RETURN */
		}
		if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
			/* if scatter/gather, advance to the next segment */
			if (cmd->SCp.buffers_residual--) {
				cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
				cmd->SCp.this_residual =
				    cmd->SCp.buffer->length;
				cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
			}
		}
		/* Now check to see if the drive is ready to communicate */
		r = (r_str(ppb) & 0xf0);
		/* If not, drop back down to the scheduler and wait a timer tick */
		if (!(r & 0x80))
			return 0;
	}
	return 1;		/* FINISH_RETURN */
}

/*
 * Since the PPA itself doesn't generate interrupts, we use
 * the scheduler's task queue to generate a stream of call-backs and
 * complete the request when the drive is ready.
 */
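/*
 * Note that a non-zero return from ppa_engine() simply re-arms the delayed
 * work one timer tick later, so a command is processed as a chain of short
 * polling callbacks rather than in one long busy loop.
 */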
static void ppa_interrupt(struct work_struct *work)
{
	ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
	struct scsi_cmnd *cmd = dev->cur_cmd;

	if (!cmd) {
		printk(KERN_ERR "PPA: bug in ppa_interrupt\n");
		return;
	}
	if (ppa_engine(dev, cmd)) {
		schedule_delayed_work(&dev->ppa_tq, 1);
		return;
	}
	/* Command must have completed, hence it is safe to let go... */
#if PPA_DEBUG > 0
	switch ((cmd->result >> 16) & 0xff) {
	case DID_OK:
		break;
	case DID_NO_CONNECT:
		printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
		break;
	case DID_BUS_BUSY:
		printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
		break;
	case DID_TIME_OUT:
		printk(KERN_DEBUG "ppa: unknown timeout\n");
		break;
	case DID_ABORT:
		printk(KERN_DEBUG "ppa: told to abort\n");
		break;
	case DID_PARITY:
		printk(KERN_DEBUG "ppa: parity error (???)\n");
		break;
	case DID_ERROR:
		printk(KERN_DEBUG "ppa: internal driver error\n");
		break;
	case DID_RESET:
		printk(KERN_DEBUG "ppa: told to reset device\n");
		break;
	case DID_BAD_INTR:
		printk(KERN_WARNING "ppa: bad interrupt (???)\n");
		break;
	default:
		printk(KERN_WARNING "ppa: bad return code (%02x)\n",
		       (cmd->result >> 16) & 0xff);
	}
#endif

	if (cmd->SCp.phase > 1)
		ppa_disconnect(dev);

	ppa_pb_dismiss(dev);

	dev->cur_cmd = NULL;

	cmd->scsi_done(cmd);
}

static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
{
	unsigned short ppb = dev->base;
	unsigned char l = 0, h = 0;
	int retv;

	/* First check for any errors that may have occurred
	 * Here we check for internal errors
	 */
	if (dev->failed)
		return 0;

	switch (cmd->SCp.phase) {
	case 0:		/* Phase 0 - Waiting for parport */
		if (time_after(jiffies, dev->jstart + HZ)) {
			/*
			 * We waited more than a second
			 * for parport to call us
			 */
			ppa_fail(dev, DID_BUS_BUSY);
			return 0;
		}
		return 1;	/* wait until ppa_wakeup claims parport */
	case 1:		/* Phase 1 - Connected */
		{		/* Perform a sanity check for cable unplugged */
			int retv = 2;	/* Failed */

			ppa_connect(dev, CONNECT_EPP_MAYBE);

			w_ctr(ppb, 0xe);
			if ((r_str(ppb) & 0x08) == 0x08)
				retv--;

			w_ctr(ppb, 0xc);
			if ((r_str(ppb) & 0x08) == 0x00)
				retv--;

			if (retv) {
				if (time_after(jiffies, dev->jstart + (1 * HZ))) {
					printk(KERN_ERR "ppa: Parallel port cable is unplugged.\n");
					ppa_fail(dev, DID_BUS_BUSY);
					return 0;
				} else {
					ppa_disconnect(dev);
					return 1;	/* Try again in a jiffy */
				}
			}
			cmd->SCp.phase++;
		}
		/* fall through */

	case 2:		/* Phase 2 - We are now talking to the scsi bus */
		if (!ppa_select(dev, scmd_id(cmd))) {
			ppa_fail(dev, DID_NO_CONNECT);
			return 0;
		}
		cmd->SCp.phase++;
		/* fall through */

	case 3:		/* Phase 3 - Ready to accept a command */
		w_ctr(ppb, 0x0c);
		if (!(r_str(ppb) & 0x80))
			return 1;

		if (!ppa_send_command(cmd))
			return 0;
		cmd->SCp.phase++;
		/* fall through */

	case 4:		/* Phase 4 - Setup scatter/gather buffers */
		if (scsi_bufflen(cmd)) {
			cmd->SCp.buffer = scsi_sglist(cmd);
			cmd->SCp.this_residual = cmd->SCp.buffer->length;
			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
		} else {
			cmd->SCp.buffer = NULL;
			cmd->SCp.this_residual = 0;
			cmd->SCp.ptr = NULL;
		}
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.phase++;
		/* fall through */

	case 5:		/* Phase 5 - Data transfer stage */
		w_ctr(ppb, 0x0c);
		if (!(r_str(ppb) & 0x80))
			return 1;

		retv = ppa_completion(cmd);
		if (retv == -1)
			return 0;
		if (retv == 0)
			return 1;
		cmd->SCp.phase++;
		/* fall through */

	case 6:		/* Phase 6 - Read status/message */
		cmd->result = DID_OK << 16;
		/* Check for data overrun */
		if (ppa_wait(dev) != (unsigned char) 0xf0) {
			ppa_fail(dev, DID_ERROR);
			return 0;
		}
		if (ppa_in(dev, &l, 1)) {	/* read status byte */
			/* Check for optional message byte */
			if (ppa_wait(dev) == (unsigned char) 0xf0)
				ppa_in(dev, &h, 1);
			cmd->result =
			    (DID_OK << 16) + (h << 8) + (l & STATUS_MASK);
		}
		return 0;	/* Finished */

	default:
		printk(KERN_ERR "ppa: Invalid scsi phase\n");
	}
	return 0;
}

static int ppa_queuecommand_lck(struct scsi_cmnd *cmd,
		void (*done) (struct scsi_cmnd *))
{
	ppa_struct *dev = ppa_dev(cmd->device->host);

	if (dev->cur_cmd) {
		printk(KERN_ERR "PPA: bug in ppa_queuecommand\n");
		return 0;
	}
	dev->failed = 0;
	dev->jstart = jiffies;
	dev->cur_cmd = cmd;
	cmd->scsi_done = done;
	cmd->result = DID_ERROR << 16;	/* default return code */
	cmd->SCp.phase = 0;	/* bus free */

	schedule_delayed_work(&dev->ppa_tq, 0);

	ppa_pb_claim(dev);

	return 0;
}

static DEF_SCSI_QCMD(ppa_queuecommand)

/*
 * Apparently the disk->capacity attribute is off by 1 sector
 * for all disk drives.  We add the one here, but it should really
 * be done in sd.c.  Even if it gets fixed there, this will still
 * work.
 */
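/*
 * Worked example: a 100 MB ZIP cartridge holds 196608 512-byte sectors, so
 * with the default 64 heads x 32 sectors/track geometry below this reports
 * (196608 + 1) / (64 * 32) = 96 cylinders.
 */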
static int ppa_biosparam(struct scsi_device *sdev, struct block_device *dev,
	      sector_t capacity, int ip[])
{
	ip[0] = 0x40;
	ip[1] = 0x20;
	ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]);
	if (ip[2] > 1024) {
		ip[0] = 0xff;
		ip[1] = 0x3f;
		ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]);
		if (ip[2] > 1023)
			ip[2] = 1023;
	}
	return 0;
}

static int ppa_abort(struct scsi_cmnd *cmd)
{
	ppa_struct *dev = ppa_dev(cmd->device->host);
	/*
	 * There is no method for aborting commands since Iomega
	 * have tied the SCSI_MESSAGE line high in the interface
	 */

	switch (cmd->SCp.phase) {
	case 0:		/* Do not have access to parport */
	case 1:		/* Have not connected to interface */
		dev->cur_cmd = NULL;	/* Forget the problem */
		return SUCCESS;
	default:		/* SCSI command sent, can not abort */
		return FAILED;
	}
}

static void ppa_reset_pulse(unsigned int base)
{
	w_dtr(base, 0x40);
	w_ctr(base, 0x8);
	udelay(30);
	w_ctr(base, 0xc);
}

static int ppa_reset(struct scsi_cmnd *cmd)
{
	ppa_struct *dev = ppa_dev(cmd->device->host);

	if (cmd->SCp.phase)
		ppa_disconnect(dev);
	dev->cur_cmd = NULL;	/* Forget the problem */

	ppa_connect(dev, CONNECT_NORMAL);
	ppa_reset_pulse(dev->base);
	mdelay(1);		/* device settle delay */
	ppa_disconnect(dev);
	mdelay(1);		/* device settle delay */
	return SUCCESS;
}

static int device_check(ppa_struct *dev)
{
	/* This routine looks for a device and then attempts to use EPP
	   to send a command. If all goes as planned then EPP is available. */

	static u8 cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	int loop, old_mode, status, k, ppb = dev->base;
	unsigned char l;

	old_mode = dev->mode;
	for (loop = 0; loop < 8; loop++) {
		/* Attempt to use EPP for Test Unit Ready */
		if ((ppb & 0x0007) == 0x0000)
			dev->mode = PPA_EPP_32;

second_pass:
		ppa_connect(dev, CONNECT_EPP_MAYBE);
		/* Select SCSI device */
		if (!ppa_select(dev, loop)) {
			ppa_disconnect(dev);
			continue;
		}
		printk(KERN_INFO "ppa: Found device at ID %i, Attempting to use %s\n",
		       loop, PPA_MODE_STRING[dev->mode]);

		/* Send SCSI command */
		status = 1;
		w_ctr(ppb, 0x0c);
		for (l = 0; (l < 6) && (status); l++)
			status = ppa_out(dev, cmd, 1);

		if (!status) {
			ppa_disconnect(dev);
			ppa_connect(dev, CONNECT_EPP_MAYBE);
			w_dtr(ppb, 0x40);
			w_ctr(ppb, 0x08);
			udelay(30);
			w_ctr(ppb, 0x0c);
			udelay(1000);
			ppa_disconnect(dev);
			udelay(1000);
			if (dev->mode == PPA_EPP_32) {
				dev->mode = old_mode;
				goto second_pass;
			}
			return -EIO;
		}
		w_ctr(ppb, 0x0c);
		k = 1000000;	/* 1 Second */
		do {
			l = r_str(ppb);
			k--;
			udelay(1);
		} while (!(l & 0x80) && (k));

		l &= 0xf0;

		if (l != 0xf0) {
			ppa_disconnect(dev);
			ppa_connect(dev, CONNECT_EPP_MAYBE);
			ppa_reset_pulse(ppb);
			udelay(1000);
			ppa_disconnect(dev);
			udelay(1000);
			if (dev->mode == PPA_EPP_32) {
				dev->mode = old_mode;
				goto second_pass;
			}
			return -EIO;
		}
		ppa_disconnect(dev);
		printk(KERN_INFO "ppa: Communication established with ID %i using %s\n",
		       loop, PPA_MODE_STRING[dev->mode]);
		ppa_connect(dev, CONNECT_EPP_MAYBE);
		ppa_reset_pulse(ppb);
		udelay(1000);
		ppa_disconnect(dev);
		udelay(1000);
		return 0;
	}
	return -ENODEV;
}

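/*
 * All data is moved by CPU port I/O from kernel-virtual addresses (see the
 * sg_virt() uses above), so high-memory buffers must be bounced into low
 * memory before they reach this driver.
 */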
static int ppa_adjust_queue(struct scsi_device *device)
{
	blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
	return 0;
}

static struct scsi_host_template ppa_template = {
	.module			= THIS_MODULE,
	.proc_name		= "ppa",
	.show_info		= ppa_show_info,
	.write_info		= ppa_write_info,
	.name			= "Iomega VPI0 (ppa) interface",
	.queuecommand		= ppa_queuecommand,
	.eh_abort_handler	= ppa_abort,
	.eh_host_reset_handler	= ppa_reset,
	.bios_param		= ppa_biosparam,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.can_queue		= 1,
	.slave_alloc		= ppa_adjust_queue,
};

/***************************************************************************
 *                   Parallel port probing routines                        *
 ***************************************************************************/

static LIST_HEAD(ppa_hosts);

/*
 * Finds the first available device number that can be allotted to the
 * new ppa device and returns the address of the previous node so that
 * we can add to the tail and have a list in ascending order.
 */

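/*
 * For example, if the list currently holds devices numbered 0 and 2, this
 * returns the node numbered 0, so the caller assigns number 1 to the new
 * device and fills the gap.
 */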
static inline ppa_struct *find_parent(void)
{
	ppa_struct *dev, *par = NULL;
	unsigned int cnt = 0;

	if (list_empty(&ppa_hosts))
		return NULL;

	list_for_each_entry(dev, &ppa_hosts, list) {
		if (dev->dev_no != cnt)
			return par;
		cnt++;
		par = dev;
	}

	return par;
}

static int __ppa_attach(struct parport *pb)
{
	struct Scsi_Host *host;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting);
	DEFINE_WAIT(wait);
	ppa_struct *dev, *temp;
	int ports;
	int modes, ppb, ppb_hi;
	int err = -ENOMEM;
	struct pardev_cb ppa_cb;

	dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->base = -1;
	dev->mode = PPA_AUTODETECT;
	dev->recon_tmo = PPA_RECON_TMO;
	init_waitqueue_head(&waiting);
	temp = find_parent();
	if (temp)
		dev->dev_no = temp->dev_no + 1;

	memset(&ppa_cb, 0, sizeof(ppa_cb));
	ppa_cb.private = dev;
	ppa_cb.wakeup = ppa_wakeup;

	dev->dev = parport_register_dev_model(pb, "ppa", &ppa_cb, dev->dev_no);

	if (!dev->dev)
		goto out;

	/* Claim the bus so it remembers what we do to the control
	 * registers. [ CTR and ECP ]
	 */
	err = -EBUSY;
	dev->waiting = &waiting;
	prepare_to_wait(&waiting, &wait, TASK_UNINTERRUPTIBLE);
	if (ppa_pb_claim(dev))
		schedule_timeout(3 * HZ);
	if (dev->wanted) {
		printk(KERN_ERR "ppa%d: failed to claim parport because "
				"another pardevice has held the port for too "
				"long!\n", pb->number);
		ppa_pb_dismiss(dev);
		dev->waiting = NULL;
		finish_wait(&waiting, &wait);
		goto out1;
	}
	dev->waiting = NULL;
	finish_wait(&waiting, &wait);
	ppb = dev->base = dev->dev->port->base;
	ppb_hi = dev->dev->port->base_hi;
	w_ctr(ppb, 0x0c);
	modes = dev->dev->port->modes;

	/* Mode detection works up the chain of speed
	 * This avoids a nasty if-then-else-if-... tree
	 */
	dev->mode = PPA_NIBBLE;

	if (modes & PARPORT_MODE_TRISTATE)
		dev->mode = PPA_PS2;

	if (modes & PARPORT_MODE_ECP) {
		w_ecr(ppb_hi, 0x20);
		dev->mode = PPA_PS2;
	}
	if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
		w_ecr(ppb_hi, 0x80);

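	/*
	 * The ECR writes above use the chip's mode field (top three bits):
	 * 0x20 selects PS/2 byte mode and 0x80 selects EPP mode, per the
	 * standard ECR encoding.
	 */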
	/* Done configuration */

	err = ppa_init(dev);
	ppa_pb_release(dev);

	if (err)
		goto out1;

	/* now the glue ... */
	if (dev->mode == PPA_NIBBLE || dev->mode == PPA_PS2)
		ports = 3;
	else
		ports = 8;

	INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);

	err = -ENOMEM;
	host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
	if (!host)
		goto out1;
	host->io_port = pb->base;
	host->n_io_port = ports;
	host->dma_channel = -1;
	host->unique_id = pb->number;
	*(ppa_struct **)&host->hostdata = dev;
	dev->host = host;
	list_add_tail(&dev->list, &ppa_hosts);
	err = scsi_add_host(host, NULL);
	if (err)
		goto out2;
	scsi_scan_host(host);
	return 0;
out2:
	list_del_init(&dev->list);
	scsi_host_put(host);
out1:
	parport_unregister_device(dev->dev);
out:
	kfree(dev);
	return err;
}

static void ppa_attach(struct parport *pb)
{
	__ppa_attach(pb);
}

static void ppa_detach(struct parport *pb)
{
	ppa_struct *dev;
	list_for_each_entry(dev, &ppa_hosts, list) {
		if (dev->dev->port == pb) {
			list_del_init(&dev->list);
			scsi_remove_host(dev->host);
			scsi_host_put(dev->host);
			parport_unregister_device(dev->dev);
			kfree(dev);
			break;
		}
	}
}

static struct parport_driver ppa_driver = {
	.name		= "ppa",
	.match_port	= ppa_attach,
	.detach		= ppa_detach,
	.devmodel	= true,
};

static int __init ppa_driver_init(void)
{
	printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
	return parport_register_driver(&ppa_driver);
}

static void __exit ppa_driver_exit(void)
{
	parport_unregister_driver(&ppa_driver);
}

module_init(ppa_driver_init);
module_exit(ppa_driver_exit);
MODULE_LICENSE("GPL");