/*
        pf.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
                            Under the terms of the GNU General Public License.

        This is the high-level driver for parallel port ATAPI disk
        drives based on chips supported by the paride module.

        By default, the driver will autoprobe for a single parallel
        port ATAPI disk drive, but if their individual parameters are
        specified, the driver can handle up to 4 drives.

        The behaviour of the pf driver can be altered by setting
        some parameters from the insmod command line.  The following
        parameters are adjustable:

            drive0      These four arguments can be arrays of
            drive1      1-7 integers as follows:
            drive2
            drive3      <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly>

                        Where,

                <prt>   is the base of the parallel port address for
                        the corresponding drive.  (required)

                <pro>   is the protocol number for the adapter that
                        supports this drive.  These numbers are
                        logged by 'paride' when the protocol modules
                        are initialised.  (0 if not given)

                <uni>   for those adapters that support chained
                        devices, this is the unit selector for the
                        chain of devices on the given port.  It should
                        be zero for devices that don't support chaining.
                        (0 if not given)

                <mod>   this can be -1 to choose the best mode, or one
                        of the mode numbers supported by the adapter.
                        (-1 if not given)

                <slv>   ATAPI devices can be jumpered to master or slave.
                        Set this to 0 to choose the master drive, 1 to
                        choose the slave, -1 (the default) to choose the
                        first drive found.

                <lun>   Some ATAPI devices support multiple LUNs.
                        One example is the ATAPI PD/CD drive from
                        Matshita/Panasonic.  This device has a
                        CD drive on LUN 0 and a PD drive on LUN 1.
                        By default, the driver will search for the
                        first LUN with a supported device.  Set
                        this parameter to force it to use a specific
                        LUN.  (default -1)

                <dly>   some parallel ports require the driver to
                        go more slowly.  -1 sets a default value that
                        should work with the chosen protocol.  Otherwise,
                        set this to a small integer: the larger it is,
                        the slower the port i/o.  In some cases, setting
                        this to zero will speed up the device. (default -1)

            major       You may use this parameter to override the
                        default major number (47) that this driver
                        will use.  Be sure to change the device
                        name as well.

            name        This parameter is a character string that
                        contains the name the kernel will use for this
                        device (in /proc output, for instance).
                        (default "pf").

            cluster     The driver will attempt to aggregate requests
                        for adjacent blocks into larger multi-block
                        clusters.  The maximum cluster size (in 512
                        byte sectors) is set with this parameter.
                        (default 64)

            verbose     This parameter controls the amount of logging
                        that the driver will do.  Set it to 0 for
                        normal operation or 1 to see autoprobe progress
                        and debugging messages.  (default 0)

            nice        This parameter controls the driver's use of
                        idle CPU time, at the expense of some speed.
                        (default 0)

        If this driver is built into the kernel, you can use the
        following command line parameters, with the same values
        as the corresponding module parameters listed above:

            pf.drive0
            pf.drive1
            pf.drive2
            pf.drive3
            pf.cluster
            pf.nice

        In addition, you can use the parameter pf.disable to disable
        the driver entirely.

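        For example (the port address and protocol number below are
        purely illustrative and must match your own adapter), a single
        drive on a port at 0x378 using protocol module 0 could be
        loaded with:

            insmod pf drive0=0x378,0 verbose=1

        or, with the driver built in, configured on the kernel
        command line with:

            pf.drive0=0x378,0 pf.verbose=1
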
*/

/* Changes:

	1.01	GRG 1998.05.03  Changes for SMP.  Eliminate sti().
				Fix for drives that don't clear STAT_ERR
				until after next CDB delivered.
				Small change in pf_completion to round
				up transfer size.
	1.02    GRG 1998.06.16  Eliminated an Ugh
	1.03    GRG 1998.08.16  Use HZ in loop timings, extra debugging
	1.04    GRG 1998.09.24  Added jumbo support

*/

#define PF_VERSION      "1.04"
#define PF_MAJOR	47
#define PF_NAME		"pf"
#define PF_UNITS	4

#include <linux/types.h>

/* Here are things one can override from the insmod command.
   Most are autoprobed by paride unless set here.  Verbose is off
   by default.
*/

static bool verbose = 0;
static int major = PF_MAJOR;
static char *name = PF_NAME;
static int cluster = 64;
static int nice = 0;
static int disable = 0;

static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 };

static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3};
static int pf_drive_count;

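/* indices into the drive0..drive3 parameter arrays, in the same order
   as the <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly> list documented above */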
enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};

/* end of parameters */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(pf_mutex);
static DEFINE_SPINLOCK(pf_spin_lock);

module_param(verbose, bool, 0644);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);

#include "paride.h"
#include "pseudo.h"

/* constants for faking geometry numbers */

#define PF_FD_MAX	8192	/* use FD geometry under this size */
#define PF_FD_HDS	2
#define PF_FD_SPT	18
#define PF_HD_HDS	64
#define PF_HD_SPT	32

#define PF_MAX_RETRIES  5
#define PF_TMO          800	/* interrupt timeout in jiffies */
#define PF_SPIN_DEL     50	/* spin delay in micro-seconds  */

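/* number of polling iterations: PF_SPIN iterations of PF_SPIN_DEL us
   add up to roughly PF_TMO jiffies */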
#define PF_SPIN         (1000000*PF_TMO)/(HZ*PF_SPIN_DEL)

#define STAT_ERR        0x00001
#define STAT_INDEX      0x00002
#define STAT_ECC        0x00004
#define STAT_DRQ        0x00008
#define STAT_SEEK       0x00010
#define STAT_WRERR      0x00020
#define STAT_READY      0x00040
#define STAT_BUSY       0x00080

#define ATAPI_REQ_SENSE		0x03
#define ATAPI_LOCK		0x1e
#define ATAPI_DOOR		0x1b
#define ATAPI_MODE_SENSE	0x5a
#define ATAPI_CAPACITY		0x25
#define ATAPI_IDENTIFY		0x12
#define ATAPI_READ_10		0x28
#define ATAPI_WRITE_10		0x2a

static int pf_open(struct block_device *bdev, fmode_t mode);
static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd);
static int pf_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static void pf_release(struct gendisk *disk, fmode_t mode);

static int pf_detect(void);
static void do_pf_read(void);
static void do_pf_read_start(void);
static void do_pf_write(void);
static void do_pf_write_start(void);
static void do_pf_read_drq(void);
static void do_pf_write_done(void);

#define PF_NM           0
#define PF_RO           1
#define PF_RW           2

#define PF_NAMELEN      8

struct pf_unit {
	struct pi_adapter pia;	/* interface to paride layer */
	struct pi_adapter *pi;
	int removable;		/* removable media device  ?  */
	int media_status;	/* media present ?  WP ? */
	int drive;		/* drive */
	int lun;
	int access;		/* count of active opens ... */
	int present;		/* device present ? */
	char name[PF_NAMELEN];	/* pf0, pf1, ... */
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct list_head rq_list;
};

static struct pf_unit units[PF_UNITS];

static int pf_identify(struct pf_unit *pf);
static void pf_lock(struct pf_unit *pf, int func);
static void pf_eject(struct pf_unit *pf);
static unsigned int pf_check_events(struct gendisk *disk,
				    unsigned int clearing);

static char pf_scratch[512];	/* scratch block buffer */

/* the variables below are used mainly in the I/O request engine, which
   processes only one request at a time.
*/

static int pf_retries = 0;	/* i/o error retry count */
static int pf_busy = 0;		/* request being processed ? */
static struct request *pf_req;	/* current request */
static int pf_block;		/* address of next requested block */
static int pf_count;		/* number of blocks still to do */
static int pf_run;		/* sectors in current cluster */
static int pf_cmd;		/* current command READ/WRITE */
static struct pf_unit *pf_current;/* unit of current request */
static int pf_mask;		/* stopper for pseudo-int */
static char *pf_buf;		/* buffer for request in progress */
static void *par_drv;		/* reference of parport driver */

/* kernel glue structures */

static const struct block_device_operations pf_fops = {
	.owner		= THIS_MODULE,
	.open		= pf_open,
	.release	= pf_release,
	.ioctl		= pf_ioctl,
	.compat_ioctl	= pf_ioctl,
	.getgeo		= pf_getgeo,
	.check_events	= pf_check_events,
};

static const struct blk_mq_ops pf_mq_ops = {
	.queue_rq	= pf_queue_rq,
};

static void __init pf_init_units(void)
{
	struct pf_unit *pf;
	int unit;

	pf_drive_count = 0;
	for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
		struct gendisk *disk;

		if (blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
				BLK_MQ_F_SHOULD_MERGE))
			continue;

		disk = blk_mq_alloc_disk(&pf->tag_set, pf);
		if (IS_ERR(disk)) {
			blk_mq_free_tag_set(&pf->tag_set);
			continue;
		}

		INIT_LIST_HEAD(&pf->rq_list);
		blk_queue_max_segments(disk->queue, cluster);
		blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
		pf->disk = disk;
		pf->pi = &pf->pia;
		pf->media_status = PF_NM;
		pf->drive = (*drives[unit])[D_SLV];
		pf->lun = (*drives[unit])[D_LUN];
		snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
		disk->major = major;
		disk->first_minor = unit;
		disk->minors = 1;
		strcpy(disk->disk_name, pf->name);
		disk->fops = &pf_fops;
		disk->events = DISK_EVENT_MEDIA_CHANGE;
		if (!(*drives[unit])[D_PRT])
			pf_drive_count++;
	}
}

static int pf_open(struct block_device *bdev, fmode_t mode)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&pf_mutex);
	pf_identify(pf);

	ret = -ENODEV;
	if (pf->media_status == PF_NM)
		goto out;

	ret = -EROFS;
	if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
		goto out;

	ret = 0;
	pf->access++;
	if (pf->removable)
		pf_lock(pf, 1);
out:
	mutex_unlock(&pf_mutex);
	return ret;
}

static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;
	sector_t capacity = get_capacity(pf->disk);

	if (capacity < PF_FD_MAX) {
		geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
		geo->heads = PF_FD_HDS;
		geo->sectors = PF_FD_SPT;
	} else {
		geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
		geo->heads = PF_HD_HDS;
		geo->sectors = PF_HD_SPT;
	}

	return 0;
}

static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;

	if (cmd != CDROMEJECT)
		return -EINVAL;

	if (pf->access != 1)
		return -EBUSY;
	mutex_lock(&pf_mutex);
	pf_eject(pf);
	mutex_unlock(&pf_mutex);

	return 0;
}

static void pf_release(struct gendisk *disk, fmode_t mode)
{
	struct pf_unit *pf = disk->private_data;

	mutex_lock(&pf_mutex);
	if (pf->access <= 0) {
		mutex_unlock(&pf_mutex);
		WARN_ON(1);
		return;
	}

	pf->access--;

	if (!pf->access && pf->removable)
		pf_lock(pf, 0);

	mutex_unlock(&pf_mutex);
}

static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
{
	return DISK_EVENT_MEDIA_CHANGE;
}

static inline int status_reg(struct pf_unit *pf)
{
	return pi_read_regr(pf->pi, 1, 6);
}

static inline int read_reg(struct pf_unit *pf, int reg)
{
	return pi_read_regr(pf->pi, 0, reg);
}

static inline void write_reg(struct pf_unit *pf, int reg, int val)
{
	pi_write_regr(pf->pi, 0, reg, val);
}

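/* Poll the status register until the 'go' bits have cleared and, if
   'stop' is non-zero, one of the 'stop' bits is set.  Returns 0 on
   success, or (error register << 8) | status on error or timeout. */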
static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
{
	int j, r, e, s, p;

	j = 0;
	while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop))))
	       && (j++ < PF_SPIN))
		udelay(PF_SPIN_DEL);

	if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) {
		s = read_reg(pf, 7);
		e = read_reg(pf, 1);
		p = read_reg(pf, 2);
		if (j > PF_SPIN)
			e |= 0x100;
		if (fun)
			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
			       " loop=%d phase=%d\n",
			       pf->name, fun, msg, r, s, e, j, p);
		return (e << 8) + s;
	}
	return 0;
}

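/* Select the drive, set the transfer length and issue an ATAPI packet
   command, then send the 12-byte CDB.  Returns 0 on success, -1 on
   failure (with the paride adapter already disconnected). */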
static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun)
{
	pi_connect(pf->pi);

	write_reg(pf, 6, 0xa0+0x10*pf->drive);

	if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
		pi_disconnect(pf->pi);
		return -1;
	}

	write_reg(pf, 4, dlen % 256);
	write_reg(pf, 5, dlen / 256);
	write_reg(pf, 7, 0xa0);	/* ATAPI packet command */

	if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
		pi_disconnect(pf->pi);
		return -1;
	}

	if (read_reg(pf, 2) != 1) {
		printk("%s: %s: command phase error\n", pf->name, fun);
		pi_disconnect(pf->pi);
		return -1;
	}

	pi_write_block(pf->pi, cmd, 12);

	return 0;
}

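/* Wait for the command to complete; if the drive still signals DRQ in
   the data-in phase, read the remaining data (rounded up to a multiple
   of 4 bytes) before waiting for the final status. */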
static int pf_completion(struct pf_unit *pf, char *buf, char *fun)
{
	int r, s, n;

	r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
		    fun, "completion");

	if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) {
		n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) +
		      3) & 0xfffc);
		pi_read_block(pf->pi, buf, n);
	}

	s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");

	pi_disconnect(pf->pi);

	return (r ? r : s);
}

static void pf_req_sense(struct pf_unit *pf, int quiet)
{
	char rs_cmd[12] =
	    { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
	char buf[16];
	int r;

	r = pf_command(pf, rs_cmd, 16, "Request sense");
	mdelay(1);
	if (!r)
		pf_completion(pf, buf, "Request sense");

	if ((!r) && (!quiet))
		printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
		       pf->name, buf[2] & 0xf, buf[12], buf[13]);
}

static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun)
{
	int r;

	r = pf_command(pf, cmd, dlen, fun);
	mdelay(1);
	if (!r)
		r = pf_completion(pf, buf, fun);
	if (r)
		pf_req_sense(pf, !fun);

	return r;
}

static void pf_lock(struct pf_unit *pf, int func)
{
	char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };

	pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock");
}

static void pf_eject(struct pf_unit *pf)
{
	char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };

	pf_lock(pf, 0);
	pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject");
}

#define PF_RESET_TMO   30	/* in tenths of a second */

static void pf_sleep(int cs)
{
	schedule_timeout_interruptible(cs);
}

/* the ATAPI standard actually specifies the contents of all 7 registers
   after a reset, but the specification is ambiguous concerning the last
   two bytes, and different drives interpret the standard differently.
 */

static int pf_reset(struct pf_unit *pf)
{
	int i, k, flg;
	int expect[5] = { 1, 1, 1, 0x14, 0xeb };

	pi_connect(pf->pi);
	write_reg(pf, 6, 0xa0+0x10*pf->drive);
	write_reg(pf, 7, 8);

	pf_sleep(20 * HZ / 1000);

	k = 0;
	while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY))
		pf_sleep(HZ / 10);

	flg = 1;
	for (i = 0; i < 5; i++)
		flg &= (read_reg(pf, i + 1) == expect[i]);

	if (verbose) {
		printk("%s: Reset (%d) signature = ", pf->name, k);
		for (i = 0; i < 5; i++)
			printk("%3x", read_reg(pf, i + 1));
		if (!flg)
			printk(" (incorrect)");
		printk("\n");
	}

	pi_disconnect(pf->pi);
	return flg - 1;
}

static void pf_mode_sense(struct pf_unit *pf)
{
	char ms_cmd[12] =
	    { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
	char buf[8];

	pf_atapi(pf, ms_cmd, 8, buf, "mode sense");
	pf->media_status = PF_RW;
	if (buf[3] & 0x80)
		pf->media_status = PF_RO;
}

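/* copy a space-padded ASCII field from the identify data, squeezing
   runs of spaces and dropping a trailing space */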
static void xs(char *buf, char *targ, int offs, int len)
{
	int j, k, l;

	j = 0;
	l = 0;
	for (k = 0; k < len; k++)
		if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
			l = targ[j++] = buf[k + offs];
	if (l == 0x20)
		j--;
	targ[j] = 0;
}

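/* read a 32-bit big-endian value from the buffer at the given offset */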
static int xl(char *buf, int offs)
{
	int v, k;

	v = 0;
	for (k = 0; k < 4; k++)
		v = v * 256 + (buf[k + offs] & 0xff);
	return v;
}

static void pf_get_capacity(struct pf_unit *pf)
{
	char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	char buf[8];
	int bs;

	if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) {
		pf->media_status = PF_NM;
		return;
	}
	set_capacity(pf->disk, xl(buf, 0) + 1);
	bs = xl(buf, 4);
	if (bs != 512) {
		set_capacity(pf->disk, 0);
		if (verbose)
			printk("%s: Drive %d, LUN %d,"
			       " unsupported block size %d\n",
			       pf->name, pf->drive, pf->lun, bs);
	}
}

static int pf_identify(struct pf_unit *pf)
{
	int dt, s;
	char *ms[2] = { "master", "slave" };
	char mf[10], id[18];
	char id_cmd[12] =
	    { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
	char buf[36];

	s = pf_atapi(pf, id_cmd, 36, buf, "identify");
	if (s)
		return -1;

	dt = buf[0] & 0x1f;
	if ((dt != 0) && (dt != 7)) {
		if (verbose)
			printk("%s: Drive %d, LUN %d, unsupported type %d\n",
			       pf->name, pf->drive, pf->lun, dt);
		return -1;
	}

	xs(buf, mf, 8, 8);
	xs(buf, id, 16, 16);

	pf->removable = (buf[1] & 0x80);

	pf_mode_sense(pf);
	pf_mode_sense(pf);
	pf_mode_sense(pf);

	pf_get_capacity(pf);

	printk("%s: %s %s, %s LUN %d, type %d",
	       pf->name, mf, id, ms[pf->drive], pf->lun, dt);
	if (pf->removable)
		printk(", removable");
	if (pf->media_status == PF_NM)
		printk(", no media\n");
	else {
		if (pf->media_status == PF_RO)
			printk(", RO");
		printk(", %llu blocks\n",
			(unsigned long long)get_capacity(pf->disk));
	}
	return 0;
}

/*	returns  0 if a drive is detected (with the unit's drive and
	           LUN fields filled in)
	        -1 if drive detection failed
*/
static int pf_probe(struct pf_unit *pf)
{
	if (pf->drive == -1) {
		for (pf->drive = 0; pf->drive <= 1; pf->drive++)
			if (!pf_reset(pf)) {
				if (pf->lun != -1)
					return pf_identify(pf);
				else
					for (pf->lun = 0; pf->lun < 8; pf->lun++)
						if (!pf_identify(pf))
							return 0;
			}
	} else {
		if (pf_reset(pf))
			return -1;
		if (pf->lun != -1)
			return pf_identify(pf);
		for (pf->lun = 0; pf->lun < 8; pf->lun++)
			if (!pf_identify(pf))
				return 0;
	}
	return -1;
}

static int pf_detect(void)
{
	struct pf_unit *pf = units;
	int k, unit;

	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
	       name, name, PF_VERSION, major, cluster, nice);

	par_drv = pi_register_driver(name);
	if (!par_drv) {
		pr_err("failed to register %s driver\n", name);
		return -1;
	}
	k = 0;
	if (pf_drive_count == 0) {
		if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
			    verbose, pf->name)) {
			if (!pf_probe(pf) && pf->disk) {
				pf->present = 1;
				k++;
			} else
				pi_release(pf->pi);
		}

	} else
		for (unit = 0; unit < PF_UNITS; unit++, pf++) {
			int *conf = *drives[unit];
			if (!conf[D_PRT])
				continue;
			if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
				    conf[D_UNI], conf[D_PRO], conf[D_DLY],
				    pf_scratch, PI_PF, verbose, pf->name)) {
				if (pf->disk && !pf_probe(pf)) {
					pf->present = 1;
					k++;
				} else
					pi_release(pf->pi);
			}
		}
	if (k)
		return 0;

	printk("%s: No ATAPI disk detected\n", name);
	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
		if (!pf->disk)
			continue;
		blk_cleanup_disk(pf->disk);
		blk_mq_free_tag_set(&pf->tag_set);
	}
	pi_unregister_driver(par_drv);
	return -1;
}

/* The i/o request engine */

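/* build and issue a READ(10)/WRITE(10) packet command: block address in
   big-endian order in bytes 2-5, sector count in bytes 7-8 */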
static int pf_start(struct pf_unit *pf, int cmd, int b, int c)
{
	int i;
	char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	for (i = 0; i < 4; i++) {
		io_cmd[5 - i] = b & 0xff;
		b = b >> 8;
	}

	io_cmd[8] = c & 0xff;
	io_cmd[7] = (c >> 8) & 0xff;

	i = pf_command(pf, io_cmd, c * 512, "start i/o");

	mdelay(1);

	return i;
}

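/* true once the drive has dropped BUSY and raised the bits in pf_mask */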
static int pf_ready(void)
{
	return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask));
}

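/* next unit to poll for queued requests (simple round-robin over units) */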
static int pf_queue;

static int set_next_request(void)
{
	struct pf_unit *pf;
	int old_pos = pf_queue;

	do {
		pf = &units[pf_queue];
		if (++pf_queue == PF_UNITS)
			pf_queue = 0;
		if (pf->present && !list_empty(&pf->rq_list)) {
			pf_req = list_first_entry(&pf->rq_list, struct request,
							queuelist);
			list_del_init(&pf_req->queuelist);
			blk_mq_start_request(pf_req);
			break;
		}
	} while (pf_queue != old_pos);

	return pf_req != NULL;
}

static void pf_end_request(blk_status_t err)
{
	if (!pf_req)
		return;
	if (!blk_update_request(pf_req, err, blk_rq_cur_bytes(pf_req))) {
		__blk_mq_end_request(pf_req, err);
		pf_req = NULL;
	}
}

static void pf_request(void)
{
	if (pf_busy)
		return;
repeat:
	if (!pf_req && !set_next_request())
		return;

	pf_current = pf_req->rq_disk->private_data;
	pf_block = blk_rq_pos(pf_req);
	pf_run = blk_rq_sectors(pf_req);
	pf_count = blk_rq_cur_sectors(pf_req);

	if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
		pf_end_request(BLK_STS_IOERR);
		goto repeat;
	}

	pf_cmd = rq_data_dir(pf_req);
	pf_buf = bio_data(pf_req->bio);
	pf_retries = 0;

	pf_busy = 1;
	if (pf_cmd == READ)
		pi_do_claimed(pf_current->pi, do_pf_read);
	else if (pf_cmd == WRITE)
		pi_do_claimed(pf_current->pi, do_pf_write);
	else {
		pf_busy = 0;
		pf_end_request(BLK_STS_IOERR);
		goto repeat;
	}
}

static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct pf_unit *pf = hctx->queue->queuedata;

	spin_lock_irq(&pf_spin_lock);
	list_add_tail(&bd->rq->queuelist, &pf->rq_list);
	pf_request();
	spin_unlock_irq(&pf_spin_lock);

	return BLK_STS_OK;
}

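/* advance to the next 512-byte sector of the current request; returns 1
   when the cluster is finished or the request has been completed */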
static int pf_next_buf(void)
{
	unsigned long saved_flags;

	pf_count--;
	pf_run--;
	pf_buf += 512;
	pf_block++;
	if (!pf_run)
		return 1;
	if (!pf_count) {
		spin_lock_irqsave(&pf_spin_lock, saved_flags);
		pf_end_request(0);
		spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
		if (!pf_req)
			return 1;
		pf_count = blk_rq_cur_sectors(pf_req);
		pf_buf = bio_data(pf_req->bio);
	}
	return 0;
}

static inline void next_request(blk_status_t err)
{
	unsigned long saved_flags;

	spin_lock_irqsave(&pf_spin_lock, saved_flags);
	pf_end_request(err);
	pf_busy = 0;
	pf_request();
	spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
}

/* detach from the calling context - in case the spinlock is held */
static void do_pf_read(void)
{
	ps_set_intr(do_pf_read_start, NULL, 0, nice);
}

static void do_pf_read_start(void)
{
	pf_busy = 1;

	if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) {
		pi_disconnect(pf_current->pi);
		if (pf_retries < PF_MAX_RETRIES) {
			pf_retries++;
			pi_do_claimed(pf_current->pi, do_pf_read_start);
			return;
		}
		next_request(BLK_STS_IOERR);
		return;
	}
	pf_mask = STAT_DRQ;
	ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice);
}

static void do_pf_read_drq(void)
{
	while (1) {
		if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
			    "read block", "completion") & STAT_ERR) {
			pi_disconnect(pf_current->pi);
			if (pf_retries < PF_MAX_RETRIES) {
				pf_req_sense(pf_current, 0);
				pf_retries++;
				pi_do_claimed(pf_current->pi, do_pf_read_start);
				return;
			}
			next_request(BLK_STS_IOERR);
			return;
		}
		pi_read_block(pf_current->pi, pf_buf, 512);
		if (pf_next_buf())
			break;
	}
	pi_disconnect(pf_current->pi);
	next_request(0);
}

static void do_pf_write(void)
{
	ps_set_intr(do_pf_write_start, NULL, 0, nice);
}

static void do_pf_write_start(void)
{
	pf_busy = 1;

	if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) {
		pi_disconnect(pf_current->pi);
		if (pf_retries < PF_MAX_RETRIES) {
			pf_retries++;
			pi_do_claimed(pf_current->pi, do_pf_write_start);
			return;
		}
		next_request(BLK_STS_IOERR);
		return;
	}

	while (1) {
		if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
			    "write block", "data wait") & STAT_ERR) {
			pi_disconnect(pf_current->pi);
			if (pf_retries < PF_MAX_RETRIES) {
				pf_retries++;
				pi_do_claimed(pf_current->pi, do_pf_write_start);
				return;
			}
			next_request(BLK_STS_IOERR);
			return;
		}
		pi_write_block(pf_current->pi, pf_buf, 512);
		if (pf_next_buf())
			break;
	}
	pf_mask = 0;
	ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice);
}

static void do_pf_write_done(void)
{
	if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) {
		pi_disconnect(pf_current->pi);
		if (pf_retries < PF_MAX_RETRIES) {
			pf_retries++;
			pi_do_claimed(pf_current->pi, do_pf_write_start);
			return;
		}
		next_request(BLK_STS_IOERR);
		return;
	}
	pi_disconnect(pf_current->pi);
	next_request(0);
}

static int __init pf_init(void)
{				/* preliminary initialisation */
	struct pf_unit *pf;
	int unit;

	if (disable)
		return -EINVAL;

	pf_init_units();

	if (pf_detect())
		return -ENODEV;
	pf_busy = 0;

	if (register_blkdev(major, name)) {
		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
			if (!pf->disk)
				continue;
			blk_cleanup_queue(pf->disk->queue);
			blk_mq_free_tag_set(&pf->tag_set);
			put_disk(pf->disk);
		}
		return -EBUSY;
	}

	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
		struct gendisk *disk = pf->disk;

		if (!pf->present)
			continue;
		disk->private_data = pf;
		add_disk(disk);
	}
	return 0;
}

static void __exit pf_exit(void)
{
	struct pf_unit *pf;
	int unit;
	unregister_blkdev(major, name);
	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
		if (!pf->disk)
			continue;

		if (pf->present)
			del_gendisk(pf->disk);

		blk_cleanup_queue(pf->disk->queue);
		blk_mq_free_tag_set(&pf->tag_set);
		put_disk(pf->disk);

		if (pf->present)
			pi_release(pf->pi);
	}
}

MODULE_LICENSE("GPL");
module_init(pf_init)
module_exit(pf_exit)