/*
        pd.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
                            Under the terms of the GNU General Public License.

        This is the high-level driver for parallel port IDE hard
        drives based on chips supported by the paride module.

	By default, the driver will autoprobe for a single parallel
	port IDE drive, but if the individual drive parameters are
	specified, the driver can handle up to 4 drives.

        The behaviour of the pd driver can be altered by setting
        some parameters from the insmod command line.  The following
        parameters are adjustable:

	    drive0  	These four arguments can be arrays of
	    drive1	1-8 integers as follows:
	    drive2
	    drive3	<prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>

			Where,

		<prt>	is the base of the parallel port address for
			the corresponding drive.  (required)

		<pro>   is the protocol number for the adapter that
			supports this drive.  These numbers are
			logged by 'paride' when the protocol modules
			are initialised.  (0 if not given)

		<uni>   for those adapters that support chained
			devices, this is the unit selector for the
			chain of devices on the given port.  It should
			be zero for devices that don't support chaining.
			(0 if not given)

		<mod>   this can be -1 to choose the best mode, or one
			of the mode numbers supported by the adapter.
			(-1 if not given)

		<geo>   this defaults to 0 to indicate that the driver
			should use the CHS geometry provided by the drive
			itself.  If set to 1, the driver will provide
			a logical geometry with 64 heads and 32 sectors
			per track, to be consistent with most SCSI
			drivers.  (0 if not given)

		<sby>   set this to zero to disable the power saving
			standby mode, if needed.  (1 if not given)

		<dly>   some parallel ports require the driver to
			go more slowly.  -1 sets a default value that
			should work with the chosen protocol.  Otherwise,
			set this to a small integer; the larger it is,
			the slower the port i/o.  In some cases, setting
			this to zero will speed up the device. (default -1)

		<slv>   IDE disks can be jumpered to master or slave.
			Set this to 0 to choose the master drive, 1 to
			choose the slave, -1 (the default) to choose the
			first drive found.


	    major       You may use this parameter to override the
			default major number (45) that this driver
			will use.  Be sure to change the device
			name as well.

	    name        This parameter is a character string that
			contains the name the kernel will use for this
			device (in /proc output, for instance).
			(default "pd")

	    cluster	The driver will attempt to aggregate requests
			for adjacent blocks into larger multi-block
			clusters.  The maximum cluster size (in 512
			byte sectors) is set with this parameter.
			(default 64)

	    verbose	This parameter controls the amount of logging
			that the driver will do.  Set it to 0 for
			normal operation, 1 to see autoprobe progress
			messages, or 2 to see additional debugging
			output.  (default 0)

	    nice        This parameter controls the driver's use of
			idle CPU time, at the expense of some speed.

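        For example, a hypothetical single drive on an adapter at
        parallel port base 0x378, with every other field left at its
        default, might be loaded with:

            insmod pd drive0=0x378,0,0,-1,0,1,-1,-1 verbose=1

        (The port address and explicit defaults above are illustrative
        only; since the fields are positional, a shorter form such as
        drive0=0x378 is also accepted.)
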
        If this driver is built into the kernel, you can use the
        following kernel command line parameters, with the same values
        as the corresponding module parameters listed above:

            pd.drive0
            pd.drive1
            pd.drive2
            pd.drive3
            pd.cluster
            pd.nice

        In addition, you can use the parameter pd.disable to disable
        the driver entirely.
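
        For a driver built into the kernel, a comparable (again purely
        illustrative) configuration could be passed on the kernel
        command line as:

            pd.drive0=0x378,0,0,-1,0,1,-1,-1 pd.cluster=64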

*/

/* Changes:

	1.01	GRG 1997.01.24	Restored pd_reset()
				Added eject ioctl
	1.02    GRG 1998.05.06  SMP spinlock changes,
				Added slave support
	1.03    GRG 1998.06.16  Eliminate an Ugh.
	1.04	GRG 1998.08.15  Extra debugging, use HZ in loop timing
	1.05    GRG 1998.09.24  Added jumbo support

*/

#define PD_VERSION      "1.05"
#define PD_MAJOR	45
#define PD_NAME		"pd"
#define PD_UNITS	4

/* Here are things one can override from the insmod command.
   Most are autoprobed by paride unless set here.  Verbose is off
   by default.

*/
#include <linux/types.h>

static int verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
static int nice = 0;
static int disable = 0;

static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };

static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};

enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};

/* end of parameters */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>	/* for the eject ioctl */
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);

module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);

#include "paride.h"

#define PD_BITS    4

/* numbers for "SCSI" geometry */

#define PD_LOG_HEADS    64
#define PD_LOG_SECTS    32

#define PD_ID_OFF       54
#define PD_ID_LEN       14

#define PD_MAX_RETRIES  5
#define PD_TMO          800	/* interrupt timeout in jiffies */
#define PD_SPIN_DEL     50	/* spin delay in micro-seconds  */

#define PD_SPIN         (1000000*PD_TMO)/(HZ*PD_SPIN_DEL)
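/* PD_SPIN is the polling budget used by pd_wait_for(): the timeout PD_TMO,
 * converted from jiffies to microseconds (1000000*PD_TMO/HZ), divided by
 * the per-iteration delay PD_SPIN_DEL.  For example, with HZ=250 this
 * works out to 1000000*800/(250*50) = 64000 iterations of ~50us each,
 * i.e. roughly 3.2 seconds of polling.
 */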

#define STAT_ERR        0x00001
#define STAT_INDEX      0x00002
#define STAT_ECC        0x00004
#define STAT_DRQ        0x00008
#define STAT_SEEK       0x00010
#define STAT_WRERR      0x00020
#define STAT_READY      0x00040
#define STAT_BUSY       0x00080

#define ERR_AMNF        0x00100
#define ERR_TK0NF       0x00200
#define ERR_ABRT        0x00400
#define ERR_MCR         0x00800
#define ERR_IDNF        0x01000
#define ERR_MC          0x02000
#define ERR_UNC         0x04000
#define ERR_TMO         0x10000

#define IDE_READ        	0x20
#define IDE_WRITE       	0x30
#define IDE_READ_VRFY		0x40
#define IDE_INIT_DEV_PARMS	0x91
#define IDE_STANDBY     	0x96
#define IDE_ACKCHANGE   	0xdb
#define IDE_DOORLOCK    	0xde
#define IDE_DOORUNLOCK  	0xdf
#define IDE_IDENTIFY    	0xec
#define IDE_EJECT		0xed

#define PD_NAMELEN	8

struct pd_unit {
	struct pi_adapter pia;	/* interface to paride layer */
	struct pi_adapter *pi;
	int access;		/* count of active opens ... */
	int capacity;		/* Size of this volume in sectors */
	int heads;		/* physical geometry */
	int sectors;
	int cylinders;
	int can_lba;
	int drive;		/* master=0 slave=1 */
	int changed;		/* Have we seen a disk change ? */
	int removable;		/* removable media device  ?  */
	int standby;
	int alt_geom;
	char name[PD_NAMELEN];	/* pda, pdb, etc ... */
	struct gendisk *gd;
	struct blk_mq_tag_set tag_set;
	struct list_head rq_list;
};

static struct pd_unit pd[PD_UNITS];

struct pd_req {
	/* for REQ_OP_DRV_IN: */
	enum action (*func)(struct pd_unit *disk);
};

static char pd_scratch[512];	/* scratch block buffer */

static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
	"READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
	"IDNF", "MC", "UNC", "???", "TMO"
};

static void *par_drv;		/* reference of parport driver */

static inline int status_reg(struct pd_unit *disk)
{
	return pi_read_regr(disk->pi, 1, 6);
}

static inline int read_reg(struct pd_unit *disk, int reg)
{
	return pi_read_regr(disk->pi, 0, reg);
}

static inline void write_status(struct pd_unit *disk, int val)
{
	pi_write_regr(disk->pi, 1, 6, val);
}

static inline void write_reg(struct pd_unit *disk, int reg, int val)
{
	pi_write_regr(disk->pi, 0, reg, val);
}

static inline u8 DRIVE(struct pd_unit *disk)
{
	return 0xa0+0x10*disk->drive;
}

/*  ide command interface */

static void pd_print_error(struct pd_unit *disk, char *msg, int status)
{
	int i;

	printk("%s: %s: status = 0x%x =", disk->name, msg, status);
	for (i = 0; i < ARRAY_SIZE(pd_errs); i++)
		if (status & (1 << i))
			printk(" %s", pd_errs[i]);
	printk("\n");
}

static void pd_reset(struct pd_unit *disk)
{				/* called only for MASTER drive */
	write_status(disk, 4);
	udelay(50);
	write_status(disk, 0);
	udelay(250);
}

#define DBMSG(msg)	((verbose>1)?(msg):NULL)

static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
{				/* polled wait */
	int k, r, e;

	k = 0;
	while (k < PD_SPIN) {
		r = status_reg(disk);
		k++;
		if (((r & w) == w) && !(r & STAT_BUSY))
			break;
		udelay(PD_SPIN_DEL);
	}
	e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
	if (k >= PD_SPIN)
		e |= ERR_TMO;
	if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
		pd_print_error(disk, msg, e);
	return e;
}

static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
{
	write_reg(disk, 6, DRIVE(disk) + h);
	write_reg(disk, 1, 0);		/* the IDE task file */
	write_reg(disk, 2, n);
	write_reg(disk, 3, s);
	write_reg(disk, 4, c0);
	write_reg(disk, 5, c1);
	write_reg(disk, 7, func);

	udelay(1);
}

static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
{
	int c1, c0, h, s;

	if (disk->can_lba) {
		s = block & 255;
		c0 = (block >>= 8) & 255;
		c1 = (block >>= 8) & 255;
		h = ((block >>= 8) & 15) + 0x40;
	} else {
		s = (block % disk->sectors) + 1;
		h = (block /= disk->sectors) % disk->heads;
		c0 = (block /= disk->heads) % 256;
		c1 = (block >>= 8);
	}
	pd_send_command(disk, count, s, h, c0, c1, func);
}
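
/* Worked example (illustrative numbers only): with CHS addressing on a
 * drive reporting 16 heads and 63 sectors per track, logical block 5000
 * maps to sector 24, head 15, cylinder 4, since (4*16 + 15)*63 + 23 = 5000.
 * In LBA mode the 28-bit block number is simply split across the sector,
 * cylinder-low, cylinder-high and head registers, with 0x40 ORed into the
 * drive/head register to select LBA addressing.
 */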

/* The i/o request engine */

enum action {Fail = 0, Ok = 1, Hold, Wait};

static struct request *pd_req;	/* current request */
static enum action (*phase)(void);

static void run_fsm(void);

static void ps_tq_int(struct work_struct *work);

static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);

static void schedule_fsm(void)
{
	if (!nice)
		schedule_delayed_work(&fsm_tq, 0);
	else
		schedule_delayed_work(&fsm_tq, nice-1);
}

static void ps_tq_int(struct work_struct *work)
{
	run_fsm();
}

static enum action do_pd_io_start(void);
static enum action pd_special(void);
static enum action do_pd_read_start(void);
static enum action do_pd_write_start(void);
static enum action do_pd_read_drq(void);
static enum action do_pd_write_done(void);

static int pd_queue;
static int pd_claimed;

static struct pd_unit *pd_current; /* current request's drive */
static PIA *pi_current; /* current request's PIA */

static int set_next_request(void)
{
	struct gendisk *disk;
	struct request_queue *q;
	int old_pos = pd_queue;

	do {
		disk = pd[pd_queue].gd;
		q = disk ? disk->queue : NULL;
		if (++pd_queue == PD_UNITS)
			pd_queue = 0;
		if (q) {
			struct pd_unit *disk = q->queuedata;

			if (list_empty(&disk->rq_list))
				continue;

			pd_req = list_first_entry(&disk->rq_list,
							struct request,
							queuelist);
			list_del_init(&pd_req->queuelist);
			blk_mq_start_request(pd_req);
			break;
		}
	} while (pd_queue != old_pos);

	return pd_req != NULL;
}

static void run_fsm(void)
{
	while (1) {
		enum action res;
		int stop = 0;

		if (!phase) {
			pd_current = pd_req->rq_disk->private_data;
			pi_current = pd_current->pi;
			phase = do_pd_io_start;
		}

		switch (pd_claimed) {
			case 0:
				pd_claimed = 1;
				if (!pi_schedule_claimed(pi_current, run_fsm))
					return;
				fallthrough;
			case 1:
				pd_claimed = 2;
				pi_current->proto->connect(pi_current);
		}

		switch(res = phase()) {
			case Ok: case Fail: {
				blk_status_t err;

				err = res == Ok ? 0 : BLK_STS_IOERR;
				pi_disconnect(pi_current);
				pd_claimed = 0;
				phase = NULL;
				spin_lock_irq(&pd_lock);
				if (!blk_update_request(pd_req, err,
						blk_rq_cur_bytes(pd_req))) {
					__blk_mq_end_request(pd_req, err);
					pd_req = NULL;
					stop = !set_next_request();
				}
				spin_unlock_irq(&pd_lock);
				if (stop)
					return;
				}
				fallthrough;
			case Hold:
				schedule_fsm();
				return;
			case Wait:
				pi_disconnect(pi_current);
				pd_claimed = 0;
		}
	}
}

static int pd_retries = 0;	/* i/o error retry count */
static int pd_block;		/* address of next requested block */
static int pd_count;		/* number of blocks still to do */
static int pd_run;		/* sectors in current cluster */
static char *pd_buf;		/* buffer for request in progress */

static enum action do_pd_io_start(void)
{
	switch (req_op(pd_req)) {
	case REQ_OP_DRV_IN:
		phase = pd_special;
		return pd_special();
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		pd_block = blk_rq_pos(pd_req);
		pd_count = blk_rq_cur_sectors(pd_req);
		if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
			return Fail;
		pd_run = blk_rq_sectors(pd_req);
		pd_buf = bio_data(pd_req->bio);
		pd_retries = 0;
		if (req_op(pd_req) == REQ_OP_READ)
			return do_pd_read_start();
		else
			return do_pd_write_start();
	}
	return Fail;
}

static enum action pd_special(void)
{
	struct pd_req *req = blk_mq_rq_to_pdu(pd_req);

	return req->func(pd_current);
}

static int pd_next_buf(void)
{
	unsigned long saved_flags;

	pd_count--;
	pd_run--;
	pd_buf += 512;
	pd_block++;
	if (!pd_run)
		return 1;
	if (pd_count)
		return 0;
	spin_lock_irqsave(&pd_lock, saved_flags);
	if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
		__blk_mq_end_request(pd_req, 0);
		pd_req = NULL;
		pd_count = 0;
		pd_buf = NULL;
	} else {
		pd_count = blk_rq_cur_sectors(pd_req);
		pd_buf = bio_data(pd_req->bio);
	}
	spin_unlock_irqrestore(&pd_lock, saved_flags);
	return !pd_count;
}

static unsigned long pd_timeout;

static enum action do_pd_read_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
	phase = do_pd_read_drq;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

static enum action do_pd_write_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				return Wait;
			}
			return Fail;
		}
		pi_write_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	phase = do_pd_write_done;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

static inline int pd_ready(void)
{
	return !(status_reg(pd_current) & STAT_BUSY);
}

static enum action do_pd_read_drq(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				phase = do_pd_read_start;
				return Wait;
			}
			return Fail;
		}
		pi_read_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	return Ok;
}

static enum action do_pd_write_done(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			phase = do_pd_write_start;
			return Wait;
		}
		return Fail;
	}
	return Ok;
}

/* special io requests */

/* According to the ATA standard, the default CHS geometry should be
   available following a reset.  Some Western Digital drives come up
   in a mode where only LBA addresses are accepted until the device
   parameters are initialised.
*/

static void pd_init_dev_parms(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
	pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
			IDE_INIT_DEV_PARMS);
	udelay(300);
	pd_wait_for(disk, 0, "Initialise device parameters");
}

static enum action pd_door_lock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}

static enum action pd_door_unlock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}

static enum action pd_eject(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
	pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
	pd_wait_for(disk, 0, DBMSG("before eject"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
	pd_wait_for(disk, 0, DBMSG("after eject"));
	return Ok;
}

static enum action pd_media_check(struct pd_unit *disk)
{
	int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));
	if (!(r & STAT_ERR)) {
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
	} else
		disk->changed = 1;	/* say changed if other error */
	if (r & ERR_MC) {
		disk->changed = 1;
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
		pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
	}
	return Ok;
}

static void pd_standby_off(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before STANDBY"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
	pd_wait_for(disk, 0, DBMSG("after STANDBY"));
}

static enum action pd_identify(struct pd_unit *disk)
{
	int j;
	char id[PD_ID_LEN + 1];

/* WARNING:  here there may be dragons.  reset() applies to both drives,
   but we call it only on probing the MASTER. This should allow most
   common configurations to work, but be warned that a reset can clear
   settings on the SLAVE drive.
*/

	if (disk->drive == 0)
		pd_reset(disk);

	write_reg(disk, 6, DRIVE(disk));
	pd_wait_for(disk, 0, DBMSG("before IDENT"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);

	if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
		return Fail;
	pi_read_block(disk->pi, pd_scratch, 512);
	disk->can_lba = pd_scratch[99] & 2;
	disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12));
	disk->heads = le16_to_cpu(*(__le16 *) (pd_scratch + 6));
	disk->cylinders = le16_to_cpu(*(__le16 *) (pd_scratch + 2));
	if (disk->can_lba)
		disk->capacity = le32_to_cpu(*(__le32 *) (pd_scratch + 120));
	else
		disk->capacity = disk->sectors * disk->heads * disk->cylinders;

	for (j = 0; j < PD_ID_LEN; j++)
		id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
	j = PD_ID_LEN - 1;
	while ((j >= 0) && (id[j] <= 0x20))
		j--;
	j++;
	id[j] = 0;

	disk->removable = pd_scratch[0] & 0x80;

	printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
	       disk->name, id,
	       disk->drive ? "slave" : "master",
	       disk->capacity, disk->capacity / 2048,
	       disk->cylinders, disk->heads, disk->sectors,
	       disk->removable ? "removable" : "fixed");

	if (disk->capacity)
		pd_init_dev_parms(disk);
	if (!disk->standby)
		pd_standby_off(disk);

	return Ok;
}

/* end of io request engine */

static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct pd_unit *disk = hctx->queue->queuedata;

	spin_lock_irq(&pd_lock);
	if (!pd_req) {
		pd_req = bd->rq;
		blk_mq_start_request(pd_req);
	} else
		list_add_tail(&bd->rq->queuelist, &disk->rq_list);
	spin_unlock_irq(&pd_lock);

	run_fsm();
	return BLK_STS_OK;
}

static int pd_special_command(struct pd_unit *disk,
		      enum action (*func)(struct pd_unit *disk))
{
	struct request *rq;
	struct pd_req *req;

	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	req = blk_mq_rq_to_pdu(rq);

	req->func = func;
	blk_execute_rq(disk->gd, rq, 0);
	blk_put_request(rq);
	return 0;
}

/* kernel glue structures */

static int pd_open(struct block_device *bdev, fmode_t mode)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	mutex_lock(&pd_mutex);
	disk->access++;

	if (disk->removable) {
		pd_special_command(disk, pd_media_check);
		pd_special_command(disk, pd_door_lock);
	}
	mutex_unlock(&pd_mutex);
	return 0;
}

static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	if (disk->alt_geom) {
		geo->heads = PD_LOG_HEADS;
		geo->sectors = PD_LOG_SECTS;
		geo->cylinders = disk->capacity / (geo->heads * geo->sectors);
	} else {
		geo->heads = disk->heads;
		geo->sectors = disk->sectors;
		geo->cylinders = disk->cylinders;
	}

	return 0;
}

static int pd_ioctl(struct block_device *bdev, fmode_t mode,
	 unsigned int cmd, unsigned long arg)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	switch (cmd) {
	case CDROMEJECT:
		mutex_lock(&pd_mutex);
		if (disk->access == 1)
			pd_special_command(disk, pd_eject);
		mutex_unlock(&pd_mutex);
		return 0;
	default:
		return -EINVAL;
	}
}

static void pd_release(struct gendisk *p, fmode_t mode)
{
	struct pd_unit *disk = p->private_data;

	mutex_lock(&pd_mutex);
	if (!--disk->access && disk->removable)
		pd_special_command(disk, pd_door_unlock);
	mutex_unlock(&pd_mutex);
}

static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
{
	struct pd_unit *disk = p->private_data;
	int r;
	if (!disk->removable)
		return 0;
	pd_special_command(disk, pd_media_check);
	r = disk->changed;
	disk->changed = 0;
	return r ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static const struct block_device_operations pd_fops = {
	.owner		= THIS_MODULE,
	.open		= pd_open,
	.release	= pd_release,
	.ioctl		= pd_ioctl,
	.compat_ioctl	= pd_ioctl,
	.getgeo		= pd_getgeo,
	.check_events	= pd_check_events,
};

/* probing */

static const struct blk_mq_ops pd_mq_ops = {
	.queue_rq	= pd_queue_rq,
};

static void pd_probe_drive(struct pd_unit *disk)
{
	struct gendisk *p;

	memset(&disk->tag_set, 0, sizeof(disk->tag_set));
	disk->tag_set.ops = &pd_mq_ops;
	disk->tag_set.cmd_size = sizeof(struct pd_req);
	disk->tag_set.nr_hw_queues = 1;
	disk->tag_set.nr_maps = 1;
	disk->tag_set.queue_depth = 2;
	disk->tag_set.numa_node = NUMA_NO_NODE;
	disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;

	if (blk_mq_alloc_tag_set(&disk->tag_set))
		return;

	p = blk_mq_alloc_disk(&disk->tag_set, disk);
	if (IS_ERR(p)) {
		blk_mq_free_tag_set(&disk->tag_set);
		return;
	}
	disk->gd = p;

	strcpy(p->disk_name, disk->name);
	p->fops = &pd_fops;
	p->major = major;
	p->first_minor = (disk - pd) << PD_BITS;
	p->minors = 1 << PD_BITS;
	p->events = DISK_EVENT_MEDIA_CHANGE;
	p->private_data = disk;

	blk_queue_max_hw_sectors(p->queue, cluster);
	blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);

	if (disk->drive == -1) {
		for (disk->drive = 0; disk->drive <= 1; disk->drive++)
			if (pd_special_command(disk, pd_identify) == 0)
				return;
	} else if (pd_special_command(disk, pd_identify) == 0)
		return;
	disk->gd = NULL;
	put_disk(p);
}

static int pd_detect(void)
{
	int found = 0, unit, pd_drive_count = 0;
	struct pd_unit *disk;

	for (unit = 0; unit < PD_UNITS; unit++) {
		int *parm = *drives[unit];
		struct pd_unit *disk = pd + unit;
		disk->pi = &disk->pia;
		disk->access = 0;
		disk->changed = 1;
		disk->capacity = 0;
		disk->drive = parm[D_SLV];
		snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
		disk->alt_geom = parm[D_GEO];
		disk->standby = parm[D_SBY];
		if (parm[D_PRT])
			pd_drive_count++;
		INIT_LIST_HEAD(&disk->rq_list);
	}

	par_drv = pi_register_driver(name);
	if (!par_drv) {
		pr_err("failed to register %s driver\n", name);
		return -1;
	}

	if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
		disk = pd;
		if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
			    PI_PD, verbose, disk->name)) {
			pd_probe_drive(disk);
			if (!disk->gd)
				pi_release(disk->pi);
		}

	} else {
		for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
			int *parm = *drives[unit];
			if (!parm[D_PRT])
				continue;
			if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
				     parm[D_UNI], parm[D_PRO], parm[D_DLY],
				     pd_scratch, PI_PD, verbose, disk->name)) {
				pd_probe_drive(disk);
				if (!disk->gd)
					pi_release(disk->pi);
			}
		}
	}
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		if (disk->gd) {
			set_capacity(disk->gd, disk->capacity);
			add_disk(disk->gd);
			found = 1;
		}
	}
	if (!found) {
		printk("%s: no valid drive found\n", name);
		pi_unregister_driver(par_drv);
	}
	return found;
}

static int __init pd_init(void)
{
	if (disable)
		goto out1;

	if (register_blkdev(major, name))
		goto out1;

	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
	       name, name, PD_VERSION, major, cluster, nice);
	if (!pd_detect())
		goto out2;

	return 0;

out2:
	unregister_blkdev(major, name);
out1:
	return -ENODEV;
}

static void __exit pd_exit(void)
{
	struct pd_unit *disk;
	int unit;
	unregister_blkdev(major, name);
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		struct gendisk *p = disk->gd;
		if (p) {
			disk->gd = NULL;
			del_gendisk(p);
			blk_cleanup_disk(p);
			blk_mq_free_tag_set(&disk->tag_set);
			pi_release(disk->pi);
		}
	}
}

MODULE_LICENSE("GPL");
module_init(pd_init)
module_exit(pd_exit)