/*
 *  Copyright (C) 1994-1998	   Linus Torvalds & authors (see below)
 *  Copyright (C) 1998-2002	   Linux ATA Development
 *				      Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2003		   Red Hat
 *  Copyright (C) 2003-2005, 2007  Bartlomiej Zolnierkiewicz
 */

/*
 *  Mostly written by Mark Lord <mlord@pobox.com>
 *                and Gadi Oxman <gadio@netvision.net.il>
 *                and Andre Hedrick <andre@linux-ide.org>
 *
 * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/leds.h>
#include <linux/ide.h>
#include <linux/hdreg.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

#include "ide-disk.h"

static const u8 ide_rw_cmds[] = {
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
};

static const u8 ide_data_phases[] = {
	TASKFILE_MULTI_IN,
	TASKFILE_MULTI_OUT,
	TASKFILE_IN,
	TASKFILE_OUT,
	TASKFILE_IN_DMA,
	TASKFILE_OUT_DMA,
};

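/*
 * ide_tf_set_cmd() below picks the opcode as ide_rw_cmds[index + lba48 + write],
 * where index is 0 (multi-sector PIO), 4 (single-sector PIO) or 8 (DMA),
 * lba48 adds 2 and write adds 1.  The data phase is then
 * ide_data_phases[index / 2 + write].  For example, a DMA write with LBA48
 * gives ide_rw_cmds[8 + 2 + 1] == ATA_CMD_WRITE_EXT and
 * ide_data_phases[8 / 2 + 1] == TASKFILE_OUT_DMA.
 */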
static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
{
	u8 index, lba48, write;

	lba48 = (task->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
	write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;

	if (dma)
		index = 8;
	else
		index = drive->mult_count ? 0 : 4;

	task->tf.command = ide_rw_cmds[index + lba48 + write];

	if (dma)
		index = 8; /* fixup index */

	task->data_phase = ide_data_phases[index / 2 + write];
}

/*
 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
 * using LBA if supported, or CHS otherwise, to address sectors.
 */
static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
					sector_t block)
{
	ide_hwif_t *hwif	= drive->hwif;
	u16 nsectors		= (u16)rq->nr_sectors;
	u8 lba48		= !!(drive->dev_flags & IDE_DFLAG_LBA48);
	u8 dma			= !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
	ide_task_t		task;
	struct ide_taskfile	*tf = &task.tf;
	ide_startstop_t		rc;

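	/*
	 * Hosts flagged IDE_HFLAG_NO_LBA48_DMA cannot issue 48-bit commands
	 * in DMA mode: beyond the 28-bit boundary fall back to PIO, below it
	 * simply use 28-bit addressing.
	 */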
	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
		if (block + rq->nr_sectors > 1ULL << 28)
			dma = 0;
		else
			lba48 = 0;
	}

	if (!dma) {
		ide_init_sg_cmd(drive, rq);
		ide_map_sg(drive, rq);
	}

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

	if (drive->dev_flags & IDE_DFLAG_LBA) {
		if (lba48) {
			pr_debug("%s: LBA=0x%012llx\n", drive->name,
					(unsigned long long)block);

			tf->hob_nsect = (nsectors >> 8) & 0xff;
			tf->hob_lbal  = (u8)(block >> 24);
			if (sizeof(block) != 4) {
				tf->hob_lbam = (u8)((u64)block >> 32);
				tf->hob_lbah = (u8)((u64)block >> 40);
			}

			tf->nsect  = nsectors & 0xff;
			tf->lbal   = (u8) block;
			tf->lbam   = (u8)(block >>  8);
			tf->lbah   = (u8)(block >> 16);

			task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
		} else {
			tf->nsect  = nsectors & 0xff;
			tf->lbal   = block;
			tf->lbam   = block >>= 8;
			tf->lbah   = block >>= 8;
			tf->device = (block >> 8) & 0xf;
		}

		tf->device |= ATA_LBA;
	} else {
		unsigned int sect, head, cyl, track;

		track = (int)block / drive->sect;
		sect  = (int)block % drive->sect + 1;
		head  = track % drive->head;
		cyl   = track / drive->head;

		pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);

		tf->nsect  = nsectors & 0xff;
		tf->lbal   = sect;
		tf->lbam   = cyl;
		tf->lbah   = cyl >> 8;
		tf->device = head;
	}

	if (rq_data_dir(rq))
		task.tf_flags |= IDE_TFLAG_WRITE;

	ide_tf_set_cmd(drive, &task, dma);
	if (!dma)
		hwif->data_phase = task.data_phase;
	task.rq = rq;

	rc = do_rw_taskfile(drive, &task);

	if (rc == ide_stopped && dma) {
		/* fallback to PIO */
		task.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
		ide_tf_set_cmd(drive, &task, 0);
		hwif->data_phase = task.data_phase;
		ide_init_sg_cmd(drive, rq);
		rc = do_rw_taskfile(drive, &task);
	}

	return rc;
}

/*
 * 268435455  == 137439 MB or 28bit limit
 * 320173056  == 163929 MB or 48bit addressing
 * 1073741822 == 549756 MB or 48bit addressing fake drive
 */
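/*
 * For reference: 268435455 is 2^28 - 1 sectors; at 512 bytes per sector
 * that is 137438953472 bytes, i.e. the ~137439 MB ceiling of 28-bit LBA.
 */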

static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
				      sector_t block)
{
	ide_hwif_t *hwif = drive->hwif;

	BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);

	if (!blk_fs_request(rq)) {
		blk_dump_rq_flags(rq, "ide_do_rw_disk - bad command");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}

	ledtrig_ide_activity();

	pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
		 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
		 (unsigned long long)block, rq->nr_sectors,
		 (unsigned long)rq->buffer);

	if (hwif->rw_disk)
		hwif->rw_disk(drive, rq);

	return __ide_do_rw_disk(drive, rq, block);
}

/*
 * Queries for true maximum capacity of the drive.
 * Returns maximum LBA address (> 0) of the drive, 0 if failed.
 */
static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
{
	ide_task_t args;
	struct ide_taskfile *tf = &args.tf;
	u64 addr = 0;

	/* Create IDE/ATA command request structure */
	memset(&args, 0, sizeof(ide_task_t));
	if (lba48)
		tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
	else
		tf->command = ATA_CMD_READ_NATIVE_MAX;
	tf->device  = ATA_LBA;
	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	if (lba48)
		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
	/* submit command request */
	ide_no_data_taskfile(drive, &args);

	/* if OK, compute maximum address value */
	if ((tf->status & 0x01) == 0)
		addr = ide_get_lba_addr(tf, lba48) + 1;

	return addr;
}

/*
 * Sets maximum virtual LBA address of the drive.
 * Returns new maximum virtual LBA address (> 0) or 0 on failure.
 */
static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
{
	ide_task_t args;
	struct ide_taskfile *tf = &args.tf;
	u64 addr_set = 0;

	addr_req--;
	/* Create IDE/ATA command request structure */
	memset(&args, 0, sizeof(ide_task_t));
	tf->lbal     = (addr_req >>  0) & 0xff;
	tf->lbam     = (addr_req >>= 8) & 0xff;
	tf->lbah     = (addr_req >>= 8) & 0xff;
	if (lba48) {
		tf->hob_lbal = (addr_req >>= 8) & 0xff;
		tf->hob_lbam = (addr_req >>= 8) & 0xff;
		tf->hob_lbah = (addr_req >>= 8) & 0xff;
		tf->command  = ATA_CMD_SET_MAX_EXT;
	} else {
		tf->device   = (addr_req >>= 8) & 0x0f;
		tf->command  = ATA_CMD_SET_MAX;
	}
	tf->device |= ATA_LBA;
	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	if (lba48)
		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
	/* submit command request */
	ide_no_data_taskfile(drive, &args);
	/* if OK, compute maximum address value */
	if ((tf->status & 0x01) == 0)
		addr_set = ide_get_lba_addr(tf, lba48) + 1;

	return addr_set;
}

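/*
 * Convert a sector count to decimal megabytes: n << 9 scales 512-byte
 * sectors to bytes, do_div(n, 1000000) then scales bytes to MB.
 */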
static unsigned long long sectors_to_MB(unsigned long long n)
{
	n <<= 9;		/* make it bytes */
	do_div(n, 1000000);	/* make it MB */
	return n;
}

/*
 * Some disks report total number of sectors instead of
 * maximum sector address.  We list them here.
 */
static const struct drive_list_entry hpa_list[] = {
	{ "ST340823A",	NULL },
	{ "ST320413A",	NULL },
	{ "ST310211A",	NULL },
	{ NULL,		NULL }
};

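/*
 * Check for a Host Protected Area: read the drive's native maximum address
 * and, if it is larger than the currently reported capacity, some sectors
 * are being hidden (typically by firmware/BIOS via SET MAX ADDRESS).  In
 * that case try to restore the full native capacity.
 */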
static void idedisk_check_hpa(ide_drive_t *drive)
{
	unsigned long long capacity, set_max;
	int lba48 = ata_id_lba48_enabled(drive->id);

	capacity = drive->capacity64;

	set_max = idedisk_read_native_max_address(drive, lba48);

	if (ide_in_drive_list(drive->id, hpa_list)) {
		/*
		 * Since we are inclusive wrt to firmware revisions do this
		 * extra check and apply the workaround only when needed.
		 */
		if (set_max == capacity + 1)
			set_max--;
	}

	if (set_max <= capacity)
		return;

	printk(KERN_INFO "%s: Host Protected Area detected.\n"
			 "\tcurrent capacity is %llu sectors (%llu MB)\n"
			 "\tnative  capacity is %llu sectors (%llu MB)\n",
			 drive->name,
			 capacity, sectors_to_MB(capacity),
			 set_max, sectors_to_MB(set_max));

	set_max = idedisk_set_max_address(drive, set_max, lba48);

	if (set_max) {
		drive->capacity64 = set_max;
		printk(KERN_INFO "%s: Host Protected Area disabled.\n",
				 drive->name);
	}
}

static int ide_disk_get_capacity(ide_drive_t *drive)
{
	u16 *id = drive->id;
	int lba;

	if (ata_id_lba48_enabled(id)) {
		/* drive speaks 48-bit LBA */
		lba = 1;
		drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
	} else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
		/* drive speaks 28-bit LBA */
		lba = 1;
		drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		/* drive speaks boring old 28-bit CHS */
		lba = 0;
		drive->capacity64 = drive->cyl * drive->head * drive->sect;
	}

	if (lba) {
		drive->dev_flags |= IDE_DFLAG_LBA;

		/*
		 * If this device supports the Host Protected Area feature set,
		 * then we may need to change our opinion about its capacity.
		 */
		if (ata_id_hpa_enabled(id))
			idedisk_check_hpa(drive);
	}

	/* limit drive capacity to 137GB if LBA48 cannot be used */
	if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
	    drive->capacity64 > 1ULL << 28) {
		printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
		       "%llu sectors (%llu MB)\n",
		       drive->name, (unsigned long long)drive->capacity64,
		       sectors_to_MB(drive->capacity64));
		drive->capacity64 = 1ULL << 28;
	}

	if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
	    (drive->dev_flags & IDE_DFLAG_LBA48)) {
		if (drive->capacity64 > 1ULL << 28) {
			printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
					 " will be used for accessing sectors "
					 "> %u\n", drive->name, 1 << 28);
		} else
			drive->dev_flags &= ~IDE_DFLAG_LBA48;
	}

	return 0;
}

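/*
 * Build a cache-flush command for the block layer's barrier handling:
 * FLUSH CACHE EXT when the drive has it enabled and the capacity reaches
 * the 28-bit boundary, plain FLUSH CACHE otherwise.  The taskfile is
 * attached to the request via rq->special; IDE_TFLAG_DYN marks it as
 * dynamically allocated so it can be freed after completion.
 */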
static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
{
	ide_drive_t *drive = q->queuedata;
	ide_task_t *task = kmalloc(sizeof(*task), GFP_ATOMIC);

	/* FIXME: map struct ide_taskfile on rq->cmd[] */
	BUG_ON(task == NULL);

	memset(task, 0, sizeof(*task));
	if (ata_id_flush_ext_enabled(drive->id) &&
	    (drive->capacity64 >= (1UL << 28)))
		task->tf.command = ATA_CMD_FLUSH_EXT;
	else
		task->tf.command = ATA_CMD_FLUSH;
	task->tf_flags	 = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE |
			   IDE_TFLAG_DYN;
	task->data_phase = TASKFILE_NO_DATA;

	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq->cmd_flags |= REQ_SOFTBARRIER;
	rq->special = task;
}

ide_devset_get(multcount, mult_count);

/*
 * This is tightly woven into the driver->do_special can not touch.
 * DON'T do it again until a total personality rewrite is committed.
 */
static int set_multcount(ide_drive_t *drive, int arg)
{
	struct request *rq;
	int error;

	if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
		return -EINVAL;

	if (drive->special.b.set_multmode)
		return -EBUSY;

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;

	drive->mult_req = arg;
	drive->special.b.set_multmode = 1;
	error = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	return (drive->mult_count == arg) ? 0 : -EIO;
}

ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);

static int set_nowerr(ide_drive_t *drive, int arg)
{
	if (arg < 0 || arg > 1)
		return -EINVAL;

	if (arg)
		drive->dev_flags |= IDE_DFLAG_NOWERR;
	else
		drive->dev_flags &= ~IDE_DFLAG_NOWERR;

	drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;

	return 0;
}

static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
{
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf.feature = feature;
	task.tf.nsect   = nsect;
	task.tf.command = ATA_CMD_SET_FEATURES;
	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

	return ide_no_data_taskfile(drive, &task);
}

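/*
 * Pick the block layer barrier strategy to match the cache configuration:
 * with the write cache enabled and a usable flush command, ordered requests
 * are drained and followed by a cache flush; with the write cache disabled
 * a plain drain is enough.
 */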
static void update_ordered(ide_drive_t *drive)
{
	u16 *id = drive->id;
	unsigned ordered = QUEUE_ORDERED_NONE;
	prepare_flush_fn *prep_fn = NULL;

	if (drive->dev_flags & IDE_DFLAG_WCACHE) {
		unsigned long long capacity;
		int barrier;
		/*
		 * We must avoid issuing commands a drive does not
		 * understand or we may crash it. We check flush cache
		 * is supported. We also check we have the LBA48 flush
		 * cache if the drive capacity is too large. By this
		 * time we have trimmed the drive capacity if LBA48 is
		 * not available so we don't need to recheck that.
		 */
		capacity = ide_gd_capacity(drive);
		barrier = ata_id_flush_enabled(id) &&
			(drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
			((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
			 capacity <= (1ULL << 28) ||
			 ata_id_flush_ext_enabled(id));

		printk(KERN_INFO "%s: cache flushes %ssupported\n",
		       drive->name, barrier ? "" : "not ");

		if (barrier) {
			ordered = QUEUE_ORDERED_DRAIN_FLUSH;
			prep_fn = idedisk_prepare_flush;
		}
	} else
		ordered = QUEUE_ORDERED_DRAIN;

	blk_queue_ordered(drive->queue, ordered, prep_fn);
}

ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);

static int set_wcache(ide_drive_t *drive, int arg)
{
	int err = 1;

	if (arg < 0 || arg > 1)
		return -EINVAL;

	if (ata_id_flush_enabled(drive->id)) {
		err = ide_do_setfeature(drive,
			arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
		if (err == 0) {
			if (arg)
				drive->dev_flags |= IDE_DFLAG_WCACHE;
			else
				drive->dev_flags &= ~IDE_DFLAG_WCACHE;
		}
	}

	update_ordered(drive);

	return err;
}

static int do_idedisk_flushcache(ide_drive_t *drive)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	if (ata_id_flush_ext_enabled(drive->id))
		args.tf.command = ATA_CMD_FLUSH_EXT;
	else
		args.tf.command = ATA_CMD_FLUSH;
	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	return ide_no_data_taskfile(drive, &args);
}

ide_devset_get(acoustic, acoustic);

static int set_acoustic(ide_drive_t *drive, int arg)
{
	if (arg < 0 || arg > 254)
		return -EINVAL;

	ide_do_setfeature(drive,
		arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);

	drive->acoustic = arg;

	return 0;
}

ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);

/*
 * drive->addressing:
 *	0: 28-bit
 *	1: 48-bit
 *	2: 48-bit capable doing 28-bit
 */
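/*
 * Note that arg == 2 still requires working LBA48 support (the -EIO check
 * below), but then clears IDE_DFLAG_LBA48 so 28-bit commands are used.
 */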
static int set_addressing(ide_drive_t *drive, int arg)
{
	if (arg < 0 || arg > 2)
		return -EINVAL;

	if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
	    ata_id_lba48_enabled(drive->id) == 0))
		return -EIO;

	if (arg == 2)
		arg = 0;

	if (arg)
		drive->dev_flags |= IDE_DFLAG_LBA48;
	else
		drive->dev_flags &= ~IDE_DFLAG_LBA48;

	return 0;
}

ide_ext_devset_rw(acoustic, acoustic);
ide_ext_devset_rw(address, addressing);
ide_ext_devset_rw(multcount, multcount);
ide_ext_devset_rw(wcache, wcache);

ide_ext_devset_rw_sync(nowerr, nowerr);

static int ide_disk_check(ide_drive_t *drive, const char *s)
{
	return 1;
}

static void ide_disk_setup(ide_drive_t *drive)
{
	struct ide_disk_obj *idkp = drive->driver_data;
	struct request_queue *q = drive->queue;
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;
	char *m = (char *)&id[ATA_ID_PROD];
	unsigned long long capacity;

	ide_proc_register_driver(drive, idkp->driver);

	if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
		return;

	if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
		/*
		 * Removable disks (eg. SYQUEST); ignore 'WD' drives
		 */
		if (m[0] != 'W' || m[1] != 'D')
			drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
	}

	(void)set_addressing(drive, 1);

	if (drive->dev_flags & IDE_DFLAG_LBA48) {
		int max_s = 2048;

		if (max_s > hwif->rqsize)
			max_s = hwif->rqsize;

		blk_queue_max_sectors(q, max_s);
	}

	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
		q->max_sectors / 2);

	if (ata_id_is_ssd(id))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);

	/* calculate drive capacity, and select LBA if possible */
	ide_disk_get_capacity(drive);

	/*
	 * if possible, give fdisk access to more of the drive,
	 * by correcting bios_cyls:
	 */
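	/*
	 * With the compatibility 255-head/63-sector translation used for
	 * LBA48 drives below, that is 16065 sectors per cylinder, so
	 * bios_cyl becomes capacity / 16065, capped at 65535.
	 */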
	capacity = ide_gd_capacity(drive);

	if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
		if (ata_id_lba48_enabled(drive->id)) {
			/* compatibility */
			drive->bios_sect = 63;
			drive->bios_head = 255;
		}

		if (drive->bios_sect && drive->bios_head) {
			unsigned int cap0 = capacity; /* truncate to 32 bits */
			unsigned int cylsz, cyl;

			if (cap0 != capacity)
				drive->bios_cyl = 65535;
			else {
				cylsz = drive->bios_sect * drive->bios_head;
				cyl = cap0 / cylsz;
				if (cyl > 65535)
					cyl = 65535;
				if (cyl > drive->bios_cyl)
					drive->bios_cyl = cyl;
			}
		}
	}
	printk(KERN_INFO "%s: %llu sectors (%llu MB)",
			 drive->name, capacity, sectors_to_MB(capacity));

	/* Only print cache size when it was specified */
	if (id[ATA_ID_BUF_SIZE])
		printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);

	printk(KERN_CONT ", CHS=%d/%d/%d\n",
			 drive->bios_cyl, drive->bios_head, drive->bios_sect);

	/* write cache enabled? */
	if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
		drive->dev_flags |= IDE_DFLAG_WCACHE;

	set_wcache(drive, 1);

	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
	    (drive->head == 0 || drive->head > 16)) {
		printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
			drive->name, drive->head);
		drive->dev_flags &= ~IDE_DFLAG_ATTACH;
	} else
		drive->dev_flags |= IDE_DFLAG_ATTACH;
}

static void ide_disk_flush(ide_drive_t *drive)
{
	if (ata_id_flush_enabled(drive->id) == 0 ||
	    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
		return;

	if (do_idedisk_flushcache(drive))
		printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
}

static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
{
	return 0;
}

static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
				 int on)
{
	ide_task_t task;
	int ret;

	if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
		return 0;

	memset(&task, 0, sizeof(task));
	task.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

	ret = ide_no_data_taskfile(drive, &task);

	if (ret)
		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;

	return ret;
}

const struct ide_disk_ops ide_ata_disk_ops = {
	.check		= ide_disk_check,
	.get_capacity	= ide_disk_get_capacity,
	.setup		= ide_disk_setup,
	.flush		= ide_disk_flush,
	.init_media	= ide_disk_init_media,
	.set_doorlock	= ide_disk_set_doorlock,
	.do_request	= ide_do_rw_disk,
	.end_request	= ide_end_request,
	.ioctl		= ide_disk_ioctl,
};