1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/log2.h>
60 #include <linux/slab.h>
61 #include <linux/glob.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/platform_device.h>
71 
72 #define CREATE_TRACE_POINTS
73 #include <trace/events/libata.h>
74 
75 #include "libata.h"
76 #include "libata-transport.h"
77 
78 /* debounce timing parameters in msecs { interval, duration, timeout } */
79 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
80 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
81 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
82 
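/*
 * For illustration: each triple above is consumed as { interval, duration,
 * timeout } by the SControl debounce/resume helpers.  A minimal sketch of
 * how a reset path typically picks a table (assuming EH context, with
 * link and deadline provided by the caller):
 *
 *	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *	int rc = sata_link_resume(link, timing, deadline);
 *
 * sata_ehc_deb_timing() returns the hotplug table when ATA_EHI_HOTPLUGGED
 * is set and the normal table otherwise.
 */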
83 const struct ata_port_operations ata_base_port_ops = {
84 	.prereset		= ata_std_prereset,
85 	.postreset		= ata_std_postreset,
86 	.error_handler		= ata_std_error_handler,
87 	.sched_eh		= ata_std_sched_eh,
88 	.end_eh			= ata_std_end_eh,
89 };
90 
91 const struct ata_port_operations sata_port_ops = {
92 	.inherits		= &ata_base_port_ops,
93 
94 	.qc_defer		= ata_std_qc_defer,
95 	.hardreset		= sata_std_hardreset,
96 };
97 
98 static unsigned int ata_dev_init_params(struct ata_device *dev,
99 					u16 heads, u16 sectors);
100 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
101 static void ata_dev_xfermask(struct ata_device *dev);
102 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
103 
104 atomic_t ata_print_id = ATOMIC_INIT(0);
105 
106 struct ata_force_param {
107 	const char	*name;
108 	unsigned int	cbl;
109 	int		spd_limit;
110 	unsigned long	xfer_mask;
111 	unsigned int	horkage_on;
112 	unsigned int	horkage_off;
113 	unsigned int	lflags;
114 };
115 
116 struct ata_force_ent {
117 	int			port;
118 	int			device;
119 	struct ata_force_param	param;
120 };
121 
122 static struct ata_force_ent *ata_force_tbl;
123 static int ata_force_tbl_size;
124 
125 static char ata_force_param_buf[PAGE_SIZE] __initdata;
126 /* param_buf is thrown away after initialization, disallow read */
127 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
128 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
129 
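/*
 * For illustration: the force string is a comma-separated list of
 * "[ID.DEV:]VAL" entries.  Borrowing the example from the ata_force_cbl()
 * comment below, booting with
 *
 *	libata.force=1.00:40c,udma4
 *
 * forces a 40-wire cable type and a UDMA4 transfer mode limit for device
 * 1.00.  See Documentation/kernel-parameters.txt for the full grammar.
 */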
130 static int atapi_enabled = 1;
131 module_param(atapi_enabled, int, 0444);
132 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
133 
134 static int atapi_dmadir = 0;
135 module_param(atapi_dmadir, int, 0444);
136 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
137 
138 int atapi_passthru16 = 1;
139 module_param(atapi_passthru16, int, 0444);
140 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
141 
142 int libata_fua = 0;
143 module_param_named(fua, libata_fua, int, 0444);
144 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
145 
146 static int ata_ignore_hpa;
147 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
148 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
149 
150 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
151 module_param_named(dma, libata_dma_mask, int, 0444);
152 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
153 
154 static int ata_probe_timeout;
155 module_param(ata_probe_timeout, int, 0444);
156 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
157 
158 int libata_noacpi = 0;
159 module_param_named(noacpi, libata_noacpi, int, 0444);
160 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
161 
162 int libata_allow_tpm = 0;
163 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
164 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
165 
166 static int atapi_an;
167 module_param(atapi_an, int, 0444);
168 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
169 
170 MODULE_AUTHOR("Jeff Garzik");
171 MODULE_DESCRIPTION("Library module for ATA devices");
172 MODULE_LICENSE("GPL");
173 MODULE_VERSION(DRV_VERSION);
174 
175 
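/* SStatus DET field (bits 3:0) == 0x3: device present and PHY communication established */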
176 static bool ata_sstatus_online(u32 sstatus)
177 {
178 	return (sstatus & 0xf) == 0x3;
179 }
180 
181 /**
182  *	ata_link_next - link iteration helper
183  *	@link: the previous link, NULL to start
184  *	@ap: ATA port containing links to iterate
185  *	@mode: iteration mode, one of ATA_LITER_*
186  *
187  *	LOCKING:
188  *	Host lock or EH context.
189  *
190  *	RETURNS:
191  *	Pointer to the next link.
192  */
193 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
194 			       enum ata_link_iter_mode mode)
195 {
196 	BUG_ON(mode != ATA_LITER_EDGE &&
197 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
198 
199 	/* NULL link indicates start of iteration */
200 	if (!link)
201 		switch (mode) {
202 		case ATA_LITER_EDGE:
203 		case ATA_LITER_PMP_FIRST:
204 			if (sata_pmp_attached(ap))
205 				return ap->pmp_link;
206 			/* fall through */
207 		case ATA_LITER_HOST_FIRST:
208 			return &ap->link;
209 		}
210 
211 	/* we just iterated over the host link, what's next? */
212 	if (link == &ap->link)
213 		switch (mode) {
214 		case ATA_LITER_HOST_FIRST:
215 			if (sata_pmp_attached(ap))
216 				return ap->pmp_link;
217 			/* fall through */
218 		case ATA_LITER_PMP_FIRST:
219 			if (unlikely(ap->slave_link))
220 				return ap->slave_link;
221 			/* fall through */
222 		case ATA_LITER_EDGE:
223 			return NULL;
224 		}
225 
226 	/* slave_link excludes PMP */
227 	if (unlikely(link == ap->slave_link))
228 		return NULL;
229 
230 	/* we were over a PMP link */
231 	if (++link < ap->pmp_link + ap->nr_pmp_links)
232 		return link;
233 
234 	if (mode == ATA_LITER_PMP_FIRST)
235 		return &ap->link;
236 
237 	return NULL;
238 }
239 
240 /**
241  *	ata_dev_next - device iteration helper
242  *	@dev: the previous device, NULL to start
243  *	@link: ATA link containing devices to iterate
244  *	@mode: iteration mode, one of ATA_DITER_*
245  *
246  *	LOCKING:
247  *	Host lock or EH context.
248  *
249  *	RETURNS:
250  *	Pointer to the next device.
251  */
252 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
253 				enum ata_dev_iter_mode mode)
254 {
255 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
256 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
257 
258 	/* NULL dev indicates start of iteration */
259 	if (!dev)
260 		switch (mode) {
261 		case ATA_DITER_ENABLED:
262 		case ATA_DITER_ALL:
263 			dev = link->device;
264 			goto check;
265 		case ATA_DITER_ENABLED_REVERSE:
266 		case ATA_DITER_ALL_REVERSE:
267 			dev = link->device + ata_link_max_devices(link) - 1;
268 			goto check;
269 		}
270 
271  next:
272 	/* move to the next one */
273 	switch (mode) {
274 	case ATA_DITER_ENABLED:
275 	case ATA_DITER_ALL:
276 		if (++dev < link->device + ata_link_max_devices(link))
277 			goto check;
278 		return NULL;
279 	case ATA_DITER_ENABLED_REVERSE:
280 	case ATA_DITER_ALL_REVERSE:
281 		if (--dev >= link->device)
282 			goto check;
283 		return NULL;
284 	}
285 
286  check:
287 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
288 	    !ata_dev_enabled(dev))
289 		goto next;
290 	return dev;
291 }
292 
293 /**
294  *	ata_dev_phys_link - find physical link for a device
295  *	@dev: ATA device to look up physical link for
296  *
297  *	Look up physical link which @dev is attached to.  Note that
298  *	this is different from @dev->link only when @dev is on slave
299  *	link.  For all other cases, it's the same as @dev->link.
300  *
301  *	LOCKING:
302  *	Don't care.
303  *
304  *	RETURNS:
305  *	Pointer to the found physical link.
306  */
307 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
308 {
309 	struct ata_port *ap = dev->link->ap;
310 
311 	if (!ap->slave_link)
312 		return dev->link;
313 	if (!dev->devno)
314 		return &ap->link;
315 	return ap->slave_link;
316 }
317 
318 /**
319  *	ata_force_cbl - force cable type according to libata.force
320  *	@ap: ATA port of interest
321  *
322  *	Force cable type according to libata.force and whine about it.
323  *	The last entry which has matching port number is used, so it
324  *	can be specified as part of device force parameters.  For
325  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
326  *	same effect.
327  *
328  *	LOCKING:
329  *	EH context.
330  */
331 void ata_force_cbl(struct ata_port *ap)
332 {
333 	int i;
334 
335 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
336 		const struct ata_force_ent *fe = &ata_force_tbl[i];
337 
338 		if (fe->port != -1 && fe->port != ap->print_id)
339 			continue;
340 
341 		if (fe->param.cbl == ATA_CBL_NONE)
342 			continue;
343 
344 		ap->cbl = fe->param.cbl;
345 		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
346 		return;
347 	}
348 }
349 
350 /**
351  *	ata_force_link_limits - force link limits according to libata.force
352  *	@link: ATA link of interest
353  *
354  *	Force link flags and SATA spd limit according to libata.force
355  *	and whine about it.  When only the port part is specified
356  *	(e.g. 1:), the limit applies to all links connected to both
357  *	the host link and all fan-out ports connected via PMP.  If the
358  *	device part is specified as 0 (e.g. 1.00:), it specifies the
359  *	first fan-out link, not the host link.  Device number 15 always
360  *	points to the host link whether a PMP is attached or not.  If the
361  *	controller has a slave link, device number 16 points to it.
362  *
363  *	LOCKING:
364  *	EH context.
365  */
366 static void ata_force_link_limits(struct ata_link *link)
367 {
368 	bool did_spd = false;
369 	int linkno = link->pmp;
370 	int i;
371 
372 	if (ata_is_host_link(link))
373 		linkno += 15;
374 
375 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
376 		const struct ata_force_ent *fe = &ata_force_tbl[i];
377 
378 		if (fe->port != -1 && fe->port != link->ap->print_id)
379 			continue;
380 
381 		if (fe->device != -1 && fe->device != linkno)
382 			continue;
383 
384 		/* only honor the first spd limit */
385 		if (!did_spd && fe->param.spd_limit) {
386 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
387 			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
388 					fe->param.name);
389 			did_spd = true;
390 		}
391 
392 		/* let lflags stack */
393 		if (fe->param.lflags) {
394 			link->flags |= fe->param.lflags;
395 			ata_link_notice(link,
396 					"FORCE: link flag 0x%x forced -> 0x%x\n",
397 					fe->param.lflags, link->flags);
398 		}
399 	}
400 }
401 
402 /**
403  *	ata_force_xfermask - force xfermask according to libata.force
404  *	@dev: ATA device of interest
405  *
406  *	Force xfer_mask according to libata.force and whine about it.
407  *	For consistency with link selection, device number 15 selects
408  *	the first device connected to the host link.
409  *
410  *	LOCKING:
411  *	EH context.
412  */
413 static void ata_force_xfermask(struct ata_device *dev)
414 {
415 	int devno = dev->link->pmp + dev->devno;
416 	int alt_devno = devno;
417 	int i;
418 
419 	/* allow n.15/16 for devices attached to host port */
420 	if (ata_is_host_link(dev->link))
421 		alt_devno += 15;
422 
423 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
424 		const struct ata_force_ent *fe = &ata_force_tbl[i];
425 		unsigned long pio_mask, mwdma_mask, udma_mask;
426 
427 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
428 			continue;
429 
430 		if (fe->device != -1 && fe->device != devno &&
431 		    fe->device != alt_devno)
432 			continue;
433 
434 		if (!fe->param.xfer_mask)
435 			continue;
436 
437 		ata_unpack_xfermask(fe->param.xfer_mask,
438 				    &pio_mask, &mwdma_mask, &udma_mask);
439 		if (udma_mask)
440 			dev->udma_mask = udma_mask;
441 		else if (mwdma_mask) {
442 			dev->udma_mask = 0;
443 			dev->mwdma_mask = mwdma_mask;
444 		} else {
445 			dev->udma_mask = 0;
446 			dev->mwdma_mask = 0;
447 			dev->pio_mask = pio_mask;
448 		}
449 
450 		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
451 			       fe->param.name);
452 		return;
453 	}
454 }
455 
456 /**
457  *	ata_force_horkage - force horkage according to libata.force
458  *	@dev: ATA device of interest
459  *
460  *	Force horkage according to libata.force and whine about it.
461  *	For consistency with link selection, device number 15 selects
462  *	the first device connected to the host link.
463  *
464  *	LOCKING:
465  *	EH context.
466  */
467 static void ata_force_horkage(struct ata_device *dev)
468 {
469 	int devno = dev->link->pmp + dev->devno;
470 	int alt_devno = devno;
471 	int i;
472 
473 	/* allow n.15/16 for devices attached to host port */
474 	if (ata_is_host_link(dev->link))
475 		alt_devno += 15;
476 
477 	for (i = 0; i < ata_force_tbl_size; i++) {
478 		const struct ata_force_ent *fe = &ata_force_tbl[i];
479 
480 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
481 			continue;
482 
483 		if (fe->device != -1 && fe->device != devno &&
484 		    fe->device != alt_devno)
485 			continue;
486 
487 		if (!(~dev->horkage & fe->param.horkage_on) &&
488 		    !(dev->horkage & fe->param.horkage_off))
489 			continue;
490 
491 		dev->horkage |= fe->param.horkage_on;
492 		dev->horkage &= ~fe->param.horkage_off;
493 
494 		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
495 			       fe->param.name);
496 	}
497 }
498 
499 /**
500  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
501  *	@opcode: SCSI opcode
502  *
503  *	Determine ATAPI command type from @opcode.
504  *
505  *	LOCKING:
506  *	None.
507  *
508  *	RETURNS:
509  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
510  */
511 int atapi_cmd_type(u8 opcode)
512 {
513 	switch (opcode) {
514 	case GPCMD_READ_10:
515 	case GPCMD_READ_12:
516 		return ATAPI_READ;
517 
518 	case GPCMD_WRITE_10:
519 	case GPCMD_WRITE_12:
520 	case GPCMD_WRITE_AND_VERIFY_10:
521 		return ATAPI_WRITE;
522 
523 	case GPCMD_READ_CD:
524 	case GPCMD_READ_CD_MSF:
525 		return ATAPI_READ_CD;
526 
527 	case ATA_16:
528 	case ATA_12:
529 		if (atapi_passthru16)
530 			return ATAPI_PASS_THRU;
531 		/* fall thru */
532 	default:
533 		return ATAPI_MISC;
534 	}
535 }
536 
537 /**
538  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
539  *	@tf: Taskfile to convert
540  *	@pmp: Port multiplier port
541  *	@is_cmd: This FIS is for command
542  *	@fis: Buffer into which data will output
543  *
544  *	Converts a standard ATA taskfile to a Serial ATA
545  *	FIS structure (Register - Host to Device).
546  *
547  *	LOCKING:
548  *	Inherited from caller.
549  */
550 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
551 {
552 	fis[0] = 0x27;			/* Register - Host to Device FIS */
553 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
554 	if (is_cmd)
555 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
556 
557 	fis[2] = tf->command;
558 	fis[3] = tf->feature;
559 
560 	fis[4] = tf->lbal;
561 	fis[5] = tf->lbam;
562 	fis[6] = tf->lbah;
563 	fis[7] = tf->device;
564 
565 	fis[8] = tf->hob_lbal;
566 	fis[9] = tf->hob_lbam;
567 	fis[10] = tf->hob_lbah;
568 	fis[11] = tf->hob_feature;
569 
570 	fis[12] = tf->nsect;
571 	fis[13] = tf->hob_nsect;
572 	fis[14] = 0;
573 	fis[15] = tf->ctl;
574 
575 	fis[16] = tf->auxiliary & 0xff;
576 	fis[17] = (tf->auxiliary >> 8) & 0xff;
577 	fis[18] = (tf->auxiliary >> 16) & 0xff;
578 	fis[19] = (tf->auxiliary >> 24) & 0xff;
579 }
580 
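/*
 * For illustration, a minimal sketch of how an AHCI-style LLD builds the
 * command FIS for a queued command (qc and cmd_tbl are assumed here):
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
 *
 * Passing is_cmd=1 sets bit 7 of byte 1 so the device treats the FIS as a
 * new command rather than a Device Control register update.
 */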
581 /**
582  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
583  *	@fis: Buffer from which data will be input
584  *	@tf: Taskfile to output
585  *
586  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
587  *
588  *	LOCKING:
589  *	Inherited from caller.
590  */
591 
592 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
593 {
594 	tf->command	= fis[2];	/* status */
595 	tf->feature	= fis[3];	/* error */
596 
597 	tf->lbal	= fis[4];
598 	tf->lbam	= fis[5];
599 	tf->lbah	= fis[6];
600 	tf->device	= fis[7];
601 
602 	tf->hob_lbal	= fis[8];
603 	tf->hob_lbam	= fis[9];
604 	tf->hob_lbah	= fis[10];
605 
606 	tf->nsect	= fis[12];
607 	tf->hob_nsect	= fis[13];
608 }
609 
610 static const u8 ata_rw_cmds[] = {
611 	/* pio multi */
612 	ATA_CMD_READ_MULTI,
613 	ATA_CMD_WRITE_MULTI,
614 	ATA_CMD_READ_MULTI_EXT,
615 	ATA_CMD_WRITE_MULTI_EXT,
616 	0,
617 	0,
618 	0,
619 	ATA_CMD_WRITE_MULTI_FUA_EXT,
620 	/* pio */
621 	ATA_CMD_PIO_READ,
622 	ATA_CMD_PIO_WRITE,
623 	ATA_CMD_PIO_READ_EXT,
624 	ATA_CMD_PIO_WRITE_EXT,
625 	0,
626 	0,
627 	0,
628 	0,
629 	/* dma */
630 	ATA_CMD_READ,
631 	ATA_CMD_WRITE,
632 	ATA_CMD_READ_EXT,
633 	ATA_CMD_WRITE_EXT,
634 	0,
635 	0,
636 	0,
637 	ATA_CMD_WRITE_FUA_EXT
638 };
639 
640 /**
641  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
642  *	@tf: command to examine and configure
643  *	@dev: device tf belongs to
644  *
645  *	Examine the device configuration and tf->flags to calculate
646  *	the proper read/write commands and protocol to use.
647  *
648  *	LOCKING:
649  *	caller.
650  */
651 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
652 {
653 	u8 cmd;
654 
655 	int index, fua, lba48, write;
656 
657 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
658 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
659 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
660 
661 	if (dev->flags & ATA_DFLAG_PIO) {
662 		tf->protocol = ATA_PROT_PIO;
663 		index = dev->multi_count ? 0 : 8;
664 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
665 		/* Unable to use DMA due to host limitation */
666 		tf->protocol = ATA_PROT_PIO;
667 		index = dev->multi_count ? 0 : 8;
668 	} else {
669 		tf->protocol = ATA_PROT_DMA;
670 		index = 16;
671 	}
672 
673 	cmd = ata_rw_cmds[index + fua + lba48 + write];
674 	if (cmd) {
675 		tf->command = cmd;
676 		return 0;
677 	}
678 	return -1;
679 }
680 
681 /**
682  *	ata_tf_read_block - Read block address from ATA taskfile
683  *	@tf: ATA taskfile of interest
684  *	@dev: ATA device @tf belongs to
685  *
686  *	LOCKING:
687  *	None.
688  *
689  *	Read block address from @tf.  This function can handle all
690  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
691  *	flags select the address format to use.
692  *
693  *	RETURNS:
694  *	Block address read from @tf.
695  */
696 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
697 {
698 	u64 block = 0;
699 
700 	if (tf->flags & ATA_TFLAG_LBA) {
701 		if (tf->flags & ATA_TFLAG_LBA48) {
702 			block |= (u64)tf->hob_lbah << 40;
703 			block |= (u64)tf->hob_lbam << 32;
704 			block |= (u64)tf->hob_lbal << 24;
705 		} else
706 			block |= (tf->device & 0xf) << 24;
707 
708 		block |= tf->lbah << 16;
709 		block |= tf->lbam << 8;
710 		block |= tf->lbal;
711 	} else {
712 		u32 cyl, head, sect;
713 
714 		cyl = tf->lbam | (tf->lbah << 8);
715 		head = tf->device & 0xf;
716 		sect = tf->lbal;
717 
718 		if (!sect) {
719 			ata_dev_warn(dev,
720 				     "device reported invalid CHS sector 0\n");
721 			sect = 1; /* oh well */
722 		}
723 
724 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
725 	}
726 
727 	return block;
728 }
729 
730 /**
731  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
732  *	@tf: Target ATA taskfile
733  *	@dev: ATA device @tf belongs to
734  *	@block: Block address
735  *	@n_block: Number of blocks
736  *	@tf_flags: RW/FUA etc...
737  *	@tag: tag
738  *
739  *	LOCKING:
740  *	None.
741  *
742  *	Build ATA taskfile @tf for read/write request described by
743  *	@block, @n_block, @tf_flags and @tag on @dev.
744  *
745  *	RETURNS:
746  *
747  *	0 on success, -ERANGE if the request is too large for @dev,
748  *	-EINVAL if the request is invalid.
749  */
750 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
751 		    u64 block, u32 n_block, unsigned int tf_flags,
752 		    unsigned int tag)
753 {
754 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
755 	tf->flags |= tf_flags;
756 
757 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
758 		/* yay, NCQ */
759 		if (!lba_48_ok(block, n_block))
760 			return -ERANGE;
761 
762 		tf->protocol = ATA_PROT_NCQ;
763 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
764 
765 		if (tf->flags & ATA_TFLAG_WRITE)
766 			tf->command = ATA_CMD_FPDMA_WRITE;
767 		else
768 			tf->command = ATA_CMD_FPDMA_READ;
769 
770 		tf->nsect = tag << 3;
771 		tf->hob_feature = (n_block >> 8) & 0xff;
772 		tf->feature = n_block & 0xff;
773 
774 		tf->hob_lbah = (block >> 40) & 0xff;
775 		tf->hob_lbam = (block >> 32) & 0xff;
776 		tf->hob_lbal = (block >> 24) & 0xff;
777 		tf->lbah = (block >> 16) & 0xff;
778 		tf->lbam = (block >> 8) & 0xff;
779 		tf->lbal = block & 0xff;
780 
781 		tf->device = ATA_LBA;
782 		if (tf->flags & ATA_TFLAG_FUA)
783 			tf->device |= 1 << 7;
784 	} else if (dev->flags & ATA_DFLAG_LBA) {
785 		tf->flags |= ATA_TFLAG_LBA;
786 
787 		if (lba_28_ok(block, n_block)) {
788 			/* use LBA28 */
789 			tf->device |= (block >> 24) & 0xf;
790 		} else if (lba_48_ok(block, n_block)) {
791 			if (!(dev->flags & ATA_DFLAG_LBA48))
792 				return -ERANGE;
793 
794 			/* use LBA48 */
795 			tf->flags |= ATA_TFLAG_LBA48;
796 
797 			tf->hob_nsect = (n_block >> 8) & 0xff;
798 
799 			tf->hob_lbah = (block >> 40) & 0xff;
800 			tf->hob_lbam = (block >> 32) & 0xff;
801 			tf->hob_lbal = (block >> 24) & 0xff;
802 		} else
803 			/* request too large even for LBA48 */
804 			return -ERANGE;
805 
806 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
807 			return -EINVAL;
808 
809 		tf->nsect = n_block & 0xff;
810 
811 		tf->lbah = (block >> 16) & 0xff;
812 		tf->lbam = (block >> 8) & 0xff;
813 		tf->lbal = block & 0xff;
814 
815 		tf->device |= ATA_LBA;
816 	} else {
817 		/* CHS */
818 		u32 sect, head, cyl, track;
819 
820 		/* The request -may- be too large for CHS addressing. */
821 		if (!lba_28_ok(block, n_block))
822 			return -ERANGE;
823 
824 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
825 			return -EINVAL;
826 
827 		/* Convert LBA to CHS */
828 		track = (u32)block / dev->sectors;
829 		cyl   = track / dev->heads;
830 		head  = track % dev->heads;
831 		sect  = (u32)block % dev->sectors + 1;
832 
833 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
834 			(u32)block, track, cyl, head, sect);
835 
836 		/* Check whether the converted CHS can fit.
837 		   Cylinder: 0-65535
838 		   Head: 0-15
839 		   Sector: 1-255 */
840 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
841 			return -ERANGE;
842 
843 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
844 		tf->lbal = sect;
845 		tf->lbam = cyl;
846 		tf->lbah = cyl >> 8;
847 		tf->device |= head;
848 	}
849 
850 	return 0;
851 }
852 
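/*
 * Worked example for the CHS branch above, assuming a geometry of 16 heads
 * and 63 sectors per track and block == 4097:
 *
 *	track = 4097 / 63     = 65
 *	cyl   = 65 / 16       = 4
 *	head  = 65 % 16       = 1
 *	sect  = 4097 % 63 + 1 = 3
 *
 * so the taskfile ends up addressing cylinder 4, head 1, sector 3.
 */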
853 /**
854  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
855  *	@pio_mask: pio_mask
856  *	@mwdma_mask: mwdma_mask
857  *	@udma_mask: udma_mask
858  *
859  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
860  *	unsigned int xfer_mask.
861  *
862  *	LOCKING:
863  *	None.
864  *
865  *	RETURNS:
866  *	Packed xfer_mask.
867  */
868 unsigned long ata_pack_xfermask(unsigned long pio_mask,
869 				unsigned long mwdma_mask,
870 				unsigned long udma_mask)
871 {
872 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
873 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
874 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
875 }
876 
877 /**
878  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
879  *	@xfer_mask: xfer_mask to unpack
880  *	@pio_mask: resulting pio_mask
881  *	@mwdma_mask: resulting mwdma_mask
882  *	@udma_mask: resulting udma_mask
883  *
884  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
885  *	Any NULL destination masks will be ignored.
886  */
887 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
888 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
889 {
890 	if (pio_mask)
891 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
892 	if (mwdma_mask)
893 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
894 	if (udma_mask)
895 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
896 }
897 
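/*
 * For illustration, packing and unpacking are symmetric:
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * leaves pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and udma == 0x3f
 * (UDMA0-5) again; NULL result pointers are simply skipped.
 */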
898 static const struct ata_xfer_ent {
899 	int shift, bits;
900 	u8 base;
901 } ata_xfer_tbl[] = {
902 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
903 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
904 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
905 	{ -1, },
906 };
907 
908 /**
909  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
910  *	@xfer_mask: xfer_mask of interest
911  *
912  *	Return matching XFER_* value for @xfer_mask.  Only the highest
913  *	bit of @xfer_mask is considered.
914  *
915  *	LOCKING:
916  *	None.
917  *
918  *	RETURNS:
919  *	Matching XFER_* value, 0xff if no match found.
920  */
921 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
922 {
923 	int highbit = fls(xfer_mask) - 1;
924 	const struct ata_xfer_ent *ent;
925 
926 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
927 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
928 			return ent->base + highbit - ent->shift;
929 	return 0xff;
930 }
931 
932 /**
933  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
934  *	@xfer_mode: XFER_* of interest
935  *
936  *	Return matching xfer_mask for @xfer_mode.
937  *
938  *	LOCKING:
939  *	None.
940  *
941  *	RETURNS:
942  *	Matching xfer_mask, 0 if no match found.
943  */
944 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
945 {
946 	const struct ata_xfer_ent *ent;
947 
948 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
949 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
950 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
951 				& ~((1 << ent->shift) - 1);
952 	return 0;
953 }
954 
955 /**
956  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
957  *	@xfer_mode: XFER_* of interest
958  *
959  *	Return matching xfer_shift for @xfer_mode.
960  *
961  *	LOCKING:
962  *	None.
963  *
964  *	RETURNS:
965  *	Matching xfer_shift, -1 if no match found.
966  */
967 int ata_xfer_mode2shift(unsigned long xfer_mode)
968 {
969 	const struct ata_xfer_ent *ent;
970 
971 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
972 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
973 			return ent->shift;
974 	return -1;
975 }
976 
977 /**
978  *	ata_mode_string - convert xfer_mask to string
979  *	@xfer_mask: mask of bits supported; only highest bit counts.
980  *
981  *	Determine string which represents the highest speed
982  *	(highest bit in @modemask).
983  *
984  *	LOCKING:
985  *	None.
986  *
987  *	RETURNS:
988  *	Constant C string representing highest speed listed in
989  *	@mode_mask, or the constant C string "<n/a>".
990  */
991 const char *ata_mode_string(unsigned long xfer_mask)
992 {
993 	static const char * const xfer_mode_str[] = {
994 		"PIO0",
995 		"PIO1",
996 		"PIO2",
997 		"PIO3",
998 		"PIO4",
999 		"PIO5",
1000 		"PIO6",
1001 		"MWDMA0",
1002 		"MWDMA1",
1003 		"MWDMA2",
1004 		"MWDMA3",
1005 		"MWDMA4",
1006 		"UDMA/16",
1007 		"UDMA/25",
1008 		"UDMA/33",
1009 		"UDMA/44",
1010 		"UDMA/66",
1011 		"UDMA/100",
1012 		"UDMA/133",
1013 		"UDMA7",
1014 	};
1015 	int highbit;
1016 
1017 	highbit = fls(xfer_mask) - 1;
1018 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1019 		return xfer_mode_str[highbit];
1020 	return "<n/a>";
1021 }
1022 
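/*
 * For illustration, the lookup helpers above compose naturally:
 *
 *	u8 mode = ata_xfer_mask2mode(ata_xfer_mode2mask(XFER_UDMA_5));
 *
 * yields mode == XFER_UDMA_5, and ata_mode_string() on the same mask
 * returns "UDMA/100" since only the highest bit is considered.
 */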
1023 const char *sata_spd_string(unsigned int spd)
1024 {
1025 	static const char * const spd_str[] = {
1026 		"1.5 Gbps",
1027 		"3.0 Gbps",
1028 		"6.0 Gbps",
1029 	};
1030 
1031 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1032 		return "<unknown>";
1033 	return spd_str[spd - 1];
1034 }
1035 
1036 /**
1037  *	ata_dev_classify - determine device type based on ATA-spec signature
1038  *	@tf: ATA taskfile register set for device to be identified
1039  *
1040  *	Determine from taskfile register contents whether a device is
1041  *	ATA or ATAPI, as per "Signature and persistence" section
1042  *	of ATA/PI spec (volume 1, sect 5.14).
1043  *
1044  *	LOCKING:
1045  *	None.
1046  *
1047  *	RETURNS:
1048  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1049  *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1050  */
1051 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1052 {
1053 	/* Apple's open source Darwin code hints that some devices only
1054 	 * put a proper signature into the LBA mid/high registers,
1055 	 * So, we only check those.  It's sufficient for uniqueness.
1056 	 *
1057 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1058 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1059 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1060 	 * spec has never mentioned using different signatures
1061 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1062 	 * Multiplier specification began to use 0x69/0x96 to identify
1063 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1064 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1065 	 * 0x69/0x96 and described them as reserved for
1066 	 * SerialATA.
1067 	 *
1068 	 * We follow the current spec and consider that 0x69/0x96
1069 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1070 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1071 	 * SEMB signature.  This is worked around in
1072 	 * ata_dev_read_id().
1073 	 */
1074 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1075 		DPRINTK("found ATA device by sig\n");
1076 		return ATA_DEV_ATA;
1077 	}
1078 
1079 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1080 		DPRINTK("found ATAPI device by sig\n");
1081 		return ATA_DEV_ATAPI;
1082 	}
1083 
1084 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1085 		DPRINTK("found PMP device by sig\n");
1086 		return ATA_DEV_PMP;
1087 	}
1088 
1089 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1090 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1091 		return ATA_DEV_SEMB;
1092 	}
1093 
1094 	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1095 		DPRINTK("found ZAC device by sig\n");
1096 		return ATA_DEV_ZAC;
1097 	}
1098 
1099 	DPRINTK("unknown device\n");
1100 	return ATA_DEV_UNKNOWN;
1101 }
1102 
1103 /**
1104  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1105  *	@id: IDENTIFY DEVICE results we will examine
1106  *	@s: string into which data is output
1107  *	@ofs: offset into identify device page
1108  *	@len: length of string to return. must be an even number.
1109  *
1110  *	The strings in the IDENTIFY DEVICE page are broken up into
1111  *	16-bit chunks.  Run through the string, and output each
1112  *	8-bit chunk linearly, regardless of platform.
1113  *
1114  *	LOCKING:
1115  *	caller.
1116  */
1117 
1118 void ata_id_string(const u16 *id, unsigned char *s,
1119 		   unsigned int ofs, unsigned int len)
1120 {
1121 	unsigned int c;
1122 
1123 	BUG_ON(len & 1);
1124 
1125 	while (len > 0) {
1126 		c = id[ofs] >> 8;
1127 		*s = c;
1128 		s++;
1129 
1130 		c = id[ofs] & 0xff;
1131 		*s = c;
1132 		s++;
1133 
1134 		ofs++;
1135 		len -= 2;
1136 	}
1137 }
1138 
1139 /**
1140  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1141  *	@id: IDENTIFY DEVICE results we will examine
1142  *	@s: string into which data is output
1143  *	@ofs: offset into identify device page
1144  *	@len: length of string to return. must be an odd number.
1145  *
1146  *	This function is identical to ata_id_string except that it
1147  *	trims trailing spaces and terminates the resulting string with
1148  *	null.  @len must be actual maximum length (even number) + 1.
1149  *
1150  *	LOCKING:
1151  *	caller.
1152  */
1153 void ata_id_c_string(const u16 *id, unsigned char *s,
1154 		     unsigned int ofs, unsigned int len)
1155 {
1156 	unsigned char *p;
1157 
1158 	ata_id_string(id, s, ofs, len - 1);
1159 
1160 	p = s + strnlen(s, len - 1);
1161 	while (p > s && p[-1] == ' ')
1162 		p--;
1163 	*p = '\0';
1164 }
1165 
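/*
 * For illustration, a common caller pattern is extracting the model string
 * from IDENTIFY data:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * which copies the product string, trims trailing blanks and adds the
 * terminating NUL.
 */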
1166 static u64 ata_id_n_sectors(const u16 *id)
1167 {
1168 	if (ata_id_has_lba(id)) {
1169 		if (ata_id_has_lba48(id))
1170 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1171 		else
1172 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1173 	} else {
1174 		if (ata_id_current_chs_valid(id))
1175 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1176 			       id[ATA_ID_CUR_SECTORS];
1177 		else
1178 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1179 			       id[ATA_ID_SECTORS];
1180 	}
1181 }
1182 
1183 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1184 {
1185 	u64 sectors = 0;
1186 
1187 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1188 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1189 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1190 	sectors |= (tf->lbah & 0xff) << 16;
1191 	sectors |= (tf->lbam & 0xff) << 8;
1192 	sectors |= (tf->lbal & 0xff);
1193 
1194 	return sectors;
1195 }
1196 
1197 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1198 {
1199 	u64 sectors = 0;
1200 
1201 	sectors |= (tf->device & 0x0f) << 24;
1202 	sectors |= (tf->lbah & 0xff) << 16;
1203 	sectors |= (tf->lbam & 0xff) << 8;
1204 	sectors |= (tf->lbal & 0xff);
1205 
1206 	return sectors;
1207 }
1208 
1209 /**
1210  *	ata_read_native_max_address - Read native max address
1211  *	@dev: target device
1212  *	@max_sectors: out parameter for the result native max address
1213  *
1214  *	Perform an LBA48 or LBA28 native size query upon the device in
1215  *	question.
1216  *
1217  *	RETURNS:
1218  *	0 on success, -EACCES if command is aborted by the drive.
1219  *	-EIO on other errors.
1220  */
1221 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1222 {
1223 	unsigned int err_mask;
1224 	struct ata_taskfile tf;
1225 	int lba48 = ata_id_has_lba48(dev->id);
1226 
1227 	ata_tf_init(dev, &tf);
1228 
1229 	/* always clear all address registers */
1230 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1231 
1232 	if (lba48) {
1233 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1234 		tf.flags |= ATA_TFLAG_LBA48;
1235 	} else
1236 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1237 
1238 	tf.protocol |= ATA_PROT_NODATA;
1239 	tf.device |= ATA_LBA;
1240 
1241 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1242 	if (err_mask) {
1243 		ata_dev_warn(dev,
1244 			     "failed to read native max address (err_mask=0x%x)\n",
1245 			     err_mask);
1246 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1247 			return -EACCES;
1248 		return -EIO;
1249 	}
1250 
1251 	if (lba48)
1252 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1253 	else
1254 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1255 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1256 		(*max_sectors)--;
1257 	return 0;
1258 }
1259 
1260 /**
1261  *	ata_set_max_sectors - Set max sectors
1262  *	@dev: target device
1263  *	@new_sectors: new max sectors value to set for the device
1264  *
1265  *	Set max sectors of @dev to @new_sectors.
1266  *
1267  *	RETURNS:
1268  *	0 on success, -EACCES if command is aborted or denied (due to
1269  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1270  *	errors.
1271  */
1272 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1273 {
1274 	unsigned int err_mask;
1275 	struct ata_taskfile tf;
1276 	int lba48 = ata_id_has_lba48(dev->id);
1277 
1278 	new_sectors--;
1279 
1280 	ata_tf_init(dev, &tf);
1281 
1282 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1283 
1284 	if (lba48) {
1285 		tf.command = ATA_CMD_SET_MAX_EXT;
1286 		tf.flags |= ATA_TFLAG_LBA48;
1287 
1288 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1289 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1290 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1291 	} else {
1292 		tf.command = ATA_CMD_SET_MAX;
1293 
1294 		tf.device |= (new_sectors >> 24) & 0xf;
1295 	}
1296 
1297 	tf.protocol |= ATA_PROT_NODATA;
1298 	tf.device |= ATA_LBA;
1299 
1300 	tf.lbal = (new_sectors >> 0) & 0xff;
1301 	tf.lbam = (new_sectors >> 8) & 0xff;
1302 	tf.lbah = (new_sectors >> 16) & 0xff;
1303 
1304 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1305 	if (err_mask) {
1306 		ata_dev_warn(dev,
1307 			     "failed to set max address (err_mask=0x%x)\n",
1308 			     err_mask);
1309 		if (err_mask == AC_ERR_DEV &&
1310 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1311 			return -EACCES;
1312 		return -EIO;
1313 	}
1314 
1315 	return 0;
1316 }
1317 
1318 /**
1319  *	ata_hpa_resize		-	Resize a device with an HPA set
1320  *	@dev: Device to resize
1321  *
1322  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1323  *	it if required to the full size of the media. The caller must check
1324  *	the drive has the HPA feature set enabled.
1325  *
1326  *	RETURNS:
1327  *	0 on success, -errno on failure.
1328  */
1329 static int ata_hpa_resize(struct ata_device *dev)
1330 {
1331 	struct ata_eh_context *ehc = &dev->link->eh_context;
1332 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1333 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1334 	u64 sectors = ata_id_n_sectors(dev->id);
1335 	u64 native_sectors;
1336 	int rc;
1337 
1338 	/* do we need to do it? */
1339 	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1340 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1341 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1342 		return 0;
1343 
1344 	/* read native max address */
1345 	rc = ata_read_native_max_address(dev, &native_sectors);
1346 	if (rc) {
1347 		/* If device aborted the command or HPA isn't going to
1348 		 * be unlocked, skip HPA resizing.
1349 		 */
1350 		if (rc == -EACCES || !unlock_hpa) {
1351 			ata_dev_warn(dev,
1352 				     "HPA support seems broken, skipping HPA handling\n");
1353 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1354 
1355 			/* we can continue if device aborted the command */
1356 			if (rc == -EACCES)
1357 				rc = 0;
1358 		}
1359 
1360 		return rc;
1361 	}
1362 	dev->n_native_sectors = native_sectors;
1363 
1364 	/* nothing to do? */
1365 	if (native_sectors <= sectors || !unlock_hpa) {
1366 		if (!print_info || native_sectors == sectors)
1367 			return 0;
1368 
1369 		if (native_sectors > sectors)
1370 			ata_dev_info(dev,
1371 				"HPA detected: current %llu, native %llu\n",
1372 				(unsigned long long)sectors,
1373 				(unsigned long long)native_sectors);
1374 		else if (native_sectors < sectors)
1375 			ata_dev_warn(dev,
1376 				"native sectors (%llu) is smaller than sectors (%llu)\n",
1377 				(unsigned long long)native_sectors,
1378 				(unsigned long long)sectors);
1379 		return 0;
1380 	}
1381 
1382 	/* let's unlock HPA */
1383 	rc = ata_set_max_sectors(dev, native_sectors);
1384 	if (rc == -EACCES) {
1385 		/* if device aborted the command, skip HPA resizing */
1386 		ata_dev_warn(dev,
1387 			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1388 			     (unsigned long long)sectors,
1389 			     (unsigned long long)native_sectors);
1390 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1391 		return 0;
1392 	} else if (rc)
1393 		return rc;
1394 
1395 	/* re-read IDENTIFY data */
1396 	rc = ata_dev_reread_id(dev, 0);
1397 	if (rc) {
1398 		ata_dev_err(dev,
1399 			    "failed to re-read IDENTIFY data after HPA resizing\n");
1400 		return rc;
1401 	}
1402 
1403 	if (print_info) {
1404 		u64 new_sectors = ata_id_n_sectors(dev->id);
1405 		ata_dev_info(dev,
1406 			"HPA unlocked: %llu -> %llu, native %llu\n",
1407 			(unsigned long long)sectors,
1408 			(unsigned long long)new_sectors,
1409 			(unsigned long long)native_sectors);
1410 	}
1411 
1412 	return 0;
1413 }
1414 
1415 /**
1416  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1417  *	@id: IDENTIFY DEVICE page to dump
1418  *
1419  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1420  *	page.
1421  *
1422  *	LOCKING:
1423  *	caller.
1424  */
1425 
1426 static inline void ata_dump_id(const u16 *id)
1427 {
1428 	DPRINTK("49==0x%04x  "
1429 		"53==0x%04x  "
1430 		"63==0x%04x  "
1431 		"64==0x%04x  "
1432 		"75==0x%04x  \n",
1433 		id[49],
1434 		id[53],
1435 		id[63],
1436 		id[64],
1437 		id[75]);
1438 	DPRINTK("80==0x%04x  "
1439 		"81==0x%04x  "
1440 		"82==0x%04x  "
1441 		"83==0x%04x  "
1442 		"84==0x%04x  \n",
1443 		id[80],
1444 		id[81],
1445 		id[82],
1446 		id[83],
1447 		id[84]);
1448 	DPRINTK("88==0x%04x  "
1449 		"93==0x%04x\n",
1450 		id[88],
1451 		id[93]);
1452 }
1453 
1454 /**
1455  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1456  *	@id: IDENTIFY data to compute xfer mask from
1457  *
1458  *	Compute the xfermask for this device. This is not as trivial
1459  *	as it seems if we must consider early devices correctly.
1460  *
1461  *	FIXME: pre IDE drive timing (do we care ?).
1462  *
1463  *	LOCKING:
1464  *	None.
1465  *
1466  *	RETURNS:
1467  *	Computed xfermask
1468  */
1469 unsigned long ata_id_xfermask(const u16 *id)
1470 {
1471 	unsigned long pio_mask, mwdma_mask, udma_mask;
1472 
1473 	/* Usual case. Word 53 indicates word 64 is valid */
1474 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1475 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1476 		pio_mask <<= 3;
1477 		pio_mask |= 0x7;
1478 	} else {
1479 		/* If word 64 isn't valid then Word 51 high byte holds
1480 		 * the PIO timing number for the maximum. Turn it into
1481 		 * a mask.
1482 		 */
1483 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1484 		if (mode < 5)	/* Valid PIO range */
1485 			pio_mask = (2 << mode) - 1;
1486 		else
1487 			pio_mask = 1;
1488 
1489 		/* But wait.. there's more. Design your standards by
1490 		 * committee and you too can get a free iordy field to
1491 		 * process. However it's the speeds, not the modes, that
1492 		 * are supported... Note drivers using the timing API
1493 		 * will get this right anyway
1494 		 */
1495 	}
1496 
1497 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1498 
1499 	if (ata_id_is_cfa(id)) {
1500 		/*
1501 		 *	Process compact flash extended modes
1502 		 */
1503 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1504 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1505 
1506 		if (pio)
1507 			pio_mask |= (1 << 5);
1508 		if (pio > 1)
1509 			pio_mask |= (1 << 6);
1510 		if (dma)
1511 			mwdma_mask |= (1 << 3);
1512 		if (dma > 1)
1513 			mwdma_mask |= (1 << 4);
1514 	}
1515 
1516 	udma_mask = 0;
1517 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1518 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1519 
1520 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1521 }
1522 
1523 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1524 {
1525 	struct completion *waiting = qc->private_data;
1526 
1527 	complete(waiting);
1528 }
1529 
1530 /**
1531  *	ata_exec_internal_sg - execute libata internal command
1532  *	@dev: Device to which the command is sent
1533  *	@tf: Taskfile registers for the command and the result
1534  *	@cdb: CDB for packet command
1535  *	@dma_dir: Data transfer direction of the command
1536  *	@sgl: sg list for the data buffer of the command
1537  *	@n_elem: Number of sg entries
1538  *	@timeout: Timeout in msecs (0 for default)
1539  *
1540  *	Executes libata internal command with timeout.  @tf contains
1541  *	command on entry and result on return.  Timeout and error
1542  *	conditions are reported via return value.  No recovery action
1543  *	is taken after a command times out.  It's caller's duty to
1544  *	clean up after timeout.
1545  *
1546  *	LOCKING:
1547  *	None.  Should be called with kernel context, might sleep.
1548  *
1549  *	RETURNS:
1550  *	Zero on success, AC_ERR_* mask on failure
1551  */
1552 unsigned ata_exec_internal_sg(struct ata_device *dev,
1553 			      struct ata_taskfile *tf, const u8 *cdb,
1554 			      int dma_dir, struct scatterlist *sgl,
1555 			      unsigned int n_elem, unsigned long timeout)
1556 {
1557 	struct ata_link *link = dev->link;
1558 	struct ata_port *ap = link->ap;
1559 	u8 command = tf->command;
1560 	int auto_timeout = 0;
1561 	struct ata_queued_cmd *qc;
1562 	unsigned int tag, preempted_tag;
1563 	u32 preempted_sactive, preempted_qc_active;
1564 	int preempted_nr_active_links;
1565 	DECLARE_COMPLETION_ONSTACK(wait);
1566 	unsigned long flags;
1567 	unsigned int err_mask;
1568 	int rc;
1569 
1570 	spin_lock_irqsave(ap->lock, flags);
1571 
1572 	/* no internal command while frozen */
1573 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1574 		spin_unlock_irqrestore(ap->lock, flags);
1575 		return AC_ERR_SYSTEM;
1576 	}
1577 
1578 	/* initialize internal qc */
1579 
1580 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1581 	 * drivers choke if any other tag is given.  This breaks
1582 	 * ata_tag_internal() test for those drivers.  Don't use new
1583 	 * EH stuff without converting to it.
1584 	 */
1585 	if (ap->ops->error_handler)
1586 		tag = ATA_TAG_INTERNAL;
1587 	else
1588 		tag = 0;
1589 
1590 	qc = __ata_qc_from_tag(ap, tag);
1591 
1592 	qc->tag = tag;
1593 	qc->scsicmd = NULL;
1594 	qc->ap = ap;
1595 	qc->dev = dev;
1596 	ata_qc_reinit(qc);
1597 
1598 	preempted_tag = link->active_tag;
1599 	preempted_sactive = link->sactive;
1600 	preempted_qc_active = ap->qc_active;
1601 	preempted_nr_active_links = ap->nr_active_links;
1602 	link->active_tag = ATA_TAG_POISON;
1603 	link->sactive = 0;
1604 	ap->qc_active = 0;
1605 	ap->nr_active_links = 0;
1606 
1607 	/* prepare & issue qc */
1608 	qc->tf = *tf;
1609 	if (cdb)
1610 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1611 
1612 	/* some SATA bridges need us to indicate data xfer direction */
1613 	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1614 	    dma_dir == DMA_FROM_DEVICE)
1615 		qc->tf.feature |= ATAPI_DMADIR;
1616 
1617 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1618 	qc->dma_dir = dma_dir;
1619 	if (dma_dir != DMA_NONE) {
1620 		unsigned int i, buflen = 0;
1621 		struct scatterlist *sg;
1622 
1623 		for_each_sg(sgl, sg, n_elem, i)
1624 			buflen += sg->length;
1625 
1626 		ata_sg_init(qc, sgl, n_elem);
1627 		qc->nbytes = buflen;
1628 	}
1629 
1630 	qc->private_data = &wait;
1631 	qc->complete_fn = ata_qc_complete_internal;
1632 
1633 	ata_qc_issue(qc);
1634 
1635 	spin_unlock_irqrestore(ap->lock, flags);
1636 
1637 	if (!timeout) {
1638 		if (ata_probe_timeout)
1639 			timeout = ata_probe_timeout * 1000;
1640 		else {
1641 			timeout = ata_internal_cmd_timeout(dev, command);
1642 			auto_timeout = 1;
1643 		}
1644 	}
1645 
1646 	if (ap->ops->error_handler)
1647 		ata_eh_release(ap);
1648 
1649 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1650 
1651 	if (ap->ops->error_handler)
1652 		ata_eh_acquire(ap);
1653 
1654 	ata_sff_flush_pio_task(ap);
1655 
1656 	if (!rc) {
1657 		spin_lock_irqsave(ap->lock, flags);
1658 
1659 		/* We're racing with irq here.  If we lose, the
1660 		 * following test prevents us from completing the qc
1661 		 * twice.  If we win, the port is frozen and will be
1662 		 * cleaned up by ->post_internal_cmd().
1663 		 */
1664 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1665 			qc->err_mask |= AC_ERR_TIMEOUT;
1666 
1667 			if (ap->ops->error_handler)
1668 				ata_port_freeze(ap);
1669 			else
1670 				ata_qc_complete(qc);
1671 
1672 			if (ata_msg_warn(ap))
1673 				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1674 					     command);
1675 		}
1676 
1677 		spin_unlock_irqrestore(ap->lock, flags);
1678 	}
1679 
1680 	/* do post_internal_cmd */
1681 	if (ap->ops->post_internal_cmd)
1682 		ap->ops->post_internal_cmd(qc);
1683 
1684 	/* perform minimal error analysis */
1685 	if (qc->flags & ATA_QCFLAG_FAILED) {
1686 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1687 			qc->err_mask |= AC_ERR_DEV;
1688 
1689 		if (!qc->err_mask)
1690 			qc->err_mask |= AC_ERR_OTHER;
1691 
1692 		if (qc->err_mask & ~AC_ERR_OTHER)
1693 			qc->err_mask &= ~AC_ERR_OTHER;
1694 	}
1695 
1696 	/* finish up */
1697 	spin_lock_irqsave(ap->lock, flags);
1698 
1699 	*tf = qc->result_tf;
1700 	err_mask = qc->err_mask;
1701 
1702 	ata_qc_free(qc);
1703 	link->active_tag = preempted_tag;
1704 	link->sactive = preempted_sactive;
1705 	ap->qc_active = preempted_qc_active;
1706 	ap->nr_active_links = preempted_nr_active_links;
1707 
1708 	spin_unlock_irqrestore(ap->lock, flags);
1709 
1710 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1711 		ata_internal_cmd_timed_out(dev, command);
1712 
1713 	return err_mask;
1714 }
1715 
1716 /**
1717  *	ata_exec_internal - execute libata internal command
1718  *	@dev: Device to which the command is sent
1719  *	@tf: Taskfile registers for the command and the result
1720  *	@cdb: CDB for packet command
1721  *	@dma_dir: Data transfer direction of the command
1722  *	@buf: Data buffer of the command
1723  *	@buflen: Length of data buffer
1724  *	@timeout: Timeout in msecs (0 for default)
1725  *
1726  *	Wrapper around ata_exec_internal_sg() which takes simple
1727  *	buffer instead of sg list.
1728  *
1729  *	LOCKING:
1730  *	None.  Should be called with kernel context, might sleep.
1731  *
1732  *	RETURNS:
1733  *	Zero on success, AC_ERR_* mask on failure
1734  */
1735 unsigned ata_exec_internal(struct ata_device *dev,
1736 			   struct ata_taskfile *tf, const u8 *cdb,
1737 			   int dma_dir, void *buf, unsigned int buflen,
1738 			   unsigned long timeout)
1739 {
1740 	struct scatterlist *psg = NULL, sg;
1741 	unsigned int n_elem = 0;
1742 
1743 	if (dma_dir != DMA_NONE) {
1744 		WARN_ON(!buf);
1745 		sg_init_one(&sg, buf, buflen);
1746 		psg = &sg;
1747 		n_elem++;
1748 	}
1749 
1750 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1751 				    timeout);
1752 }
1753 
1754 /**
1755  *	ata_pio_need_iordy	-	check if iordy needed
1756  *	@adev: ATA device
1757  *
1758  *	Check if the current speed of the device requires IORDY. Used
1759  *	by various controllers for chip configuration.
1760  */
1761 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1762 {
1763 	/* Don't set IORDY if we're preparing for reset.  IORDY may
1764 	 * lead to controller lock up on certain controllers if the
1765 	 * port is not occupied.  See bko#11703 for details.
1766 	 */
1767 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1768 		return 0;
1769 	/* Controller doesn't support IORDY.  Probably a pointless
1770 	 * check as the caller should know this.
1771 	 */
1772 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1773 		return 0;
1774 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1775 	if (ata_id_is_cfa(adev->id)
1776 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1777 		return 0;
1778 	/* PIO3 and higher it is mandatory */
1779 	if (adev->pio_mode > XFER_PIO_2)
1780 		return 1;
1781 	/* We turn it on when possible */
1782 	if (ata_id_has_iordy(adev->id))
1783 		return 1;
1784 	return 0;
1785 }
1786 
1787 /**
1788  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1789  *	@adev: ATA device
1790  *
1791  *	Compute the highest mode possible if we are not using iordy. Return
1792  *	-1 if no iordy mode is available.
1793  */
1794 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1795 {
1796 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1797 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1798 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1799 		/* Is the speed faster than the drive allows non IORDY ? */
1800 		if (pio) {
1801 			/* This is cycle times not frequency - watch the logic! */
1802 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1803 				return 3 << ATA_SHIFT_PIO;
1804 			return 7 << ATA_SHIFT_PIO;
1805 		}
1806 	}
1807 	return 3 << ATA_SHIFT_PIO;
1808 }
1809 
1810 /**
1811  *	ata_do_dev_read_id		-	default ID read method
1812  *	@dev: device
1813  *	@tf: proposed taskfile
1814  *	@id: data buffer
1815  *
1816  *	Issue the identify taskfile and hand back the buffer containing
1817  *	identify data. For some RAID controllers and for pre ATA devices
1818  *	this function is wrapped or replaced by the driver
1819  */
1820 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1821 					struct ata_taskfile *tf, u16 *id)
1822 {
1823 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1824 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1825 }
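
/*
 * Illustrative sketch (not part of libata): a controller driver that must
 * massage IDENTIFY data can provide its own ->read_id() which wraps the
 * default helper above.  example_read_id() is hypothetical.
 */
static unsigned int example_read_id(struct ata_device *dev,
				    struct ata_taskfile *tf, u16 *id)
{
	unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);

	if (!err_mask) {
		/*
		 * Apply controller specific fixups to the raw IDENTIFY
		 * words here, e.g. masking capability bits that a bridge
		 * cannot honour.
		 */
	}
	return err_mask;
}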
1826 
1827 /**
1828  *	ata_dev_read_id - Read ID data from the specified device
1829  *	@dev: target device
1830  *	@p_class: pointer to class of the target device (may be changed)
1831  *	@flags: ATA_READID_* flags
1832  *	@id: buffer to read IDENTIFY data into
1833  *
1834  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1835  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1836  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1837  *	for pre-ATA4 drives.
1838  *
1839  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1840  *	now we abort if we hit that case.
1841  *
1842  *	LOCKING:
1843  *	Kernel thread context (may sleep)
1844  *
1845  *	RETURNS:
1846  *	0 on success, -errno otherwise.
1847  */
1848 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1849 		    unsigned int flags, u16 *id)
1850 {
1851 	struct ata_port *ap = dev->link->ap;
1852 	unsigned int class = *p_class;
1853 	struct ata_taskfile tf;
1854 	unsigned int err_mask = 0;
1855 	const char *reason;
1856 	bool is_semb = class == ATA_DEV_SEMB;
1857 	int may_fallback = 1, tried_spinup = 0;
1858 	int rc;
1859 
1860 	if (ata_msg_ctl(ap))
1861 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1862 
1863 retry:
1864 	ata_tf_init(dev, &tf);
1865 
1866 	switch (class) {
1867 	case ATA_DEV_SEMB:
1868 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
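		/* fall through */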
1869 	case ATA_DEV_ATA:
1870 	case ATA_DEV_ZAC:
1871 		tf.command = ATA_CMD_ID_ATA;
1872 		break;
1873 	case ATA_DEV_ATAPI:
1874 		tf.command = ATA_CMD_ID_ATAPI;
1875 		break;
1876 	default:
1877 		rc = -ENODEV;
1878 		reason = "unsupported class";
1879 		goto err_out;
1880 	}
1881 
1882 	tf.protocol = ATA_PROT_PIO;
1883 
1884 	/* Some devices choke if TF registers contain garbage.  Make
1885 	 * sure those are properly initialized.
1886 	 */
1887 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1888 
1889 	/* Device presence detection is unreliable on some
1890 	 * controllers.  Always poll IDENTIFY if available.
1891 	 */
1892 	tf.flags |= ATA_TFLAG_POLLING;
1893 
1894 	if (ap->ops->read_id)
1895 		err_mask = ap->ops->read_id(dev, &tf, id);
1896 	else
1897 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1898 
1899 	if (err_mask) {
1900 		if (err_mask & AC_ERR_NODEV_HINT) {
1901 			ata_dev_dbg(dev, "NODEV after polling detection\n");
1902 			return -ENOENT;
1903 		}
1904 
1905 		if (is_semb) {
1906 			ata_dev_info(dev,
1907 		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1908 			/* SEMB is not supported yet */
1909 			*p_class = ATA_DEV_SEMB_UNSUP;
1910 			return 0;
1911 		}
1912 
1913 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1914 			/* Device or controller might have reported
1915 			 * the wrong device class.  Give a shot at the
1916 			 * other IDENTIFY if the current one is
1917 			 * aborted by the device.
1918 			 */
1919 			if (may_fallback) {
1920 				may_fallback = 0;
1921 
1922 				if (class == ATA_DEV_ATA)
1923 					class = ATA_DEV_ATAPI;
1924 				else
1925 					class = ATA_DEV_ATA;
1926 				goto retry;
1927 			}
1928 
1929 			/* Control reaches here iff the device aborted
1930 			 * both flavors of IDENTIFYs which happens
1931 			 * sometimes with phantom devices.
1932 			 */
1933 			ata_dev_dbg(dev,
1934 				    "both IDENTIFYs aborted, assuming NODEV\n");
1935 			return -ENOENT;
1936 		}
1937 
1938 		rc = -EIO;
1939 		reason = "I/O error";
1940 		goto err_out;
1941 	}
1942 
1943 	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1944 		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1945 			    "class=%d may_fallback=%d tried_spinup=%d\n",
1946 			    class, may_fallback, tried_spinup);
1947 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1948 			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1949 	}
1950 
1951 	/* Falling back doesn't make sense if ID data was read
1952 	 * successfully at least once.
1953 	 */
1954 	may_fallback = 0;
1955 
1956 	swap_buf_le16(id, ATA_ID_WORDS);
1957 
1958 	/* sanity check */
1959 	rc = -EINVAL;
1960 	reason = "device reports invalid type";
1961 
1962 	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1963 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1964 			goto err_out;
1965 		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1966 							ata_id_is_ata(id)) {
1967 			ata_dev_dbg(dev,
1968 				"host indicates ignore ATA devices, ignored\n");
1969 			return -ENOENT;
1970 		}
1971 	} else {
1972 		if (ata_id_is_ata(id))
1973 			goto err_out;
1974 	}
1975 
1976 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1977 		tried_spinup = 1;
1978 		/*
1979 		 * Drive powered-up in standby mode, and requires a specific
1980 		 * SET_FEATURES spin-up subcommand before it will accept
1981 		 * anything other than the original IDENTIFY command.
1982 		 */
1983 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1984 		if (err_mask && id[2] != 0x738c) {
1985 			rc = -EIO;
1986 			reason = "SPINUP failed";
1987 			goto err_out;
1988 		}
1989 		/*
1990 		 * If the drive initially returned incomplete IDENTIFY info,
1991 		 * we now must reissue the IDENTIFY command.
1992 		 */
1993 		if (id[2] == 0x37c8)
1994 			goto retry;
1995 	}
1996 
1997 	if ((flags & ATA_READID_POSTRESET) &&
1998 	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1999 		/*
2000 		 * The exact sequence expected by certain pre-ATA4 drives is:
2001 		 * SRST RESET
2002 		 * IDENTIFY (optional in early ATA)
2003 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2004 		 * anything else..
2005 		 * Some drives were very specific about that exact sequence.
2006 		 *
2007 		 * Note that ATA4 says lba is mandatory so the second check
2008 		 * should never trigger.
2009 		 */
2010 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2011 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2012 			if (err_mask) {
2013 				rc = -EIO;
2014 				reason = "INIT_DEV_PARAMS failed";
2015 				goto err_out;
2016 			}
2017 
2018 			/* current CHS translation info (id[53-58]) might be
2019 			 * changed. reread the identify device info.
2020 			 */
2021 			flags &= ~ATA_READID_POSTRESET;
2022 			goto retry;
2023 		}
2024 	}
2025 
2026 	*p_class = class;
2027 
2028 	return 0;
2029 
2030  err_out:
2031 	if (ata_msg_warn(ap))
2032 		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2033 			     reason, err_mask);
2034 	return rc;
2035 }
2036 
2037 static int ata_do_link_spd_horkage(struct ata_device *dev)
2038 {
2039 	struct ata_link *plink = ata_dev_phys_link(dev);
2040 	u32 target, target_limit;
2041 
2042 	if (!sata_scr_valid(plink))
2043 		return 0;
2044 
2045 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2046 		target = 1;
2047 	else
2048 		return 0;
2049 
2050 	target_limit = (1 << target) - 1;
2051 
2052 	/* if already on stricter limit, no need to push further */
2053 	if (plink->sata_spd_limit <= target_limit)
2054 		return 0;
2055 
2056 	plink->sata_spd_limit = target_limit;
2057 
2058 	/* Request another EH round by returning -EAGAIN if link is
2059 	 * going faster than the target speed.  Forward progress is
2060 	 * guaranteed by setting sata_spd_limit to target_limit above.
2061 	 */
2062 	if (plink->sata_spd > target) {
2063 		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2064 			     sata_spd_string(target));
2065 		return -EAGAIN;
2066 	}
2067 	return 0;
2068 }
2069 
2070 static inline u8 ata_dev_knobble(struct ata_device *dev)
2071 {
2072 	struct ata_port *ap = dev->link->ap;
2073 
2074 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2075 		return 0;
2076 
2077 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2078 }
2079 
2080 static bool ata_dev_check_adapter(struct ata_device *dev,
2081 				  unsigned short vendor_id)
2082 {
2083 	struct pci_dev *pcidev = NULL;
2084 	struct device *parent_dev = NULL;
2085 
2086 	for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2087 	     parent_dev = parent_dev->parent) {
2088 		if (dev_is_pci(parent_dev)) {
2089 			pcidev = to_pci_dev(parent_dev);
2090 			if (pcidev->vendor == vendor_id)
2091 				return true;
2092 			break;
2093 		}
2094 	}
2095 
2096 	return false;
2097 }
2098 
2099 static int ata_dev_config_ncq(struct ata_device *dev,
2100 			       char *desc, size_t desc_sz)
2101 {
2102 	struct ata_port *ap = dev->link->ap;
2103 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2104 	unsigned int err_mask;
2105 	char *aa_desc = "";
2106 
2107 	if (!ata_id_has_ncq(dev->id)) {
2108 		desc[0] = '\0';
2109 		return 0;
2110 	}
2111 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2112 		snprintf(desc, desc_sz, "NCQ (not used)");
2113 		return 0;
2114 	}
2115 
2116 	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
2117 	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
2118 		snprintf(desc, desc_sz, "NCQ (not used)");
2119 		return 0;
2120 	}
2121 
2122 	if (ap->flags & ATA_FLAG_NCQ) {
2123 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2124 		dev->flags |= ATA_DFLAG_NCQ;
2125 	}
2126 
2127 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2128 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2129 		ata_id_has_fpdma_aa(dev->id)) {
2130 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2131 			SATA_FPDMA_AA);
2132 		if (err_mask) {
2133 			ata_dev_err(dev,
2134 				    "failed to enable AA (error_mask=0x%x)\n",
2135 				    err_mask);
2136 			if (err_mask != AC_ERR_DEV) {
2137 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2138 				return -EIO;
2139 			}
2140 		} else
2141 			aa_desc = ", AA";
2142 	}
2143 
2144 	if (hdepth >= ddepth)
2145 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2146 	else
2147 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2148 			ddepth, aa_desc);
2149 
2150 	if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
2151 	    ata_id_has_ncq_send_and_recv(dev->id)) {
2152 		err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2153 					     0, ap->sector_buf, 1);
2154 		if (err_mask) {
2155 			ata_dev_dbg(dev,
2156 				    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2157 				    err_mask);
2158 		} else {
2159 			u8 *cmds = dev->ncq_send_recv_cmds;
2160 
2161 			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2162 			memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2163 
2164 			if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2165 				ata_dev_dbg(dev, "disabling queued TRIM support\n");
2166 				cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2167 					~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2168 			}
2169 		}
2170 	}
2171 
2172 	return 0;
2173 }
2174 
2175 /**
2176  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2177  *	@dev: Target device to configure
2178  *
2179  *	Configure @dev according to @dev->id.  Generic and low-level
2180  *	driver specific fixups are also applied.
2181  *
2182  *	LOCKING:
2183  *	Kernel thread context (may sleep)
2184  *
2185  *	RETURNS:
2186  *	0 on success, -errno otherwise
2187  */
2188 int ata_dev_configure(struct ata_device *dev)
2189 {
2190 	struct ata_port *ap = dev->link->ap;
2191 	struct ata_eh_context *ehc = &dev->link->eh_context;
2192 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2193 	const u16 *id = dev->id;
2194 	unsigned long xfer_mask;
2195 	unsigned int err_mask;
2196 	char revbuf[7];		/* XYZ-99\0 */
2197 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2198 	char modelbuf[ATA_ID_PROD_LEN+1];
2199 	int rc;
2200 
2201 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2202 		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2203 		return 0;
2204 	}
2205 
2206 	if (ata_msg_probe(ap))
2207 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2208 
2209 	/* set horkage */
2210 	dev->horkage |= ata_dev_blacklisted(dev);
2211 	ata_force_horkage(dev);
2212 
2213 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2214 		ata_dev_info(dev, "unsupported device, disabling\n");
2215 		ata_dev_disable(dev);
2216 		return 0;
2217 	}
2218 
2219 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2220 	    dev->class == ATA_DEV_ATAPI) {
2221 		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2222 			     atapi_enabled ? "not supported with this driver"
2223 			     : "disabled");
2224 		ata_dev_disable(dev);
2225 		return 0;
2226 	}
2227 
2228 	rc = ata_do_link_spd_horkage(dev);
2229 	if (rc)
2230 		return rc;
2231 
2232 	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2233 	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2234 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2235 		dev->horkage |= ATA_HORKAGE_NOLPM;
2236 
2237 	if (ap->flags & ATA_FLAG_NO_LPM)
2238 		dev->horkage |= ATA_HORKAGE_NOLPM;
2239 
2240 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2241 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2242 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2243 	}
2244 
2245 	/* let ACPI work its magic */
2246 	rc = ata_acpi_on_devcfg(dev);
2247 	if (rc)
2248 		return rc;
2249 
2250 	/* massage HPA, do it early as it might change IDENTIFY data */
2251 	rc = ata_hpa_resize(dev);
2252 	if (rc)
2253 		return rc;
2254 
2255 	/* print device capabilities */
2256 	if (ata_msg_probe(ap))
2257 		ata_dev_dbg(dev,
2258 			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2259 			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2260 			    __func__,
2261 			    id[49], id[82], id[83], id[84],
2262 			    id[85], id[86], id[87], id[88]);
2263 
2264 	/* initialize to-be-configured parameters */
2265 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2266 	dev->max_sectors = 0;
2267 	dev->cdb_len = 0;
2268 	dev->n_sectors = 0;
2269 	dev->cylinders = 0;
2270 	dev->heads = 0;
2271 	dev->sectors = 0;
2272 	dev->multi_count = 0;
2273 
2274 	/*
2275 	 * common ATA, ATAPI feature tests
2276 	 */
2277 
2278 	/* find max transfer mode; for printk only */
2279 	xfer_mask = ata_id_xfermask(id);
2280 
2281 	if (ata_msg_probe(ap))
2282 		ata_dump_id(id);
2283 
2284 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2285 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2286 			sizeof(fwrevbuf));
2287 
2288 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2289 			sizeof(modelbuf));
2290 
2291 	/* ATA-specific feature tests */
2292 	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2293 		if (ata_id_is_cfa(id)) {
2294 			/* CPRM may make this media unusable */
2295 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2296 				ata_dev_warn(dev,
2297 	"supports DRM functions and may not be fully accessible\n");
2298 			snprintf(revbuf, 7, "CFA");
2299 		} else {
2300 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2301 			/* Warn the user if the device has TPM extensions */
2302 			if (ata_id_has_tpm(id))
2303 				ata_dev_warn(dev,
2304 	"supports DRM functions and may not be fully accessible\n");
2305 		}
2306 
2307 		dev->n_sectors = ata_id_n_sectors(id);
2308 
2309 		/* get current R/W Multiple count setting */
2310 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2311 			unsigned int max = dev->id[47] & 0xff;
2312 			unsigned int cnt = dev->id[59] & 0xff;
2313 			/* only recognize/allow powers of two here */
2314 			if (is_power_of_2(max) && is_power_of_2(cnt))
2315 				if (cnt <= max)
2316 					dev->multi_count = cnt;
2317 		}
2318 
2319 		if (ata_id_has_lba(id)) {
2320 			const char *lba_desc;
2321 			char ncq_desc[24];
2322 
2323 			lba_desc = "LBA";
2324 			dev->flags |= ATA_DFLAG_LBA;
2325 			if (ata_id_has_lba48(id)) {
2326 				dev->flags |= ATA_DFLAG_LBA48;
2327 				lba_desc = "LBA48";
2328 
2329 				if (dev->n_sectors >= (1UL << 28) &&
2330 				    ata_id_has_flush_ext(id))
2331 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2332 			}
2333 
2334 			/* config NCQ */
2335 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2336 			if (rc)
2337 				return rc;
2338 
2339 			/* print device info to dmesg */
2340 			if (ata_msg_drv(ap) && print_info) {
2341 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2342 					     revbuf, modelbuf, fwrevbuf,
2343 					     ata_mode_string(xfer_mask));
2344 				ata_dev_info(dev,
2345 					     "%llu sectors, multi %u: %s %s\n",
2346 					(unsigned long long)dev->n_sectors,
2347 					dev->multi_count, lba_desc, ncq_desc);
2348 			}
2349 		} else {
2350 			/* CHS */
2351 
2352 			/* Default translation */
2353 			dev->cylinders	= id[1];
2354 			dev->heads	= id[3];
2355 			dev->sectors	= id[6];
2356 
2357 			if (ata_id_current_chs_valid(id)) {
2358 				/* Current CHS translation is valid. */
2359 				dev->cylinders = id[54];
2360 				dev->heads     = id[55];
2361 				dev->sectors   = id[56];
2362 			}
2363 
2364 			/* print device info to dmesg */
2365 			if (ata_msg_drv(ap) && print_info) {
2366 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2367 					     revbuf,	modelbuf, fwrevbuf,
2368 					     ata_mode_string(xfer_mask));
2369 				ata_dev_info(dev,
2370 					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2371 					     (unsigned long long)dev->n_sectors,
2372 					     dev->multi_count, dev->cylinders,
2373 					     dev->heads, dev->sectors);
2374 			}
2375 		}
2376 
2377 		/* Check and mark DevSlp capability. Get DevSlp timing variables
2378 		 * from SATA Settings page of Identify Device Data Log.
2379 		 */
2380 		if (ata_id_has_devslp(dev->id)) {
2381 			u8 *sata_setting = ap->sector_buf;
2382 			int i, j;
2383 
2384 			dev->flags |= ATA_DFLAG_DEVSLP;
2385 			err_mask = ata_read_log_page(dev,
2386 						     ATA_LOG_SATA_ID_DEV_DATA,
2387 						     ATA_LOG_SATA_SETTINGS,
2388 						     sata_setting,
2389 						     1);
2390 			if (err_mask)
2391 				ata_dev_dbg(dev,
2392 					    "failed to get Identify Device Data, Emask 0x%x\n",
2393 					    err_mask);
2394 			else
2395 				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2396 					j = ATA_LOG_DEVSLP_OFFSET + i;
2397 					dev->devslp_timing[i] = sata_setting[j];
2398 				}
2399 		}
2400 
2401 		dev->cdb_len = 16;
2402 	}
2403 
2404 	/* ATAPI-specific feature tests */
2405 	else if (dev->class == ATA_DEV_ATAPI) {
2406 		const char *cdb_intr_string = "";
2407 		const char *atapi_an_string = "";
2408 		const char *dma_dir_string = "";
2409 		u32 sntf;
2410 
2411 		rc = atapi_cdb_len(id);
2412 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2413 			if (ata_msg_warn(ap))
2414 				ata_dev_warn(dev, "unsupported CDB len\n");
2415 			rc = -EINVAL;
2416 			goto err_out_nosup;
2417 		}
2418 		dev->cdb_len = (unsigned int) rc;
2419 
2420 		/* Enable ATAPI AN if both the host and device have
2421 		 * the support.  If PMP is attached, SNTF is required
2422 		 * to enable ATAPI AN to discern between PHY status
2423 		 * changed notifications and ATAPI ANs.
2424 		 */
2425 		if (atapi_an &&
2426 		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2427 		    (!sata_pmp_attached(ap) ||
2428 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2429 			/* issue SET feature command to turn this on */
2430 			err_mask = ata_dev_set_feature(dev,
2431 					SETFEATURES_SATA_ENABLE, SATA_AN);
2432 			if (err_mask)
2433 				ata_dev_err(dev,
2434 					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2435 					    err_mask);
2436 			else {
2437 				dev->flags |= ATA_DFLAG_AN;
2438 				atapi_an_string = ", ATAPI AN";
2439 			}
2440 		}
2441 
2442 		if (ata_id_cdb_intr(dev->id)) {
2443 			dev->flags |= ATA_DFLAG_CDB_INTR;
2444 			cdb_intr_string = ", CDB intr";
2445 		}
2446 
2447 		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2448 			dev->flags |= ATA_DFLAG_DMADIR;
2449 			dma_dir_string = ", DMADIR";
2450 		}
2451 
2452 		if (ata_id_has_da(dev->id)) {
2453 			dev->flags |= ATA_DFLAG_DA;
2454 			zpodd_init(dev);
2455 		}
2456 
2457 		/* print device info to dmesg */
2458 		if (ata_msg_drv(ap) && print_info)
2459 			ata_dev_info(dev,
2460 				     "ATAPI: %s, %s, max %s%s%s%s\n",
2461 				     modelbuf, fwrevbuf,
2462 				     ata_mode_string(xfer_mask),
2463 				     cdb_intr_string, atapi_an_string,
2464 				     dma_dir_string);
2465 	}
2466 
2467 	/* determine max_sectors */
2468 	dev->max_sectors = ATA_MAX_SECTORS;
2469 	if (dev->flags & ATA_DFLAG_LBA48)
2470 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2471 
2472 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2473 	   200 sectors */
2474 	if (ata_dev_knobble(dev)) {
2475 		if (ata_msg_drv(ap) && print_info)
2476 			ata_dev_info(dev, "applying bridge limits\n");
2477 		dev->udma_mask &= ATA_UDMA5;
2478 		dev->max_sectors = ATA_MAX_SECTORS;
2479 	}
2480 
2481 	if ((dev->class == ATA_DEV_ATAPI) &&
2482 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2483 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2484 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2485 	}
2486 
2487 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2488 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2489 					 dev->max_sectors);
2490 
2491 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2492 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2493 					 dev->max_sectors);
2494 
2495 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2496 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2497 
2498 	if (ap->ops->dev_config)
2499 		ap->ops->dev_config(dev);
2500 
2501 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2502 		/* Let the user know. We don't want to disallow opens for
2503 		   rescue purposes, or in case the vendor is just a blithering
2504 		   idiot. Do this after the dev_config call as some controllers
2505 		   with buggy firmware may want to avoid reporting false device
2506 		   bugs */
2507 
2508 		if (print_info) {
2509 			ata_dev_warn(dev,
2510 "Drive reports diagnostics failure. This may indicate a drive\n");
2511 			ata_dev_warn(dev,
2512 "fault or invalid emulation. Contact drive vendor for information.\n");
2513 		}
2514 	}
2515 
2516 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2517 		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2518 		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2519 	}
2520 
2521 	return 0;
2522 
2523 err_out_nosup:
2524 	if (ata_msg_probe(ap))
2525 		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2526 	return rc;
2527 }
2528 
2529 /**
2530  *	ata_cable_40wire	-	return 40 wire cable type
2531  *	@ap: port
2532  *
2533  *	Helper method for drivers which want to hardwire 40 wire cable
2534  *	detection.
2535  */
2536 
2537 int ata_cable_40wire(struct ata_port *ap)
2538 {
2539 	return ATA_CBL_PATA40;
2540 }
2541 
2542 /**
2543  *	ata_cable_80wire	-	return 80 wire cable type
2544  *	@ap: port
2545  *
2546  *	Helper method for drivers which want to hardwire 80 wire cable
2547  *	detection.
2548  */
2549 
2550 int ata_cable_80wire(struct ata_port *ap)
2551 {
2552 	return ATA_CBL_PATA80;
2553 }
2554 
2555 /**
2556  *	ata_cable_unknown	-	return unknown PATA cable.
2557  *	@ap: port
2558  *
2559  *	Helper method for drivers which have no PATA cable detection.
2560  */
2561 
2562 int ata_cable_unknown(struct ata_port *ap)
2563 {
2564 	return ATA_CBL_PATA_UNK;
2565 }
2566 
2567 /**
2568  *	ata_cable_ignore	-	return ignored PATA cable.
2569  *	@ap: port
2570  *
2571  *	Helper method for drivers which don't use cable type to limit
2572  *	transfer mode.
2573  */
2574 int ata_cable_ignore(struct ata_port *ap)
2575 {
2576 	return ATA_CBL_PATA_IGN;
2577 }
2578 
2579 /**
2580  *	ata_cable_sata	-	return SATA cable type
2581  *	@ap: port
2582  *
2583  *	Helper method for drivers which have SATA cables
2584  */
2585 
2586 int ata_cable_sata(struct ata_port *ap)
2587 {
2588 	return ATA_CBL_SATA;
2589 }
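
/*
 * Illustrative sketch (not part of libata): when the cable type is fixed
 * by the board design rather than detectable, a driver simply plugs one
 * of the helpers above into its port operations.  example_port_ops is
 * hypothetical; a real PATA driver would usually inherit from the SFF or
 * BMDMA operations rather than ata_base_port_ops.
 */
static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_base_port_ops,
	.cable_detect	= ata_cable_40wire,	/* hard-wired 40 wire cable */
};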
2590 
2591 /**
2592  *	ata_bus_probe - Reset and probe ATA bus
2593  *	@ap: Bus to probe
2594  *
2595  *	Master ATA bus probing function.  Initiates a hardware-dependent
2596  *	bus reset, then attempts to identify any devices found on
2597  *	the bus.
2598  *
2599  *	LOCKING:
2600  *	PCI/etc. bus probe sem.
2601  *
2602  *	RETURNS:
2603  *	Zero on success, negative errno otherwise.
2604  */
2605 
2606 int ata_bus_probe(struct ata_port *ap)
2607 {
2608 	unsigned int classes[ATA_MAX_DEVICES];
2609 	int tries[ATA_MAX_DEVICES];
2610 	int rc;
2611 	struct ata_device *dev;
2612 
2613 	ata_for_each_dev(dev, &ap->link, ALL)
2614 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2615 
2616  retry:
2617 	ata_for_each_dev(dev, &ap->link, ALL) {
2618 		/* If we issue an SRST then an ATA drive (not ATAPI)
2619 		 * may change configuration and be in PIO0 timing. If
2620 		 * we do a hard reset (or are coming from power on)
2621 		 * this is true for ATA or ATAPI. Until we've set a
2622 		 * suitable controller mode we should not touch the
2623 		 * bus as we may be talking too fast.
2624 		 */
2625 		dev->pio_mode = XFER_PIO_0;
2626 		dev->dma_mode = 0xff;
2627 
2628 		/* If the controller has a pio mode setup function
2629 		 * then use it to set the chipset to rights. Don't
2630 		 * touch the DMA setup as that will be dealt with when
2631 		 * configuring devices.
2632 		 */
2633 		if (ap->ops->set_piomode)
2634 			ap->ops->set_piomode(ap, dev);
2635 	}
2636 
2637 	/* reset and determine device classes */
2638 	ap->ops->phy_reset(ap);
2639 
2640 	ata_for_each_dev(dev, &ap->link, ALL) {
2641 		if (dev->class != ATA_DEV_UNKNOWN)
2642 			classes[dev->devno] = dev->class;
2643 		else
2644 			classes[dev->devno] = ATA_DEV_NONE;
2645 
2646 		dev->class = ATA_DEV_UNKNOWN;
2647 	}
2648 
2649 	/* read IDENTIFY page and configure devices. We have to do the identify
2650 	   specific sequence bass-ackwards so that PDIAG- is released by
2651 	   the slave device */
2652 
2653 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2654 		if (tries[dev->devno])
2655 			dev->class = classes[dev->devno];
2656 
2657 		if (!ata_dev_enabled(dev))
2658 			continue;
2659 
2660 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2661 				     dev->id);
2662 		if (rc)
2663 			goto fail;
2664 	}
2665 
2666 	/* Now ask for the cable type as PDIAG- should have been released */
2667 	if (ap->ops->cable_detect)
2668 		ap->cbl = ap->ops->cable_detect(ap);
2669 
2670 	/* We may have SATA bridge glue hiding here irrespective of
2671 	 * the reported cable types and sensed types.  When SATA
2672 	 * drives indicate we have a bridge, we don't know which end
2673 	 * of the link the bridge is which is a problem.
2674 	 */
2675 	ata_for_each_dev(dev, &ap->link, ENABLED)
2676 		if (ata_id_is_sata(dev->id))
2677 			ap->cbl = ATA_CBL_SATA;
2678 
2679 	/* After the identify sequence we can now set up the devices. We do
2680 	   this in the normal order so that the user doesn't get confused */
2681 
2682 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2683 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2684 		rc = ata_dev_configure(dev);
2685 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2686 		if (rc)
2687 			goto fail;
2688 	}
2689 
2690 	/* configure transfer mode */
2691 	rc = ata_set_mode(&ap->link, &dev);
2692 	if (rc)
2693 		goto fail;
2694 
2695 	ata_for_each_dev(dev, &ap->link, ENABLED)
2696 		return 0;
2697 
2698 	return -ENODEV;
2699 
2700  fail:
2701 	tries[dev->devno]--;
2702 
2703 	switch (rc) {
2704 	case -EINVAL:
2705 		/* eeek, something went very wrong, give up */
2706 		tries[dev->devno] = 0;
2707 		break;
2708 
2709 	case -ENODEV:
2710 		/* give it just one more chance */
2711 		tries[dev->devno] = min(tries[dev->devno], 1);
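		/* fall through */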
2712 	case -EIO:
2713 		if (tries[dev->devno] == 1) {
2714 			/* This is the last chance, better to slow
2715 			 * down than lose it.
2716 			 */
2717 			sata_down_spd_limit(&ap->link, 0);
2718 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2719 		}
2720 	}
2721 
2722 	if (!tries[dev->devno])
2723 		ata_dev_disable(dev);
2724 
2725 	goto retry;
2726 }
2727 
2728 /**
2729  *	sata_print_link_status - Print SATA link status
2730  *	@link: SATA link to printk link status about
2731  *
2732  *	This function prints link speed and status of a SATA link.
2733  *
2734  *	LOCKING:
2735  *	None.
2736  */
2737 static void sata_print_link_status(struct ata_link *link)
2738 {
2739 	u32 sstatus, scontrol, tmp;
2740 
2741 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2742 		return;
2743 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2744 
2745 	if (ata_phys_link_online(link)) {
2746 		tmp = (sstatus >> 4) & 0xf;
2747 		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2748 			      sata_spd_string(tmp), sstatus, scontrol);
2749 	} else {
2750 		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2751 			      sstatus, scontrol);
2752 	}
2753 }
2754 
2755 /**
2756  *	ata_dev_pair		-	return other device on cable
2757  *	@adev: device
2758  *
2759  *	Obtain the other device on the same cable, or if none is
2760  *	present NULL is returned
2761  */
2762 
2763 struct ata_device *ata_dev_pair(struct ata_device *adev)
2764 {
2765 	struct ata_link *link = adev->link;
2766 	struct ata_device *pair = &link->device[1 - adev->devno];
2767 	if (!ata_dev_enabled(pair))
2768 		return NULL;
2769 	return pair;
2770 }
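
/*
 * Illustrative sketch (not part of libata): legacy PATA controllers whose
 * timing registers are shared by both devices on a channel use
 * ata_dev_pair() so that a newly programmed mode does not overclock the
 * other device on the cable.  example_shared_timing_mode() is
 * hypothetical.
 */
static u8 example_shared_timing_mode(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	/* use the slower of the two PIO modes when timings are shared */
	if (pair && pair->pio_mode < adev->pio_mode)
		return pair->pio_mode;
	return adev->pio_mode;
}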
2771 
2772 /**
2773  *	sata_down_spd_limit - adjust SATA spd limit downward
2774  *	@link: Link to adjust SATA spd limit for
2775  *	@spd_limit: Additional limit
2776  *
2777  *	Adjust SATA spd limit of @link downward.  Note that this
2778  *	function only adjusts the limit.  The change must be applied
2779  *	using sata_set_spd().
2780  *
2781  *	If @spd_limit is non-zero, the speed is limited to a value equal to or
2782  *	lower than @spd_limit if such speed is supported.  If
2783  *	@spd_limit is slower than any supported speed, only the lowest
2784  *	supported speed is allowed.
2785  *
2786  *	LOCKING:
2787  *	Inherited from caller.
2788  *
2789  *	RETURNS:
2790  *	0 on success, negative errno on failure
2791  */
2792 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2793 {
2794 	u32 sstatus, spd, mask;
2795 	int rc, bit;
2796 
2797 	if (!sata_scr_valid(link))
2798 		return -EOPNOTSUPP;
2799 
2800 	/* If SCR can be read, use it to determine the current SPD.
2801 	 * If not, use cached value in link->sata_spd.
2802 	 */
2803 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2804 	if (rc == 0 && ata_sstatus_online(sstatus))
2805 		spd = (sstatus >> 4) & 0xf;
2806 	else
2807 		spd = link->sata_spd;
2808 
2809 	mask = link->sata_spd_limit;
2810 	if (mask <= 1)
2811 		return -EINVAL;
2812 
2813 	/* unconditionally mask off the highest bit */
2814 	bit = fls(mask) - 1;
2815 	mask &= ~(1 << bit);
2816 
2817 	/* Mask off all speeds higher than or equal to the current
2818 	 * one.  Force 1.5Gbps if current SPD is not available.
2819 	 */
2820 	if (spd > 1)
2821 		mask &= (1 << (spd - 1)) - 1;
2822 	else
2823 		mask &= 1;
2824 
2825 	/* were we already at the bottom? */
2826 	if (!mask)
2827 		return -EINVAL;
2828 
2829 	if (spd_limit) {
2830 		if (mask & ((1 << spd_limit) - 1))
2831 			mask &= (1 << spd_limit) - 1;
2832 		else {
2833 			bit = ffs(mask) - 1;
2834 			mask = 1 << bit;
2835 		}
2836 	}
2837 
2838 	link->sata_spd_limit = mask;
2839 
2840 	ata_link_warn(link, "limiting SATA link speed to %s\n",
2841 		      sata_spd_string(fls(mask)));
2842 
2843 	return 0;
2844 }
2845 
2846 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2847 {
2848 	struct ata_link *host_link = &link->ap->link;
2849 	u32 limit, target, spd;
2850 
2851 	limit = link->sata_spd_limit;
2852 
2853 	/* Don't configure downstream link faster than upstream link.
2854 	 * It doesn't speed up anything and some PMPs choke on such
2855 	 * configuration.
2856 	 */
2857 	if (!ata_is_host_link(link) && host_link->sata_spd)
2858 		limit &= (1 << host_link->sata_spd) - 1;
2859 
2860 	if (limit == UINT_MAX)
2861 		target = 0;
2862 	else
2863 		target = fls(limit);
2864 
2865 	spd = (*scontrol >> 4) & 0xf;
2866 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2867 
2868 	return spd != target;
2869 }
2870 
2871 /**
2872  *	sata_set_spd_needed - is SATA spd configuration needed
2873  *	@link: Link in question
2874  *
2875  *	Test whether the spd limit in SControl matches
2876  *	@link->sata_spd_limit.  This function is used to determine
2877  *	whether hardreset is necessary to apply SATA spd
2878  *	configuration.
2879  *
2880  *	LOCKING:
2881  *	Inherited from caller.
2882  *
2883  *	RETURNS:
2884  *	1 if SATA spd configuration is needed, 0 otherwise.
2885  */
2886 static int sata_set_spd_needed(struct ata_link *link)
2887 {
2888 	u32 scontrol;
2889 
2890 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2891 		return 1;
2892 
2893 	return __sata_set_spd_needed(link, &scontrol);
2894 }
2895 
2896 /**
2897  *	sata_set_spd - set SATA spd according to spd limit
2898  *	@link: Link to set SATA spd for
2899  *
2900  *	Set SATA spd of @link according to sata_spd_limit.
2901  *
2902  *	LOCKING:
2903  *	Inherited from caller.
2904  *
2905  *	RETURNS:
2906  *	0 if spd doesn't need to be changed, 1 if spd has been
2907  *	changed.  Negative errno if SCR registers are inaccessible.
2908  */
2909 int sata_set_spd(struct ata_link *link)
2910 {
2911 	u32 scontrol;
2912 	int rc;
2913 
2914 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2915 		return rc;
2916 
2917 	if (!__sata_set_spd_needed(link, &scontrol))
2918 		return 0;
2919 
2920 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2921 		return rc;
2922 
2923 	return 1;
2924 }
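
/*
 * Illustrative sketch (not part of libata): sata_down_spd_limit() and
 * sata_set_spd() work as a pair.  Error handling paths typically lower
 * the limit first and then hardreset the link when sata_set_spd()
 * reports that SControl had to be rewritten.  example_slow_down_link()
 * is hypothetical.
 */
static int example_slow_down_link(struct ata_link *link)
{
	int rc = sata_down_spd_limit(link, 0);

	if (rc)
		return rc;

	/* returns 1 when a hardreset is needed to apply the new limit */
	return sata_set_spd(link);
}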
2925 
2926 /*
2927  * This mode timing computation functionality is ported over from
2928  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2929  */
2930 /*
2931  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2932  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2933  * for UDMA6, which is currently supported only by Maxtor drives.
2934  *
2935  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2936  */
2937 
2938 static const struct ata_timing ata_timing[] = {
2939 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
2940 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
2941 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
2942 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
2943 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
2944 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
2945 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
2946 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
2947 
2948 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
2949 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
2950 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
2951 
2952 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
2953 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
2954 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
2955 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
2956 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
2957 
2958 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
2959 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
2960 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
2961 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
2962 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
2963 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
2964 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
2965 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
2966 
2967 	{ 0xFF }
2968 };
2969 
2970 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2971 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2972 
2973 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2974 {
2975 	q->setup	= EZ(t->setup      * 1000,  T);
2976 	q->act8b	= EZ(t->act8b      * 1000,  T);
2977 	q->rec8b	= EZ(t->rec8b      * 1000,  T);
2978 	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
2979 	q->active	= EZ(t->active     * 1000,  T);
2980 	q->recover	= EZ(t->recover    * 1000,  T);
2981 	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
2982 	q->cycle	= EZ(t->cycle      * 1000,  T);
2983 	q->udma		= EZ(t->udma       * 1000, UT);
2984 }
2985 
2986 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2987 		      struct ata_timing *m, unsigned int what)
2988 {
2989 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2990 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2991 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2992 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2993 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2994 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2995 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2996 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2997 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2998 }
2999 
3000 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3001 {
3002 	const struct ata_timing *t = ata_timing;
3003 
3004 	while (xfer_mode > t->mode)
3005 		t++;
3006 
3007 	if (xfer_mode == t->mode)
3008 		return t;
3009 
3010 	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3011 			__func__, xfer_mode);
3012 
3013 	return NULL;
3014 }
3015 
3016 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3017 		       struct ata_timing *t, int T, int UT)
3018 {
3019 	const u16 *id = adev->id;
3020 	const struct ata_timing *s;
3021 	struct ata_timing p;
3022 
3023 	/*
3024 	 * Find the mode.
3025 	 */
3026 
3027 	if (!(s = ata_timing_find_mode(speed)))
3028 		return -EINVAL;
3029 
3030 	memcpy(t, s, sizeof(*s));
3031 
3032 	/*
3033 	 * If the drive is an EIDE drive, it can tell us it needs extended
3034 	 * PIO/MW_DMA cycle timing.
3035 	 */
3036 
3037 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3038 		memset(&p, 0, sizeof(p));
3039 
3040 		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3041 			if (speed <= XFER_PIO_2)
3042 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3043 			else if ((speed <= XFER_PIO_4) ||
3044 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3045 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3046 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3047 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3048 
3049 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3050 	}
3051 
3052 	/*
3053 	 * Convert the timing to bus clock counts.
3054 	 */
3055 
3056 	ata_timing_quantize(t, t, T, UT);
3057 
3058 	/*
3059 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3060 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3061 	 * DMA cycle timing is no faster than the fastest PIO timing.
3062 	 */
3063 
3064 	if (speed > XFER_PIO_6) {
3065 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3066 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3067 	}
3068 
3069 	/*
3070 	 * Lengthen active & recovery time so that cycle time is correct.
3071 	 */
3072 
3073 	if (t->act8b + t->rec8b < t->cyc8b) {
3074 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3075 		t->rec8b = t->cyc8b - t->act8b;
3076 	}
3077 
3078 	if (t->active + t->recover < t->cycle) {
3079 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3080 		t->recover = t->cycle - t->active;
3081 	}
3082 
3083 	/* In a few cases quantisation may produce enough errors to
3084 	   leave t->cycle too low for the sum of active and recovery;
3085 	   if so we must correct this */
3086 	if (t->active + t->recover > t->cycle)
3087 		t->cycle = t->active + t->recover;
3088 
3089 	return 0;
3090 }
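
/*
 * Illustrative sketch (not part of libata): a PATA driver converts the
 * nanosecond table above into clock counts by passing its bus clock
 * period T (in picoseconds) to ata_timing_compute().  The 33.333 MHz
 * clock and the helper name example_compute_pio_clocks() are
 * hypothetical.
 */
static int example_compute_pio_clocks(struct ata_device *adev,
				      struct ata_timing *t)
{
	/* period of a hypothetical 33333 kHz bus clock, in picoseconds */
	const int T = 1000000000 / 33333;

	/*
	 * On success, t->setup, t->active, t->recover and t->cycle hold
	 * cycle counts rounded up for this clock, ready to be written to
	 * controller timing registers.
	 */
	return ata_timing_compute(adev, adev->pio_mode, t, T, T);
}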
3091 
3092 /**
3093  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3094  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3095  *	@cycle: cycle duration in ns
3096  *
3097  *	Return matching xfer mode for @cycle.  The returned mode is of
3098  *	the transfer type specified by @xfer_shift.  If @cycle is too
3099  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3100  *	than the fastest known mode, the fastest mode is returned.
3101  *
3102  *	LOCKING:
3103  *	None.
3104  *
3105  *	RETURNS:
3106  *	Matching xfer_mode, 0xff if no match found.
3107  */
3108 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3109 {
3110 	u8 base_mode = 0xff, last_mode = 0xff;
3111 	const struct ata_xfer_ent *ent;
3112 	const struct ata_timing *t;
3113 
3114 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3115 		if (ent->shift == xfer_shift)
3116 			base_mode = ent->base;
3117 
3118 	for (t = ata_timing_find_mode(base_mode);
3119 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3120 		unsigned short this_cycle;
3121 
3122 		switch (xfer_shift) {
3123 		case ATA_SHIFT_PIO:
3124 		case ATA_SHIFT_MWDMA:
3125 			this_cycle = t->cycle;
3126 			break;
3127 		case ATA_SHIFT_UDMA:
3128 			this_cycle = t->udma;
3129 			break;
3130 		default:
3131 			return 0xff;
3132 		}
3133 
3134 		if (cycle > this_cycle)
3135 			break;
3136 
3137 		last_mode = t->mode;
3138 	}
3139 
3140 	return last_mode;
3141 }
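
/*
 * Illustrative sketch (not part of libata): a cable or BIOS imposed cycle
 * time limit can be folded back into a transfer mode.  With the timing
 * table above, a 130 ns minimum cycle maps to XFER_MW_DMA_1 (150 ns),
 * since MWDMA2 at 120 ns would already be too fast.
 */
static u8 example_mwdma_mode_for_cycle(int cycle_ns)
{
	/* returns 0xff if @cycle_ns is too slow even for MWDMA0 */
	return ata_timing_cycle2mode(ATA_SHIFT_MWDMA, cycle_ns);
}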
3142 
3143 /**
3144  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3145  *	@dev: Device to adjust xfer masks
3146  *	@sel: ATA_DNXFER_* selector
3147  *
3148  *	Adjust xfer masks of @dev downward.  Note that this function
3149  *	does not apply the change.  Invoking ata_set_mode() afterwards
3150  *	will apply the limit.
3151  *
3152  *	LOCKING:
3153  *	Inherited from caller.
3154  *
3155  *	RETURNS:
3156  *	0 on success, negative errno on failure
3157  */
3158 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3159 {
3160 	char buf[32];
3161 	unsigned long orig_mask, xfer_mask;
3162 	unsigned long pio_mask, mwdma_mask, udma_mask;
3163 	int quiet, highbit;
3164 
3165 	quiet = !!(sel & ATA_DNXFER_QUIET);
3166 	sel &= ~ATA_DNXFER_QUIET;
3167 
3168 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3169 						  dev->mwdma_mask,
3170 						  dev->udma_mask);
3171 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3172 
3173 	switch (sel) {
3174 	case ATA_DNXFER_PIO:
3175 		highbit = fls(pio_mask) - 1;
3176 		pio_mask &= ~(1 << highbit);
3177 		break;
3178 
3179 	case ATA_DNXFER_DMA:
3180 		if (udma_mask) {
3181 			highbit = fls(udma_mask) - 1;
3182 			udma_mask &= ~(1 << highbit);
3183 			if (!udma_mask)
3184 				return -ENOENT;
3185 		} else if (mwdma_mask) {
3186 			highbit = fls(mwdma_mask) - 1;
3187 			mwdma_mask &= ~(1 << highbit);
3188 			if (!mwdma_mask)
3189 				return -ENOENT;
3190 		}
3191 		break;
3192 
3193 	case ATA_DNXFER_40C:
3194 		udma_mask &= ATA_UDMA_MASK_40C;
3195 		break;
3196 
3197 	case ATA_DNXFER_FORCE_PIO0:
3198 		pio_mask &= 1;
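		/* fall through */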
3199 	case ATA_DNXFER_FORCE_PIO:
3200 		mwdma_mask = 0;
3201 		udma_mask = 0;
3202 		break;
3203 
3204 	default:
3205 		BUG();
3206 	}
3207 
3208 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3209 
3210 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3211 		return -ENOENT;
3212 
3213 	if (!quiet) {
3214 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3215 			snprintf(buf, sizeof(buf), "%s:%s",
3216 				 ata_mode_string(xfer_mask),
3217 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3218 		else
3219 			snprintf(buf, sizeof(buf), "%s",
3220 				 ata_mode_string(xfer_mask));
3221 
3222 		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3223 	}
3224 
3225 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3226 			    &dev->udma_mask);
3227 
3228 	return 0;
3229 }
3230 
3231 static int ata_dev_set_mode(struct ata_device *dev)
3232 {
3233 	struct ata_port *ap = dev->link->ap;
3234 	struct ata_eh_context *ehc = &dev->link->eh_context;
3235 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3236 	const char *dev_err_whine = "";
3237 	int ign_dev_err = 0;
3238 	unsigned int err_mask = 0;
3239 	int rc;
3240 
3241 	dev->flags &= ~ATA_DFLAG_PIO;
3242 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3243 		dev->flags |= ATA_DFLAG_PIO;
3244 
3245 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3246 		dev_err_whine = " (SET_XFERMODE skipped)";
3247 	else {
3248 		if (nosetxfer)
3249 			ata_dev_warn(dev,
3250 				     "NOSETXFER but PATA detected - can't "
3251 				     "skip SETXFER, might malfunction\n");
3252 		err_mask = ata_dev_set_xfermode(dev);
3253 	}
3254 
3255 	if (err_mask & ~AC_ERR_DEV)
3256 		goto fail;
3257 
3258 	/* revalidate */
3259 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3260 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3261 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3262 	if (rc)
3263 		return rc;
3264 
3265 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3266 		/* Old CFA may refuse this command, which is just fine */
3267 		if (ata_id_is_cfa(dev->id))
3268 			ign_dev_err = 1;
3269 		/* Catch several broken garbage emulations plus some pre
3270 		   ATA devices */
3271 		if (ata_id_major_version(dev->id) == 0 &&
3272 					dev->pio_mode <= XFER_PIO_2)
3273 			ign_dev_err = 1;
3274 		/* Some very old devices and some bad newer ones fail
3275 		   any kind of SET_XFERMODE request but support PIO0-2
3276 		   timings and no IORDY */
3277 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3278 			ign_dev_err = 1;
3279 	}
3280 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3281 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3282 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3283 	    dev->dma_mode == XFER_MW_DMA_0 &&
3284 	    (dev->id[63] >> 8) & 1)
3285 		ign_dev_err = 1;
3286 
3287 	/* if the device is actually configured correctly, ignore dev err */
3288 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3289 		ign_dev_err = 1;
3290 
3291 	if (err_mask & AC_ERR_DEV) {
3292 		if (!ign_dev_err)
3293 			goto fail;
3294 		else
3295 			dev_err_whine = " (device error ignored)";
3296 	}
3297 
3298 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3299 		dev->xfer_shift, (int)dev->xfer_mode);
3300 
3301 	ata_dev_info(dev, "configured for %s%s\n",
3302 		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3303 		     dev_err_whine);
3304 
3305 	return 0;
3306 
3307  fail:
3308 	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3309 	return -EIO;
3310 }
3311 
3312 /**
3313  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3314  *	@link: link on which timings will be programmed
3315  *	@r_failed_dev: out parameter for failed device
3316  *
3317  *	Standard implementation of the function used to tune and set
3318  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3319  *	ata_dev_set_mode() fails, pointer to the failing device is
3320  *	returned in @r_failed_dev.
3321  *
3322  *	LOCKING:
3323  *	PCI/etc. bus probe sem.
3324  *
3325  *	RETURNS:
3326  *	0 on success, negative errno otherwise
3327  */
3328 
3329 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3330 {
3331 	struct ata_port *ap = link->ap;
3332 	struct ata_device *dev;
3333 	int rc = 0, used_dma = 0, found = 0;
3334 
3335 	/* step 1: calculate xfer_mask */
3336 	ata_for_each_dev(dev, link, ENABLED) {
3337 		unsigned long pio_mask, dma_mask;
3338 		unsigned int mode_mask;
3339 
3340 		mode_mask = ATA_DMA_MASK_ATA;
3341 		if (dev->class == ATA_DEV_ATAPI)
3342 			mode_mask = ATA_DMA_MASK_ATAPI;
3343 		else if (ata_id_is_cfa(dev->id))
3344 			mode_mask = ATA_DMA_MASK_CFA;
3345 
3346 		ata_dev_xfermask(dev);
3347 		ata_force_xfermask(dev);
3348 
3349 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3350 
3351 		if (libata_dma_mask & mode_mask)
3352 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3353 						     dev->udma_mask);
3354 		else
3355 			dma_mask = 0;
3356 
3357 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3358 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3359 
3360 		found = 1;
3361 		if (ata_dma_enabled(dev))
3362 			used_dma = 1;
3363 	}
3364 	if (!found)
3365 		goto out;
3366 
3367 	/* step 2: always set host PIO timings */
3368 	ata_for_each_dev(dev, link, ENABLED) {
3369 		if (dev->pio_mode == 0xff) {
3370 			ata_dev_warn(dev, "no PIO support\n");
3371 			rc = -EINVAL;
3372 			goto out;
3373 		}
3374 
3375 		dev->xfer_mode = dev->pio_mode;
3376 		dev->xfer_shift = ATA_SHIFT_PIO;
3377 		if (ap->ops->set_piomode)
3378 			ap->ops->set_piomode(ap, dev);
3379 	}
3380 
3381 	/* step 3: set host DMA timings */
3382 	ata_for_each_dev(dev, link, ENABLED) {
3383 		if (!ata_dma_enabled(dev))
3384 			continue;
3385 
3386 		dev->xfer_mode = dev->dma_mode;
3387 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3388 		if (ap->ops->set_dmamode)
3389 			ap->ops->set_dmamode(ap, dev);
3390 	}
3391 
3392 	/* step 4: update devices' xfer mode */
3393 	ata_for_each_dev(dev, link, ENABLED) {
3394 		rc = ata_dev_set_mode(dev);
3395 		if (rc)
3396 			goto out;
3397 	}
3398 
3399 	/* Record simplex status. If we selected DMA then the other
3400 	 * host channels are not permitted to do so.
3401 	 */
3402 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3403 		ap->host->simplex_claimed = ap;
3404 
3405  out:
3406 	if (rc)
3407 		*r_failed_dev = dev;
3408 	return rc;
3409 }
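
/*
 * Illustrative sketch (not part of libata): controllers with extra mode
 * constraints can provide their own ->set_mode() which clamps the device
 * masks and then calls ata_do_set_mode() to do the real work.
 * example_set_mode() and the UDMA2 board limit are hypothetical.
 */
static int example_set_mode(struct ata_link *link,
			    struct ata_device **r_failed_dev)
{
	struct ata_device *dev;

	/* hypothetical board limitation: never go beyond UDMA2 */
	ata_for_each_dev(dev, link, ENABLED)
		dev->udma_mask &= ATA_UDMA2;

	return ata_do_set_mode(link, r_failed_dev);
}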
3410 
3411 /**
3412  *	ata_wait_ready - wait for link to become ready
3413  *	@link: link to be waited on
3414  *	@deadline: deadline jiffies for the operation
3415  *	@check_ready: callback to check link readiness
3416  *
3417  *	Wait for @link to become ready.  @check_ready should return
3418  *	a positive number if @link is ready, 0 if it isn't, -ENODEV if
3419  *	link doesn't seem to be occupied, other errno for other error
3420  *	conditions.
3421  *
3422  *	Transient -ENODEV conditions are allowed for
3423  *	ATA_TMOUT_FF_WAIT.
3424  *
3425  *	LOCKING:
3426  *	EH context.
3427  *
3428  *	RETURNS:
3429  *	0 if @link is ready before @deadline; otherwise, -errno.
3430  */
3431 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3432 		   int (*check_ready)(struct ata_link *link))
3433 {
3434 	unsigned long start = jiffies;
3435 	unsigned long nodev_deadline;
3436 	int warned = 0;
3437 
3438 	/* choose which 0xff timeout to use, read comment in libata.h */
3439 	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3440 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3441 	else
3442 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3443 
3444 	/* Slave readiness can't be tested separately from master.  On
3445 	 * M/S emulation configuration, this function should be called
3446 	 * only on the master and it will handle both master and slave.
3447 	 */
3448 	WARN_ON(link == link->ap->slave_link);
3449 
3450 	if (time_after(nodev_deadline, deadline))
3451 		nodev_deadline = deadline;
3452 
3453 	while (1) {
3454 		unsigned long now = jiffies;
3455 		int ready, tmp;
3456 
3457 		ready = tmp = check_ready(link);
3458 		if (ready > 0)
3459 			return 0;
3460 
3461 		/*
3462 		 * -ENODEV could be transient.  Ignore -ENODEV if link
3463 		 * is online.  Also, some SATA devices take a long
3464 		 * time to clear 0xff after reset.  Wait for
3465 		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3466 		 * offline.
3467 		 *
3468 		 * Note that some PATA controllers (pata_ali) explode
3469 		 * if status register is read more than once when
3470 		 * there's no device attached.
3471 		 */
3472 		if (ready == -ENODEV) {
3473 			if (ata_link_online(link))
3474 				ready = 0;
3475 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3476 				 !ata_link_offline(link) &&
3477 				 time_before(now, nodev_deadline))
3478 				ready = 0;
3479 		}
3480 
3481 		if (ready)
3482 			return ready;
3483 		if (time_after(now, deadline))
3484 			return -EBUSY;
3485 
3486 		if (!warned && time_after(now, start + 5 * HZ) &&
3487 		    (deadline - now > 3 * HZ)) {
3488 			ata_link_warn(link,
3489 				"link is slow to respond, please be patient "
3490 				"(ready=%d)\n", tmp);
3491 			warned = 1;
3492 		}
3493 
3494 		ata_msleep(link->ap, 50);
3495 	}
3496 }
3497 
3498 /**
3499  *	ata_wait_after_reset - wait for link to become ready after reset
3500  *	@link: link to be waited on
3501  *	@deadline: deadline jiffies for the operation
3502  *	@check_ready: callback to check link readiness
3503  *
3504  *	Wait for @link to become ready after reset.
3505  *
3506  *	LOCKING:
3507  *	EH context.
3508  *
3509  *	RETURNS:
3510  *	0 if @link is ready before @deadline; otherwise, -errno.
3511  */
3512 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3513 				int (*check_ready)(struct ata_link *link))
3514 {
3515 	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3516 
3517 	return ata_wait_ready(link, deadline, check_ready);
3518 }
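
/*
 * Illustrative sketch (not part of libata): a driver specific hardreset
 * usually resumes the link and then waits for readiness through the
 * helper above, using a controller specific readiness check.
 * example_check_ready() and example_hardreset() are hypothetical; most
 * SATA drivers simply use sata_std_hardreset().
 */
static int example_check_ready(struct ata_link *link)
{
	/* poll a controller specific status register here; 1 means ready */
	return 1;
}

static int example_hardreset(struct ata_link *link, unsigned int *class,
			     unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		return rc;

	return ata_wait_after_reset(link, deadline, example_check_ready);
}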
3519 
3520 /**
3521  *	sata_link_debounce - debounce SATA phy status
3522  *	@link: ATA link to debounce SATA phy status for
3523  *	@params: timing parameters { interval, duration, timeout } in msec
3524  *	@deadline: deadline jiffies for the operation
3525  *
3526  *	Make sure SStatus of @link reaches stable state, determined by
3527  *	holding the same value where DET is not 1 for @duration polled
3528  *	every @interval, before @timeout.  Timeout constrains the
3529  *	beginning of the stable state.  Because DET gets stuck at 1 on
3530  *	some controllers after hot unplugging, this function waits
3531  *	until timeout then returns 0 if DET is stable at 1.
3532  *
3533  *	@timeout is further limited by @deadline.  The sooner of the
3534  *	two is used.
3535  *
3536  *	LOCKING:
3537  *	Kernel thread context (may sleep)
3538  *
3539  *	RETURNS:
3540  *	0 on success, -errno on failure.
3541  */
3542 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3543 		       unsigned long deadline)
3544 {
3545 	unsigned long interval = params[0];
3546 	unsigned long duration = params[1];
3547 	unsigned long last_jiffies, t;
3548 	u32 last, cur;
3549 	int rc;
3550 
3551 	t = ata_deadline(jiffies, params[2]);
3552 	if (time_before(t, deadline))
3553 		deadline = t;
3554 
3555 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3556 		return rc;
3557 	cur &= 0xf;
3558 
3559 	last = cur;
3560 	last_jiffies = jiffies;
3561 
3562 	while (1) {
3563 		ata_msleep(link->ap, interval);
3564 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3565 			return rc;
3566 		cur &= 0xf;
3567 
3568 		/* DET stable? */
3569 		if (cur == last) {
3570 			if (cur == 1 && time_before(jiffies, deadline))
3571 				continue;
3572 			if (time_after(jiffies,
3573 				       ata_deadline(last_jiffies, duration)))
3574 				return 0;
3575 			continue;
3576 		}
3577 
3578 		/* unstable, start over */
3579 		last = cur;
3580 		last_jiffies = jiffies;
3581 
3582 		/* Check deadline.  If debouncing failed, return
3583 		 * -EPIPE to tell upper layer to lower link speed.
3584 		 */
3585 		if (time_after(jiffies, deadline))
3586 			return -EPIPE;
3587 	}
3588 }
3589 
3590 /**
3591  *	sata_link_resume - resume SATA link
3592  *	@link: ATA link to resume SATA
3593  *	@params: timing parameters { interval, duration, timeout } in msec
3594  *	@deadline: deadline jiffies for the operation
3595  *
3596  *	Resume SATA phy @link and debounce it.
3597  *
3598  *	LOCKING:
3599  *	Kernel thread context (may sleep)
3600  *
3601  *	RETURNS:
3602  *	0 on success, -errno on failure.
3603  */
3604 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3605 		     unsigned long deadline)
3606 {
3607 	int tries = ATA_LINK_RESUME_TRIES;
3608 	u32 scontrol, serror;
3609 	int rc;
3610 
3611 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3612 		return rc;
3613 
3614 	/*
3615 	 * Writes to SControl sometimes get ignored under certain
3616 	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3617 	 * cleared.
3618 	 */
3619 	do {
3620 		scontrol = (scontrol & 0x0f0) | 0x300;
3621 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3622 			return rc;
3623 		/*
3624 		 * Some PHYs react badly if SStatus is pounded
3625 		 * immediately after resuming.  Delay 200ms before
3626 		 * debouncing.
3627 		 */
3628 		ata_msleep(link->ap, 200);
3629 
3630 		/* is SControl restored correctly? */
3631 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3632 			return rc;
3633 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3634 
3635 	if ((scontrol & 0xf0f) != 0x300) {
3636 		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3637 			     scontrol);
3638 		return 0;
3639 	}
3640 
3641 	if (tries < ATA_LINK_RESUME_TRIES)
3642 		ata_link_warn(link, "link resume succeeded after %d retries\n",
3643 			      ATA_LINK_RESUME_TRIES - tries);
3644 
3645 	if ((rc = sata_link_debounce(link, params, deadline)))
3646 		return rc;
3647 
3648 	/* clear SError, some PHYs require this even for SRST to work */
3649 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3650 		rc = sata_scr_write(link, SCR_ERROR, serror);
3651 
3652 	return rc != -EINVAL ? rc : 0;
3653 }
3654 
3655 /**
3656  *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3657  *	@link: ATA link to manipulate SControl for
3658  *	@policy: LPM policy to configure
3659  *	@spm_wakeup: initiate LPM transition to active state
3660  *
3661  *	Manipulate the IPM field of the SControl register of @link
3662  *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3663  *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3664  *	the link.  This function also clears PHYRDY_CHG before
3665  *	returning.
3666  *
3667  *	LOCKING:
3668  *	EH context.
3669  *
3670  *	RETURNS:
3671  *	0 on success, -errno otherwise.
3672  */
3673 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3674 		      bool spm_wakeup)
3675 {
3676 	struct ata_eh_context *ehc = &link->eh_context;
3677 	bool woken_up = false;
3678 	u32 scontrol;
3679 	int rc;
3680 
3681 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3682 	if (rc)
3683 		return rc;
3684 
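	/*
	 * SControl layout (per the SATA spec): DET is bits 3:0, SPD bits
	 * 7:4, IPM bits 11:8 and SPM bits 15:12.  The IPM writes below
	 * disable or allow PARTIAL/SLUMBER transitions; SPM value 0x4
	 * requests a transition back to the active interface power state.
	 */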
3685 	switch (policy) {
3686 	case ATA_LPM_MAX_POWER:
3687 		/* disable all LPM transitions */
3688 		scontrol |= (0x7 << 8);
3689 		/* initiate transition to active state */
3690 		if (spm_wakeup) {
3691 			scontrol |= (0x4 << 12);
3692 			woken_up = true;
3693 		}
3694 		break;
3695 	case ATA_LPM_MED_POWER:
3696 		/* allow LPM to PARTIAL */
3697 		scontrol &= ~(0x1 << 8);
3698 		scontrol |= (0x6 << 8);
3699 		break;
3700 	case ATA_LPM_MIN_POWER:
3701 		if (ata_link_nr_enabled(link) > 0)
3702 			/* no restrictions on LPM transitions */
3703 			scontrol &= ~(0x7 << 8);
3704 		else {
3705 			/* empty port, power off */
3706 			scontrol &= ~0xf;
3707 			scontrol |= (0x1 << 2);
3708 		}
3709 		break;
3710 	default:
3711 		WARN_ON(1);
3712 	}
3713 
3714 	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3715 	if (rc)
3716 		return rc;
3717 
3718 	/* give the link time to transit out of LPM state */
3719 	if (woken_up)
3720 		msleep(10);
3721 
3722 	/* clear PHYRDY_CHG from SError */
3723 	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3724 	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3725 }
3726 
3727 /**
3728  *	ata_std_prereset - prepare for reset
3729  *	@link: ATA link to be reset
3730  *	@deadline: deadline jiffies for the operation
3731  *
3732  *	@link is about to be reset.  Initialize it.  Failure from
3733  *	prereset makes libata abort the whole reset sequence and give up
3734  *	that port, so prereset should be best-effort.  It does its
3735  *	best to prepare for reset sequence but if things go wrong, it
3736  *	should just whine, not fail.
3737  *
3738  *	LOCKING:
3739  *	Kernel thread context (may sleep)
3740  *
3741  *	RETURNS:
3742  *	0 on success, -errno otherwise.
3743  */
3744 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3745 {
3746 	struct ata_port *ap = link->ap;
3747 	struct ata_eh_context *ehc = &link->eh_context;
3748 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3749 	int rc;
3750 
3751 	/* if we're about to do hardreset, nothing more to do */
3752 	if (ehc->i.action & ATA_EH_HARDRESET)
3753 		return 0;
3754 
3755 	/* if SATA, resume link */
3756 	if (ap->flags & ATA_FLAG_SATA) {
3757 		rc = sata_link_resume(link, timing, deadline);
3758 		/* whine about phy resume failure but proceed */
3759 		if (rc && rc != -EOPNOTSUPP)
3760 			ata_link_warn(link,
3761 				      "failed to resume link for reset (errno=%d)\n",
3762 				      rc);
3763 	}
3764 
3765 	/* no point in trying softreset on offline link */
3766 	if (ata_phys_link_offline(link))
3767 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3768 
3769 	return 0;
3770 }
3771 
3772 /**
3773  *	sata_link_hardreset - reset link via SATA phy reset
3774  *	@link: link to reset
3775  *	@timing: timing parameters { interval, duration, timeout } in msec
3776  *	@deadline: deadline jiffies for the operation
3777  *	@online: optional out parameter indicating link onlineness
3778  *	@check_ready: optional callback to check link readiness
3779  *
3780  *	SATA phy-reset @link using DET bits of SControl register.
3781  *	After hardreset, link readiness is waited upon using
3782  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3783  *	allowed to not specify @check_ready and do the waiting
3784  *	themselves after this function returns.  Device classification
3785  *	is the LLD's responsibility.
3786  *
3787  *	*@online is set to one iff reset succeeded and @link is online
3788  *	after reset.
3789  *
3790  *	LOCKING:
3791  *	Kernel thread context (may sleep)
3792  *
3793  *	RETURNS:
3794  *	0 on success, -errno otherwise.
3795  */
3796 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3797 			unsigned long deadline,
3798 			bool *online, int (*check_ready)(struct ata_link *))
3799 {
3800 	u32 scontrol;
3801 	int rc;
3802 
3803 	DPRINTK("ENTER\n");
3804 
3805 	if (online)
3806 		*online = false;
3807 
3808 	if (sata_set_spd_needed(link)) {
3809 		/* SATA spec says nothing about how to reconfigure
3810 		 * spd.  To be on the safe side, turn off phy during
3811 		 * reconfiguration.  This works for at least ICH7 AHCI
3812 		 * and Sil3124.
3813 		 */
3814 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3815 			goto out;
3816 
3817 		scontrol = (scontrol & 0x0f0) | 0x304;
3818 
3819 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3820 			goto out;
3821 
3822 		sata_set_spd(link);
3823 	}
3824 
3825 	/* issue phy wake/reset */
3826 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3827 		goto out;
3828 
3829 	scontrol = (scontrol & 0x0f0) | 0x301;
3830 
3831 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3832 		goto out;
3833 
3834 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3835 	 * 10.4.2 says at least 1 ms.
3836 	 */
3837 	ata_msleep(link->ap, 1);
3838 
3839 	/* bring link back */
3840 	rc = sata_link_resume(link, timing, deadline);
3841 	if (rc)
3842 		goto out;
3843 	/* if link is offline nothing more to do */
3844 	if (ata_phys_link_offline(link))
3845 		goto out;
3846 
3847 	/* Link is online.  From this point, -ENODEV too is an error. */
3848 	if (online)
3849 		*online = true;
3850 
3851 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3852 		/* If PMP is supported, we have to do follow-up SRST.
3853 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3854 		 * the first port is empty.  Wait only for
3855 		 * ATA_TMOUT_PMP_SRST_WAIT.
3856 		 */
3857 		if (check_ready) {
3858 			unsigned long pmp_deadline;
3859 
3860 			pmp_deadline = ata_deadline(jiffies,
3861 						    ATA_TMOUT_PMP_SRST_WAIT);
3862 			if (time_after(pmp_deadline, deadline))
3863 				pmp_deadline = deadline;
3864 			ata_wait_ready(link, pmp_deadline, check_ready);
3865 		}
3866 		rc = -EAGAIN;
3867 		goto out;
3868 	}
3869 
3870 	rc = 0;
3871 	if (check_ready)
3872 		rc = ata_wait_ready(link, deadline, check_ready);
3873  out:
3874 	if (rc && rc != -EAGAIN) {
3875 		/* online is set iff link is online && reset succeeded */
3876 		if (online)
3877 			*online = false;
3878 		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3879 	}
3880 	DPRINTK("EXIT, rc=%d\n", rc);
3881 	return rc;
3882 }
3883 
3884 /**
3885  *	sata_std_hardreset - COMRESET w/o waiting or classification
3886  *	@link: link to reset
3887  *	@class: resulting class of attached device
3888  *	@deadline: deadline jiffies for the operation
3889  *
3890  *	Standard SATA COMRESET w/o waiting or classification.
3891  *
3892  *	LOCKING:
3893  *	Kernel thread context (may sleep)
3894  *
3895  *	RETURNS:
3896  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3897  */
3898 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3899 		       unsigned long deadline)
3900 {
3901 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3902 	bool online;
3903 	int rc;
3904 
3905 	/* do hardreset */
3906 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3907 	return online ? -EAGAIN : rc;
3908 }
3909 
3910 /**
3911  *	ata_std_postreset - standard postreset callback
3912  *	@link: the target ata_link
3913  *	@classes: classes of attached devices
3914  *
3915  *	This function is invoked after a successful reset.  Note that
3916  *	the device might have been reset more than once using
3917  *	different reset methods before postreset is invoked.
3918  *
3919  *	LOCKING:
3920  *	Kernel thread context (may sleep)
3921  */
3922 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3923 {
3924 	u32 serror;
3925 
3926 	DPRINTK("ENTER\n");
3927 
3928 	/* reset complete, clear SError */
3929 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3930 		sata_scr_write(link, SCR_ERROR, serror);
3931 
3932 	/* print link status */
3933 	sata_print_link_status(link);
3934 
3935 	DPRINTK("EXIT\n");
3936 }
3937 
3938 /**
3939  *	ata_dev_same_device - Determine whether new ID matches configured device
3940  *	@dev: device to compare against
3941  *	@new_class: class of the new device
3942  *	@new_id: IDENTIFY page of the new device
3943  *
3944  *	Compare @new_class and @new_id against @dev and determine
3945  *	whether @dev is the device indicated by @new_class and
3946  *	@new_id.
3947  *
3948  *	LOCKING:
3949  *	None.
3950  *
3951  *	RETURNS:
3952  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3953  */
3954 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3955 			       const u16 *new_id)
3956 {
3957 	const u16 *old_id = dev->id;
3958 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3959 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3960 
3961 	if (dev->class != new_class) {
3962 		ata_dev_info(dev, "class mismatch %d != %d\n",
3963 			     dev->class, new_class);
3964 		return 0;
3965 	}
3966 
3967 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3968 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3969 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3970 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3971 
3972 	if (strcmp(model[0], model[1])) {
3973 		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3974 			     model[0], model[1]);
3975 		return 0;
3976 	}
3977 
3978 	if (strcmp(serial[0], serial[1])) {
3979 		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3980 			     serial[0], serial[1]);
3981 		return 0;
3982 	}
3983 
3984 	return 1;
3985 }
3986 
3987 /**
3988  *	ata_dev_reread_id - Re-read IDENTIFY data
3989  *	@dev: target ATA device
3990  *	@readid_flags: read ID flags
3991  *
3992  *	Re-read IDENTIFY page and make sure @dev is still attached to
3993  *	the port.
3994  *
3995  *	LOCKING:
3996  *	Kernel thread context (may sleep)
3997  *
3998  *	RETURNS:
3999  *	0 on success, negative errno otherwise
4000  */
4001 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4002 {
4003 	unsigned int class = dev->class;
4004 	u16 *id = (void *)dev->link->ap->sector_buf;
4005 	int rc;
4006 
4007 	/* read ID data */
4008 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4009 	if (rc)
4010 		return rc;
4011 
4012 	/* is the device still there? */
4013 	if (!ata_dev_same_device(dev, class, id))
4014 		return -ENODEV;
4015 
4016 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4017 	return 0;
4018 }
4019 
4020 /**
4021  *	ata_dev_revalidate - Revalidate ATA device
4022  *	@dev: device to revalidate
4023  *	@new_class: new class code
4024  *	@readid_flags: read ID flags
4025  *
4026  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4027  *	port and reconfigure it according to the new IDENTIFY page.
4028  *
4029  *	LOCKING:
4030  *	Kernel thread context (may sleep)
4031  *
4032  *	RETURNS:
4033  *	0 on success, negative errno otherwise
4034  */
4035 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4036 		       unsigned int readid_flags)
4037 {
4038 	u64 n_sectors = dev->n_sectors;
4039 	u64 n_native_sectors = dev->n_native_sectors;
4040 	int rc;
4041 
4042 	if (!ata_dev_enabled(dev))
4043 		return -ENODEV;
4044 
4045 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4046 	if (ata_class_enabled(new_class) &&
4047 	    new_class != ATA_DEV_ATA &&
4048 	    new_class != ATA_DEV_ATAPI &&
4049 	    new_class != ATA_DEV_ZAC &&
4050 	    new_class != ATA_DEV_SEMB) {
4051 		ata_dev_info(dev, "class mismatch %u != %u\n",
4052 			     dev->class, new_class);
4053 		rc = -ENODEV;
4054 		goto fail;
4055 	}
4056 
4057 	/* re-read ID */
4058 	rc = ata_dev_reread_id(dev, readid_flags);
4059 	if (rc)
4060 		goto fail;
4061 
4062 	/* configure device according to the new ID */
4063 	rc = ata_dev_configure(dev);
4064 	if (rc)
4065 		goto fail;
4066 
4067 	/* verify n_sectors hasn't changed */
4068 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4069 	    dev->n_sectors == n_sectors)
4070 		return 0;
4071 
4072 	/* n_sectors has changed */
4073 	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4074 		     (unsigned long long)n_sectors,
4075 		     (unsigned long long)dev->n_sectors);
4076 
4077 	/*
4078 	 * Something could have caused HPA to be unlocked
4079 	 * involuntarily.  If n_native_sectors hasn't changed and the
4080 	 * new size matches it, keep the device.
4081 	 */
4082 	if (dev->n_native_sectors == n_native_sectors &&
4083 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4084 		ata_dev_warn(dev,
4085 			     "new n_sectors matches native, probably "
4086 			     "late HPA unlock, n_sectors updated\n");
4087 		/* use the larger n_sectors */
4088 		return 0;
4089 	}
4090 
4091 	/*
4092 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4093 	 * unlocking HPA in those cases.
4094 	 *
4095 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4096 	 */
4097 	if (dev->n_native_sectors == n_native_sectors &&
4098 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4099 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4100 		ata_dev_warn(dev,
4101 			     "old n_sectors matches native, probably "
4102 			     "late HPA lock, will try to unlock HPA\n");
4103 		/* try unlocking HPA */
4104 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4105 		rc = -EIO;
4106 	} else
4107 		rc = -ENODEV;
4108 
4109 	/* restore original n_[native_]sectors and fail */
4110 	dev->n_native_sectors = n_native_sectors;
4111 	dev->n_sectors = n_sectors;
4112  fail:
4113 	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4114 	return rc;
4115 }
4116 
4117 struct ata_blacklist_entry {
4118 	const char *model_num;
4119 	const char *model_rev;
4120 	unsigned long horkage;
4121 };
4122 
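/*
 * The model_num and model_rev strings below are glob patterns matched
 * against the IDENTIFY data with glob_match() from <linux/glob.h>:
 * '*' matches any substring and '[36]' one character from the set, so
 * "ST3640[36]23AS" covers both ST3640323AS and ST3640623AS.  A NULL
 * model_rev matches any firmware revision.
 */
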
4123 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4124 	/* Devices with DMA related problems under Linux */
4125 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4126 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4127 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4128 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4129 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4130 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4131 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4132 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4133 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4134 	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4135 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4136 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4137 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4138 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4139 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4140 	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4141 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4142 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4143 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4144 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4145 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4146 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4147 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4148 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4149 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4150 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4151 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4152 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4153 	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4154 	/* Odd clown on sil3726/4726 PMPs */
4155 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4156 	/* Similar story with ASMedia 1092 */
4157 	{ "ASMT109x- Config",	NULL,		ATA_HORKAGE_DISABLE },
4158 
4159 	/* Weird ATAPI devices */
4160 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4161 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4162 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4163 	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4164 
4165 	/*
4166 	 * Causes silent data corruption with higher max sects.
4167 	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4168 	 */
4169 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
4170 
4171 	/*
4172 	 * These devices time out with higher max sects.
4173 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4174 	 */
4175 	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4176 	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4177 
4178 	/* Devices we expect to fail diagnostics */
4179 
4180 	/* Devices where NCQ should be avoided */
4181 	/* NCQ is slow */
4182 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4183 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4184 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4185 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4186 	/* NCQ is broken */
4187 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4188 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4189 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4190 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4191 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4192 
4193 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4194 	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4195 						ATA_HORKAGE_FIRMWARE_WARN },
4196 
4197 	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4198 						ATA_HORKAGE_FIRMWARE_WARN },
4199 
4200 	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4201 						ATA_HORKAGE_FIRMWARE_WARN },
4202 
4203 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4204 						ATA_HORKAGE_FIRMWARE_WARN },
4205 
4206 	/* drives which fail FPDMA_AA activation (some may freeze afterwards);
4207 	   the ST disks also have LPM issues */
4208 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA |
4209 						ATA_HORKAGE_NOLPM, },
4210 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA |
4211 						ATA_HORKAGE_NOLPM, },
4212 	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4213 
4214 	/* Blacklist entries taken from Silicon Image 3124/3132
4215 	   Windows driver .inf file - also several Linux problem reports */
4216 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4217 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4218 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4219 
4220 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4221 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4222 
4223 	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
4224 	   SD7SN6S256G and SD8SN8U256G */
4225 	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
4226 
4227 	/* devices which puke on READ_NATIVE_MAX */
4228 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4229 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4230 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4231 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4232 
4233 	/* this one allows HPA unlocking but fails IOs on the area */
4234 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4235 
4236 	/* Devices which report 1 sector over size HPA */
4237 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4238 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4239 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4240 
4241 	/* Devices which get the IVB wrong */
4242 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4243 	/* Maybe we should just blacklist TSSTcorp... */
4244 	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4245 
4246 	/* Devices that do not need bridging limits applied */
4247 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4248 	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4249 
4250 	/* Devices which aren't very happy with higher link speeds */
4251 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4252 	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4253 
4254 	/*
4255 	 * Devices which choke on SETXFER.  Applies only if both the
4256 	 * device and controller are SATA.
4257 	 */
4258 	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4259 	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4260 	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4261 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4262 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4263 
4264 	/* Crucial BX100 SSD 500GB has broken LPM support */
4265 	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
4266 
4267 	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4268 	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4269 						ATA_HORKAGE_ZERO_AFTER_TRIM |
4270 						ATA_HORKAGE_NOLPM, },
4271 	/* 512GB MX100 with newer firmware has only LPM issues */
4272 	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
4273 						ATA_HORKAGE_NOLPM, },
4274 
4275 	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4276 	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4277 						ATA_HORKAGE_ZERO_AFTER_TRIM |
4278 						ATA_HORKAGE_NOLPM, },
4279 	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4280 						ATA_HORKAGE_ZERO_AFTER_TRIM |
4281 						ATA_HORKAGE_NOLPM, },
4282 
4283 	/* devices that don't properly handle queued TRIM commands */
4284 	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4285 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4286 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4287 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4288 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4289 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4290 	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4291 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4292 	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4293 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4294 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4295 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4296 	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4297 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4298 	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4299 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4300 	{ "Samsung SSD 860*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4301 						ATA_HORKAGE_ZERO_AFTER_TRIM |
4302 						ATA_HORKAGE_NO_NCQ_ON_ATI, },
4303 	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4304 						ATA_HORKAGE_ZERO_AFTER_TRIM |
4305 						ATA_HORKAGE_NO_NCQ_ON_ATI, },
4306 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4307 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4308 
4309 	/* devices that don't properly handle TRIM commands */
4310 	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
4311 
4312 	/*
4313 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4314 	 * (Return Zero After Trim) flags in the ATA Command Set are
4315 	 * unreliable in the sense that they only define what happens if
4316 	 * the device successfully executed the DSM TRIM command. TRIM
4317 	 * is only advisory, however, and the device is free to silently
4318 	 * ignore all or parts of the request.
4319 	 *
4320 	 * Whitelist drives that are known to reliably return zeroes
4321 	 * after TRIM.
4322 	 */
4323 
4324 	/*
4325 	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4326 	 * that model before whitelisting all other intel SSDs.
4327 	 */
4328 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
4329 
4330 	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4331 	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4332 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4333 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4334 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4335 	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4336 	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4337 	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4338 
4339 	/*
4340 	 * Some WD SATA-I drives spin up and down erratically when the link
4341 	 * is put into the slumber mode.  We don't have full list of the
4342 	 * affected devices.  Disable LPM if the device matches one of the
4343 	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4344 	 * lost too.
4345 	 *
4346 	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4347 	 */
4348 	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4349 	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4350 	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4351 	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4352 	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4353 	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4354 	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4355 
4356 	/* End Marker */
4357 	{ }
4358 };
4359 
4360 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4361 {
4362 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4363 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4364 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4365 
4366 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4367 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4368 
4369 	while (ad->model_num) {
4370 		if (glob_match(ad->model_num, model_num)) {
4371 			if (ad->model_rev == NULL)
4372 				return ad->horkage;
4373 			if (glob_match(ad->model_rev, model_rev))
4374 				return ad->horkage;
4375 		}
4376 		ad++;
4377 	}
4378 	return 0;
4379 }
4380 
4381 static int ata_dma_blacklisted(const struct ata_device *dev)
4382 {
4383 	/* We don't support polling DMA.
4384 	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with CDB-intr
4385 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4386 	 */
4387 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4388 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4389 		return 1;
4390 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4391 }
4392 
4393 /**
4394  *	ata_is_40wire		-	check drive side detection
4395  *	@dev: device
4396  *
4397  *	Perform drive side detection decoding, allowing for device vendors
4398  *	who can't follow the documentation.
4399  */
4400 
4401 static int ata_is_40wire(struct ata_device *dev)
4402 {
4403 	if (dev->horkage & ATA_HORKAGE_IVB)
4404 		return ata_drive_40wire_relaxed(dev->id);
4405 	return ata_drive_40wire(dev->id);
4406 }
4407 
4408 /**
4409  *	cable_is_40wire		-	40/80/SATA decider
4410  *	@ap: port to consider
4411  *
4412  *	This function encapsulates the policy for speed management
4413  *	in one place. At the moment we don't cache the result but
4414  *	there is a good case for setting ap->cbl to the result when
4415  *	we are called with unknown cables (and figuring out if it
4416  *	impacts hotplug at all).
4417  *
4418  *	Return 1 if the cable appears to be 40 wire.
4419  */
4420 
4421 static int cable_is_40wire(struct ata_port *ap)
4422 {
4423 	struct ata_link *link;
4424 	struct ata_device *dev;
4425 
4426 	/* If the controller thinks we are 40 wire, we are. */
4427 	if (ap->cbl == ATA_CBL_PATA40)
4428 		return 1;
4429 
4430 	/* If the controller thinks we are 80 wire, we are. */
4431 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4432 		return 0;
4433 
4434 	/* If the system is known to be 40 wire short cable (e.g.
4435 	 * laptop), then we allow 80 wire modes even if the drive
4436 	 * isn't sure.
4437 	 */
4438 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4439 		return 0;
4440 
4441 	/* If the controller doesn't know, we scan.
4442 	 *
4443 	 * Note: We look for all 40 wire detects at this point.  Any
4444 	 *       80 wire detect is taken to be 80 wire cable because
4445 	 * - in many setups only the one drive (slave if present) will
4446 	 *   give a valid detect
4447 	 * - if you have a non detect capable drive you don't want it
4448 	 *   to colour the choice
4449 	 */
4450 	ata_for_each_link(link, ap, EDGE) {
4451 		ata_for_each_dev(dev, link, ENABLED) {
4452 			if (!ata_is_40wire(dev))
4453 				return 0;
4454 		}
4455 	}
4456 	return 1;
4457 }
4458 
4459 /**
4460  *	ata_dev_xfermask - Compute supported xfermask of the given device
4461  *	@dev: Device to compute xfermask for
4462  *
4463  *	Compute supported xfermask of @dev and store it in
4464  *	dev->*_mask.  This function is responsible for applying all
4465  *	known limits including host controller limits, device
4466  *	blacklist, etc...
4467  *
4468  *	LOCKING:
4469  *	None.
4470  */
4471 static void ata_dev_xfermask(struct ata_device *dev)
4472 {
4473 	struct ata_link *link = dev->link;
4474 	struct ata_port *ap = link->ap;
4475 	struct ata_host *host = ap->host;
4476 	unsigned long xfer_mask;
4477 
4478 	/* controller modes available */
4479 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4480 				      ap->mwdma_mask, ap->udma_mask);
4481 
4482 	/* drive modes available */
4483 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4484 				       dev->mwdma_mask, dev->udma_mask);
4485 	xfer_mask &= ata_id_xfermask(dev->id);
4486 
4487 	/*
4488 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4489 	 *	cable
4490 	 */
4491 	if (ata_dev_pair(dev)) {
4492 		/* No PIO5 or PIO6 */
4493 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4494 		/* No MWDMA3 or MWDMA4 */
4495 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4496 	}
4497 
4498 	if (ata_dma_blacklisted(dev)) {
4499 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4500 		ata_dev_warn(dev,
4501 			     "device is on DMA blacklist, disabling DMA\n");
4502 	}
4503 
4504 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4505 	    host->simplex_claimed && host->simplex_claimed != ap) {
4506 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4507 		ata_dev_warn(dev,
4508 			     "simplex DMA is claimed by other device, disabling DMA\n");
4509 	}
4510 
4511 	if (ap->flags & ATA_FLAG_NO_IORDY)
4512 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4513 
4514 	if (ap->ops->mode_filter)
4515 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4516 
4517 	/* Apply cable rule here.  Don't apply it early because when
4518 	 * we handle hot plug the cable type can itself change.
4519 	 * Check this last so that we know if the transfer rate was
4520 	 * solely limited by the cable.
4521 	 * Unknown or 80 wire cables reported host side are checked
4522 	 * drive side as well. Cases where we know a 40wire cable
4523 	 * is used safely for 80 are not checked here.
4524 	 */
4525 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4526 		/* UDMA/44 or higher would be available */
4527 		if (cable_is_40wire(ap)) {
4528 			ata_dev_warn(dev,
4529 				     "limited to UDMA/33 due to 40-wire cable\n");
4530 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4531 		}
4532 
4533 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4534 			    &dev->mwdma_mask, &dev->udma_mask);
4535 }
4536 
4537 /**
4538  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4539  *	@dev: Device to which command will be sent
4540  *
4541  *	Issue SET FEATURES - XFER MODE command to device @dev
4542  *	on port @ap.
4543  *
4544  *	LOCKING:
4545  *	PCI/etc. bus probe sem.
4546  *
4547  *	RETURNS:
4548  *	0 on success, AC_ERR_* mask otherwise.
4549  */
4550 
4551 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4552 {
4553 	struct ata_taskfile tf;
4554 	unsigned int err_mask;
4555 
4556 	/* set up set-features taskfile */
4557 	DPRINTK("set features - xfer mode\n");
4558 
4559 	/* Some controllers and ATAPI devices show flaky interrupt
4560 	 * behavior after setting xfer mode.  Use polling instead.
4561 	 */
4562 	ata_tf_init(dev, &tf);
4563 	tf.command = ATA_CMD_SET_FEATURES;
4564 	tf.feature = SETFEATURES_XFER;
4565 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4566 	tf.protocol = ATA_PROT_NODATA;
4567 	/* If we are using IORDY we must send the mode setting command */
4568 	if (ata_pio_need_iordy(dev))
4569 		tf.nsect = dev->xfer_mode;
4570 	/* If the device has IORDY and the controller does not - turn it off */
4571 	else if (ata_id_has_iordy(dev->id))
4572 		tf.nsect = 0x01;
4573 	else /* In the ancient relic department - skip all of this */
4574 		return 0;
4575 
4576 	/* On some disks, this command causes spin-up, so we need longer timeout */
4577 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4578 
4579 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4580 	return err_mask;
4581 }
4582 
4583 /**
4584  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4585  *	@dev: Device to which command will be sent
4586  *	@enable: Whether to enable or disable the feature
4587  *	@feature: The sector count value representing the feature to set
4588  *
4589  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4590  *	on port @ap with the sector count set to @feature.
4591  *
4592  *	LOCKING:
4593  *	PCI/etc. bus probe sem.
4594  *
4595  *	RETURNS:
4596  *	0 on success, AC_ERR_* mask otherwise.
4597  */
4598 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4599 {
4600 	struct ata_taskfile tf;
4601 	unsigned int err_mask;
4602 
4603 	/* set up set-features taskfile */
4604 	DPRINTK("set features - SATA features\n");
4605 
4606 	ata_tf_init(dev, &tf);
4607 	tf.command = ATA_CMD_SET_FEATURES;
4608 	tf.feature = enable;
4609 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4610 	tf.protocol = ATA_PROT_NODATA;
4611 	tf.nsect = feature;
4612 
4613 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4614 
4615 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4616 	return err_mask;
4617 }
4618 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
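
/*
 * Typical (illustrative) use is to toggle a SATA feature through its
 * sector-count encoding, e.g. enabling device-initiated link power
 * management:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *
 * SETFEATURES_SATA_ENABLE and SATA_DIPM are defined in <linux/ata.h>.
 */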
4619 
4620 /**
4621  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4622  *	@dev: Device to which command will be sent
4623  *	@heads: Number of heads (taskfile parameter)
4624  *	@sectors: Number of sectors (taskfile parameter)
4625  *
4626  *	LOCKING:
4627  *	Kernel thread context (may sleep)
4628  *
4629  *	RETURNS:
4630  *	0 on success, AC_ERR_* mask otherwise.
4631  */
4632 static unsigned int ata_dev_init_params(struct ata_device *dev,
4633 					u16 heads, u16 sectors)
4634 {
4635 	struct ata_taskfile tf;
4636 	unsigned int err_mask;
4637 
4638 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4639 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4640 		return AC_ERR_INVALID;
4641 
4642 	/* set up init dev params taskfile */
4643 	DPRINTK("init dev params\n");
4644 
4645 	ata_tf_init(dev, &tf);
4646 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4647 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4648 	tf.protocol = ATA_PROT_NODATA;
4649 	tf.nsect = sectors;
4650 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4651 
4652 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4653 	/* A clean abort indicates an original or just out-of-spec drive
4654 	   and we should continue as we issue the setup based on the
4655 	   drive-reported working geometry */
4656 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4657 		err_mask = 0;
4658 
4659 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4660 	return err_mask;
4661 }
4662 
4663 /**
4664  *	ata_sg_clean - Unmap DMA memory associated with command
4665  *	@qc: Command containing DMA memory to be released
4666  *
4667  *	Unmap all mapped DMA memory associated with this command.
4668  *
4669  *	LOCKING:
4670  *	spin_lock_irqsave(host lock)
4671  */
4672 void ata_sg_clean(struct ata_queued_cmd *qc)
4673 {
4674 	struct ata_port *ap = qc->ap;
4675 	struct scatterlist *sg = qc->sg;
4676 	int dir = qc->dma_dir;
4677 
4678 	WARN_ON_ONCE(sg == NULL);
4679 
4680 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4681 
4682 	if (qc->n_elem)
4683 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4684 
4685 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4686 	qc->sg = NULL;
4687 }
4688 
4689 /**
4690  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4691  *	@qc: Metadata associated with taskfile to check
4692  *
4693  *	Allow low-level driver to filter ATA PACKET commands, returning
4694  *	a status indicating whether or not it is OK to use DMA for the
4695  *	supplied PACKET command.
4696  *
4697  *	LOCKING:
4698  *	spin_lock_irqsave(host lock)
4699  *
4700  *	RETURNS: 0 when ATAPI DMA can be used
4701  *               nonzero otherwise
4702  */
4703 int atapi_check_dma(struct ata_queued_cmd *qc)
4704 {
4705 	struct ata_port *ap = qc->ap;
4706 
4707 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4708 	 * few ATAPI devices choke on such DMA requests.
4709 	 */
4710 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4711 	    unlikely(qc->nbytes & 15))
4712 		return 1;
4713 
4714 	if (ap->ops->check_atapi_dma)
4715 		return ap->ops->check_atapi_dma(qc);
4716 
4717 	return 0;
4718 }
4719 
4720 /**
4721  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4722  *	@qc: ATA command in question
4723  *
4724  *	Non-NCQ commands cannot run with any other command, NCQ or
4725  *	not.  As the upper layer only knows the queue depth, we are
4726  *	responsible for maintaining exclusion.  This function checks
4727  *	whether a new command @qc can be issued.
4728  *
4729  *	LOCKING:
4730  *	spin_lock_irqsave(host lock)
4731  *
4732  *	RETURNS:
4733  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4734  */
4735 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4736 {
4737 	struct ata_link *link = qc->dev->link;
4738 
4739 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4740 		if (!ata_tag_valid(link->active_tag))
4741 			return 0;
4742 	} else {
4743 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4744 			return 0;
4745 	}
4746 
4747 	return ATA_DEFER_LINK;
4748 }
4749 
4750 enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4751 {
4752 	return AC_ERR_OK;
4753 }
4754 
4755 /**
4756  *	ata_sg_init - Associate command with scatter-gather table.
4757  *	@qc: Command to be associated
4758  *	@sg: Scatter-gather table.
4759  *	@n_elem: Number of elements in s/g table.
4760  *
4761  *	Initialize the data-related elements of queued_cmd @qc
4762  *	to point to a scatter-gather table @sg, containing @n_elem
4763  *	elements.
4764  *
4765  *	LOCKING:
4766  *	spin_lock_irqsave(host lock)
4767  */
4768 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4769 		 unsigned int n_elem)
4770 {
4771 	qc->sg = sg;
4772 	qc->n_elem = n_elem;
4773 	qc->cursg = qc->sg;
4774 }
4775 
4776 /**
4777  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4778  *	@qc: Command with scatter-gather table to be mapped.
4779  *
4780  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4781  *
4782  *	LOCKING:
4783  *	spin_lock_irqsave(host lock)
4784  *
4785  *	RETURNS:
4786  *	Zero on success, negative on error.
4787  *
4788  */
4789 static int ata_sg_setup(struct ata_queued_cmd *qc)
4790 {
4791 	struct ata_port *ap = qc->ap;
4792 	unsigned int n_elem;
4793 
4794 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4795 
4796 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4797 	if (n_elem < 1)
4798 		return -1;
4799 
4800 	DPRINTK("%d sg elements mapped\n", n_elem);
4801 	qc->orig_n_elem = qc->n_elem;
4802 	qc->n_elem = n_elem;
4803 	qc->flags |= ATA_QCFLAG_DMAMAP;
4804 
4805 	return 0;
4806 }
4807 
4808 /**
4809  *	swap_buf_le16 - swap halves of 16-bit words in place
4810  *	@buf:  Buffer to swap
4811  *	@buf_words:  Number of 16-bit words in buffer.
4812  *
4813  *	Swap halves of 16-bit words if needed to convert from
4814  *	little-endian byte order to native cpu byte order, or
4815  *	vice-versa.
4816  *
4817  *	LOCKING:
4818  *	Inherited from caller.
4819  */
4820 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4821 {
4822 #ifdef __BIG_ENDIAN
4823 	unsigned int i;
4824 
4825 	for (i = 0; i < buf_words; i++)
4826 		buf[i] = le16_to_cpu(buf[i]);
4827 #endif /* __BIG_ENDIAN */
4828 }
4829 
4830 /**
4831  *	ata_qc_new_init - Request an available ATA command, and initialize it
4832  *	@dev: Device from whom we request an available command structure
4833  *	@tag: tag
4834  *
4835  *	LOCKING:
4836  *	None.
4837  */
4838 
4839 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4840 {
4841 	struct ata_port *ap = dev->link->ap;
4842 	struct ata_queued_cmd *qc;
4843 
4844 	/* no command while frozen */
4845 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4846 		return NULL;
4847 
4848 	/* libsas case */
4849 	if (ap->flags & ATA_FLAG_SAS_HOST) {
4850 		tag = ata_sas_allocate_tag(ap);
4851 		if (tag < 0)
4852 			return NULL;
4853 	}
4854 
4855 	qc = __ata_qc_from_tag(ap, tag);
4856 	qc->tag = tag;
4857 	qc->scsicmd = NULL;
4858 	qc->ap = ap;
4859 	qc->dev = dev;
4860 
4861 	ata_qc_reinit(qc);
4862 
4863 	return qc;
4864 }
4865 
4866 /**
4867  *	ata_qc_free - free unused ata_queued_cmd
4868  *	@qc: Command to complete
4869  *
4870  *	Designed to free unused ata_queued_cmd object
4871  *	in case something prevents using it.
4872  *
4873  *	LOCKING:
4874  *	spin_lock_irqsave(host lock)
4875  */
4876 void ata_qc_free(struct ata_queued_cmd *qc)
4877 {
4878 	struct ata_port *ap;
4879 	unsigned int tag;
4880 
4881 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4882 	ap = qc->ap;
4883 
4884 	qc->flags = 0;
4885 	tag = qc->tag;
4886 	if (likely(ata_tag_valid(tag))) {
4887 		qc->tag = ATA_TAG_POISON;
4888 		if (ap->flags & ATA_FLAG_SAS_HOST)
4889 			ata_sas_free_tag(tag, ap);
4890 	}
4891 }
4892 
4893 void __ata_qc_complete(struct ata_queued_cmd *qc)
4894 {
4895 	struct ata_port *ap;
4896 	struct ata_link *link;
4897 
4898 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4899 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4900 	ap = qc->ap;
4901 	link = qc->dev->link;
4902 
4903 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4904 		ata_sg_clean(qc);
4905 
4906 	/* command should be marked inactive atomically with qc completion */
4907 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4908 		link->sactive &= ~(1 << qc->tag);
4909 		if (!link->sactive)
4910 			ap->nr_active_links--;
4911 	} else {
4912 		link->active_tag = ATA_TAG_POISON;
4913 		ap->nr_active_links--;
4914 	}
4915 
4916 	/* clear exclusive status */
4917 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4918 		     ap->excl_link == link))
4919 		ap->excl_link = NULL;
4920 
4921 	/* atapi: mark qc as inactive to prevent the interrupt handler
4922 	 * from completing the command twice later, before the error handler
4923 	 * is called. (when rc != 0 and atapi request sense is needed)
4924 	 */
4925 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4926 	ap->qc_active &= ~(1 << qc->tag);
4927 
4928 	/* call completion callback */
4929 	qc->complete_fn(qc);
4930 }
4931 
4932 static void fill_result_tf(struct ata_queued_cmd *qc)
4933 {
4934 	struct ata_port *ap = qc->ap;
4935 
4936 	qc->result_tf.flags = qc->tf.flags;
4937 	ap->ops->qc_fill_rtf(qc);
4938 }
4939 
4940 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4941 {
4942 	struct ata_device *dev = qc->dev;
4943 
4944 	if (ata_is_nodata(qc->tf.protocol))
4945 		return;
4946 
4947 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4948 		return;
4949 
4950 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4951 }
4952 
4953 /**
4954  *	ata_qc_complete - Complete an active ATA command
4955  *	@qc: Command to complete
4956  *
4957  *	Indicate to the mid and upper layers that an ATA command has
4958  *	completed, with either an ok or not-ok status.
4959  *
4960  *	Refrain from calling this function multiple times when
4961  *	successfully completing multiple NCQ commands.
4962  *	ata_qc_complete_multiple() should be used instead, which will
4963  *	properly update IRQ expect state.
4964  *
4965  *	LOCKING:
4966  *	spin_lock_irqsave(host lock)
4967  */
4968 void ata_qc_complete(struct ata_queued_cmd *qc)
4969 {
4970 	struct ata_port *ap = qc->ap;
4971 
4972 	/* XXX: New EH and old EH use different mechanisms to
4973 	 * synchronize EH with regular execution path.
4974 	 *
4975 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4976 	 * Normal execution path is responsible for not accessing a
4977 	 * failed qc.  libata core enforces the rule by returning NULL
4978 	 * from ata_qc_from_tag() for failed qcs.
4979 	 *
4980 	 * Old EH depends on ata_qc_complete() nullifying completion
4981 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4982 	 * not synchronize with interrupt handler.  Only PIO task is
4983 	 * taken care of.
4984 	 */
4985 	if (ap->ops->error_handler) {
4986 		struct ata_device *dev = qc->dev;
4987 		struct ata_eh_info *ehi = &dev->link->eh_info;
4988 
4989 		if (unlikely(qc->err_mask))
4990 			qc->flags |= ATA_QCFLAG_FAILED;
4991 
4992 		/*
4993 		 * Finish internal commands without any further processing
4994 		 * and always with the result TF filled.
4995 		 */
4996 		if (unlikely(ata_tag_internal(qc->tag))) {
4997 			fill_result_tf(qc);
4998 			trace_ata_qc_complete_internal(qc);
4999 			__ata_qc_complete(qc);
5000 			return;
5001 		}
5002 
5003 		/*
5004 		 * Non-internal qc has failed.  Fill the result TF and
5005 		 * summon EH.
5006 		 */
5007 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5008 			fill_result_tf(qc);
5009 			trace_ata_qc_complete_failed(qc);
5010 			ata_qc_schedule_eh(qc);
5011 			return;
5012 		}
5013 
5014 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5015 
5016 		/* read result TF if requested */
5017 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5018 			fill_result_tf(qc);
5019 
5020 		trace_ata_qc_complete_done(qc);
5021 		/* Some commands need post-processing after successful
5022 		 * completion.
5023 		 */
5024 		switch (qc->tf.command) {
5025 		case ATA_CMD_SET_FEATURES:
5026 			if (qc->tf.feature != SETFEATURES_WC_ON &&
5027 			    qc->tf.feature != SETFEATURES_WC_OFF)
5028 				break;
5029 			/* fall through */
5030 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5031 		case ATA_CMD_SET_MULTI: /* multi_count changed */
5032 			/* revalidate device */
5033 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5034 			ata_port_schedule_eh(ap);
5035 			break;
5036 
5037 		case ATA_CMD_SLEEP:
5038 			dev->flags |= ATA_DFLAG_SLEEPING;
5039 			break;
5040 		}
5041 
5042 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5043 			ata_verify_xfer(qc);
5044 
5045 		__ata_qc_complete(qc);
5046 	} else {
5047 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5048 			return;
5049 
5050 		/* read result TF if failed or requested */
5051 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5052 			fill_result_tf(qc);
5053 
5054 		__ata_qc_complete(qc);
5055 	}
5056 }
5057 
5058 /**
5059  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5060  *	@ap: port in question
5061  *	@qc_active: new qc_active mask
5062  *
5063  *	Complete in-flight commands.  This function is meant to be
5064  *	called from low-level driver's interrupt routine to complete
5065  *	requests normally.  ap->qc_active and @qc_active are compared
5066  *	and commands are completed accordingly.
5067  *
5068  *	Always use this function when completing multiple NCQ commands
5069  *	from IRQ handlers instead of calling ata_qc_complete()
5070  *	multiple times to keep IRQ expect status properly in sync.
5071  *
5072  *	LOCKING:
5073  *	spin_lock_irqsave(host lock)
5074  *
5075  *	RETURNS:
5076  *	Number of completed commands on success, -errno otherwise.
5077  */
5078 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5079 {
5080 	int nr_done = 0;
5081 	u32 done_mask;
5082 
5083 	done_mask = ap->qc_active ^ qc_active;
5084 
5085 	if (unlikely(done_mask & qc_active)) {
5086 		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5087 			     ap->qc_active, qc_active);
5088 		return -EINVAL;
5089 	}
5090 
5091 	while (done_mask) {
5092 		struct ata_queued_cmd *qc;
5093 		unsigned int tag = __ffs(done_mask);
5094 
5095 		qc = ata_qc_from_tag(ap, tag);
5096 		if (qc) {
5097 			ata_qc_complete(qc);
5098 			nr_done++;
5099 		}
5100 		done_mask &= ~(1 << tag);
5101 	}
5102 
5103 	return nr_done;
5104 }
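
/*
 * Illustrative only (not taken from a specific driver): an interrupt
 * handler that can read the set of tags still busy in hardware passes
 * that set straight through, e.g.
 *
 *	u32 still_active = my_read_active_tags(ap);
 *	ata_qc_complete_multiple(ap, still_active);
 *
 * where my_read_active_tags() is a hypothetical helper.  Every tag set
 * in ap->qc_active but clear in the value passed in is completed in
 * one pass.
 */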
5105 
5106 /**
5107  *	ata_qc_issue - issue taskfile to device
5108  *	@qc: command to issue to device
5109  *
5110  *	Prepare an ATA command for submission to the device.
5111  *	This includes mapping the data into a DMA-able
5112  *	area, filling in the S/G table, and finally
5113  *	writing the taskfile to hardware, starting the command.
5114  *
5115  *	LOCKING:
5116  *	spin_lock_irqsave(host lock)
5117  */
5118 void ata_qc_issue(struct ata_queued_cmd *qc)
5119 {
5120 	struct ata_port *ap = qc->ap;
5121 	struct ata_link *link = qc->dev->link;
5122 	u8 prot = qc->tf.protocol;
5123 
5124 	/* Make sure only one non-NCQ command is outstanding.  The
5125 	 * check is skipped for old EH because it reuses active qc to
5126 	 * request ATAPI sense.
5127 	 */
5128 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5129 
5130 	if (ata_is_ncq(prot)) {
5131 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5132 
5133 		if (!link->sactive)
5134 			ap->nr_active_links++;
5135 		link->sactive |= 1 << qc->tag;
5136 	} else {
5137 		WARN_ON_ONCE(link->sactive);
5138 
5139 		ap->nr_active_links++;
5140 		link->active_tag = qc->tag;
5141 	}
5142 
5143 	qc->flags |= ATA_QCFLAG_ACTIVE;
5144 	ap->qc_active |= 1 << qc->tag;
5145 
5146 	/*
5147 	 * We guarantee to LLDs that they will have at least one
5148 	 * non-zero sg if the command is a data command.
5149 	 */
5150 	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5151 		goto sys_err;
5152 
5153 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5154 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5155 		if (ata_sg_setup(qc))
5156 			goto sys_err;
5157 
5158 	/* if device is sleeping, schedule reset and abort the link */
5159 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5160 		link->eh_info.action |= ATA_EH_RESET;
5161 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5162 		ata_link_abort(link);
5163 		return;
5164 	}
5165 
5166 	qc->err_mask |= ap->ops->qc_prep(qc);
5167 	if (unlikely(qc->err_mask))
5168 		goto err;
5169 	trace_ata_qc_issue(qc);
5170 	qc->err_mask |= ap->ops->qc_issue(qc);
5171 	if (unlikely(qc->err_mask))
5172 		goto err;
5173 	return;
5174 
5175 sys_err:
5176 	qc->err_mask |= AC_ERR_SYSTEM;
5177 err:
5178 	ata_qc_complete(qc);
5179 }
5180 
5181 /**
5182  *	sata_scr_valid - test whether SCRs are accessible
5183  *	@link: ATA link to test SCR accessibility for
5184  *
5185  *	Test whether SCRs are accessible for @link.
5186  *
5187  *	LOCKING:
5188  *	None.
5189  *
5190  *	RETURNS:
5191  *	1 if SCRs are accessible, 0 otherwise.
5192  */
5193 int sata_scr_valid(struct ata_link *link)
5194 {
5195 	struct ata_port *ap = link->ap;
5196 
5197 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5198 }
5199 
5200 /**
5201  *	sata_scr_read - read SCR register of the specified port
5202  *	@link: ATA link to read SCR for
5203  *	@reg: SCR to read
5204  *	@val: Place to store read value
5205  *
5206  *	Read SCR register @reg of @link into *@val.  This function is
5207  *	guaranteed to succeed if @link is ap->link, the cable type of
5208  *	the port is SATA and the port implements ->scr_read.
5209  *
5210  *	LOCKING:
5211  *	None if @link is ap->link.  Kernel thread context otherwise.
5212  *
5213  *	RETURNS:
5214  *	0 on success, negative errno on failure.
5215  */
5216 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5217 {
5218 	if (ata_is_host_link(link)) {
5219 		if (sata_scr_valid(link))
5220 			return link->ap->ops->scr_read(link, reg, val);
5221 		return -EOPNOTSUPP;
5222 	}
5223 
5224 	return sata_pmp_scr_read(link, reg, val);
5225 }
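
/*
 * Illustrative only: a common pattern is to read SStatus and test the
 * DET field (bits 3:0), where 0x3 means a device is present and PHY
 * communication is established, e.g.
 *
 *	u32 sstatus;
 *	bool phy_up = sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
 *		      (sstatus & 0xf) == 0x3;
 */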
5226 
5227 /**
5228  *	sata_scr_write - write SCR register of the specified port
5229  *	@link: ATA link to write SCR for
5230  *	@reg: SCR to write
5231  *	@val: value to write
5232  *
5233  *	Write @val to SCR register @reg of @link.  This function is
5234  *	guaranteed to succeed if @link is ap->link, the cable type of
5235  *	the port is SATA and the port implements ->scr_write.
5236  *
5237  *	LOCKING:
5238  *	None if @link is ap->link.  Kernel thread context otherwise.
5239  *
5240  *	RETURNS:
5241  *	0 on success, negative errno on failure.
5242  */
5243 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5244 {
5245 	if (ata_is_host_link(link)) {
5246 		if (sata_scr_valid(link))
5247 			return link->ap->ops->scr_write(link, reg, val);
5248 		return -EOPNOTSUPP;
5249 	}
5250 
5251 	return sata_pmp_scr_write(link, reg, val);
5252 }
5253 
5254 /**
5255  *	sata_scr_write_flush - write SCR register of the specified port and flush
5256  *	@link: ATA link to write SCR for
5257  *	@reg: SCR to write
5258  *	@val: value to write
5259  *
5260  *	This function is identical to sata_scr_write() except that this
5261  *	function performs a flush after writing to the register.
5262  *
5263  *	LOCKING:
5264  *	None if @link is ap->link.  Kernel thread context otherwise.
5265  *
5266  *	RETURNS:
5267  *	0 on success, negative errno on failure.
5268  */
5269 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5270 {
5271 	if (ata_is_host_link(link)) {
5272 		int rc;
5273 
5274 		if (sata_scr_valid(link)) {
5275 			rc = link->ap->ops->scr_write(link, reg, val);
5276 			if (rc == 0)
5277 				rc = link->ap->ops->scr_read(link, reg, &val);
5278 			return rc;
5279 		}
5280 		return -EOPNOTSUPP;
5281 	}
5282 
5283 	return sata_pmp_scr_write(link, reg, val);
5284 }
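/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a common pattern built on these accessors is reading SError and writing
 * the value back to clear the recorded error bits, as is done after link
 * resets.
 *
 *	u32 serror;
 *
 *	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
 *		sata_scr_write(link, SCR_ERROR, serror);
 */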
5285 
5286 /**
5287  *	ata_phys_link_online - test whether the given link is online
5288  *	@link: ATA link to test
5289  *
5290  *	Test whether @link is online.  Note that this function returns
5291  *	0 if online status of @link cannot be obtained, so
5292  *	ata_link_online(link) != !ata_link_offline(link).
5293  *
5294  *	LOCKING:
5295  *	None.
5296  *
5297  *	RETURNS:
5298  *	True if the port online status is available and online.
5299  */
5300 bool ata_phys_link_online(struct ata_link *link)
5301 {
5302 	u32 sstatus;
5303 
5304 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5305 	    ata_sstatus_online(sstatus))
5306 		return true;
5307 	return false;
5308 }
5309 
5310 /**
5311  *	ata_phys_link_offline - test whether the given link is offline
5312  *	@link: ATA link to test
5313  *
5314  *	Test whether @link is offline.  Note that this function
5315  *	returns 0 if offline status of @link cannot be obtained, so
5316  *	ata_link_online(link) != !ata_link_offline(link).
5317  *
5318  *	LOCKING:
5319  *	None.
5320  *
5321  *	RETURNS:
5322  *	True if the port offline status is available and offline.
5323  */
5324 bool ata_phys_link_offline(struct ata_link *link)
5325 {
5326 	u32 sstatus;
5327 
5328 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5329 	    !ata_sstatus_online(sstatus))
5330 		return true;
5331 	return false;
5332 }
5333 
5334 /**
5335  *	ata_link_online - test whether the given link is online
5336  *	@link: ATA link to test
5337  *
5338  *	Test whether @link is online.  This is identical to
5339  *	ata_phys_link_online() when there's no slave link.  When
5340  *	there's a slave link, this function should only be called on
5341  *	the master link and will return true if any of M/S links is
5342  *	online.
5343  *
5344  *	LOCKING:
5345  *	None.
5346  *
5347  *	RETURNS:
5348  *	True if the port online status is available and online.
5349  */
5350 bool ata_link_online(struct ata_link *link)
5351 {
5352 	struct ata_link *slave = link->ap->slave_link;
5353 
5354 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5355 
5356 	return ata_phys_link_online(link) ||
5357 		(slave && ata_phys_link_online(slave));
5358 }
5359 
5360 /**
5361  *	ata_link_offline - test whether the given link is offline
5362  *	@link: ATA link to test
5363  *
5364  *	Test whether @link is offline.  This is identical to
5365  *	ata_phys_link_offline() when there's no slave link.  When
5366  *	there's a slave link, this function should only be called on
5367  *	the master link and will return true if both M/S links are
5368  *	offline.
5369  *
5370  *	LOCKING:
5371  *	None.
5372  *
5373  *	RETURNS:
5374  *	True if the port offline status is available and offline.
5375  */
5376 bool ata_link_offline(struct ata_link *link)
5377 {
5378 	struct ata_link *slave = link->ap->slave_link;
5379 
5380 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5381 
5382 	return ata_phys_link_offline(link) &&
5383 		(!slave || ata_phys_link_offline(slave));
5384 }
5385 
5386 #ifdef CONFIG_PM
5387 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5388 				unsigned int action, unsigned int ehi_flags,
5389 				bool async)
5390 {
5391 	struct ata_link *link;
5392 	unsigned long flags;
5393 
5394 	/* Previous resume operation might still be in
5395 	 * progress.  Wait for PM_PENDING to clear.
5396 	 */
5397 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5398 		ata_port_wait_eh(ap);
5399 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5400 	}
5401 
5402 	/* request PM ops to EH */
5403 	spin_lock_irqsave(ap->lock, flags);
5404 
5405 	ap->pm_mesg = mesg;
5406 	ap->pflags |= ATA_PFLAG_PM_PENDING;
5407 	ata_for_each_link(link, ap, HOST_FIRST) {
5408 		link->eh_info.action |= action;
5409 		link->eh_info.flags |= ehi_flags;
5410 	}
5411 
5412 	ata_port_schedule_eh(ap);
5413 
5414 	spin_unlock_irqrestore(ap->lock, flags);
5415 
5416 	if (!async) {
5417 		ata_port_wait_eh(ap);
5418 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5419 	}
5420 }
5421 
5422 /*
5423  * On some hardware, the device fails to respond after being spun down for suspend.  As
5424  * the device won't be used before being resumed, we don't need to touch the
5425  * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
5426  *
5427  * http://thread.gmane.org/gmane.linux.ide/46764
5428  */
5429 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5430 						 | ATA_EHI_NO_AUTOPSY
5431 						 | ATA_EHI_NO_RECOVERY;
5432 
5433 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5434 {
5435 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5436 }
5437 
5438 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5439 {
5440 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5441 }
5442 
5443 static int ata_port_pm_suspend(struct device *dev)
5444 {
5445 	struct ata_port *ap = to_ata_port(dev);
5446 
5447 	if (pm_runtime_suspended(dev))
5448 		return 0;
5449 
5450 	ata_port_suspend(ap, PMSG_SUSPEND);
5451 	return 0;
5452 }
5453 
5454 static int ata_port_pm_freeze(struct device *dev)
5455 {
5456 	struct ata_port *ap = to_ata_port(dev);
5457 
5458 	if (pm_runtime_suspended(dev))
5459 		return 0;
5460 
5461 	ata_port_suspend(ap, PMSG_FREEZE);
5462 	return 0;
5463 }
5464 
5465 static int ata_port_pm_poweroff(struct device *dev)
5466 {
5467 	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5468 	return 0;
5469 }
5470 
5471 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5472 						| ATA_EHI_QUIET;
5473 
5474 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5475 {
5476 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5477 }
5478 
5479 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5480 {
5481 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5482 }
5483 
5484 static int ata_port_pm_resume(struct device *dev)
5485 {
5486 	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5487 	pm_runtime_disable(dev);
5488 	pm_runtime_set_active(dev);
5489 	pm_runtime_enable(dev);
5490 	return 0;
5491 }
5492 
5493 /*
5494  * For ODDs, the upper layer will poll for media change every few seconds,
5495  * which will make it enter and leave suspend state every few seconds. As
5496  * each suspend causes a hard/soft reset, the gain from runtime suspend is
5497  * very small and the ODD may malfunction after constantly being reset.
5498  * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5499  * ODD is attached to the port.
5500  */
5501 static int ata_port_runtime_idle(struct device *dev)
5502 {
5503 	struct ata_port *ap = to_ata_port(dev);
5504 	struct ata_link *link;
5505 	struct ata_device *adev;
5506 
5507 	ata_for_each_link(link, ap, HOST_FIRST) {
5508 		ata_for_each_dev(adev, link, ENABLED)
5509 			if (adev->class == ATA_DEV_ATAPI &&
5510 			    !zpodd_dev_enabled(adev))
5511 				return -EBUSY;
5512 	}
5513 
5514 	return 0;
5515 }
5516 
5517 static int ata_port_runtime_suspend(struct device *dev)
5518 {
5519 	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5520 	return 0;
5521 }
5522 
5523 static int ata_port_runtime_resume(struct device *dev)
5524 {
5525 	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5526 	return 0;
5527 }
5528 
5529 static const struct dev_pm_ops ata_port_pm_ops = {
5530 	.suspend = ata_port_pm_suspend,
5531 	.resume = ata_port_pm_resume,
5532 	.freeze = ata_port_pm_freeze,
5533 	.thaw = ata_port_pm_resume,
5534 	.poweroff = ata_port_pm_poweroff,
5535 	.restore = ata_port_pm_resume,
5536 
5537 	.runtime_suspend = ata_port_runtime_suspend,
5538 	.runtime_resume = ata_port_runtime_resume,
5539 	.runtime_idle = ata_port_runtime_idle,
5540 };
5541 
5542 /* sas ports don't participate in pm runtime management of ata_ports,
5543  * and need to resume ata devices at the domain level, not the per-port
5544  * level. sas suspend/resume is async to allow parallel port recovery
5545  * since sas has multiple ata_port instances per Scsi_Host.
5546  */
5547 void ata_sas_port_suspend(struct ata_port *ap)
5548 {
5549 	ata_port_suspend_async(ap, PMSG_SUSPEND);
5550 }
5551 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5552 
5553 void ata_sas_port_resume(struct ata_port *ap)
5554 {
5555 	ata_port_resume_async(ap, PMSG_RESUME);
5556 }
5557 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5558 
5559 /**
5560  *	ata_host_suspend - suspend host
5561  *	@host: host to suspend
5562  *	@mesg: PM message
5563  *
5564  *	Suspend @host.  Actual operation is performed by port suspend.
5565  */
5566 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5567 {
5568 	host->dev->power.power_state = mesg;
5569 	return 0;
5570 }
5571 
5572 /**
5573  *	ata_host_resume - resume host
5574  *	@host: host to resume
5575  *
5576  *	Resume @host.  Actual operation is performed by port resume.
5577  */
5578 void ata_host_resume(struct ata_host *host)
5579 {
5580 	host->dev->power.power_state = PMSG_ON;
5581 }
5582 #endif
5583 
5584 struct device_type ata_port_type = {
5585 	.name = "ata_port",
5586 #ifdef CONFIG_PM
5587 	.pm = &ata_port_pm_ops,
5588 #endif
5589 };
5590 
5591 /**
5592  *	ata_dev_init - Initialize an ata_device structure
5593  *	@dev: Device structure to initialize
5594  *
5595  *	Initialize @dev in preparation for probing.
5596  *
5597  *	LOCKING:
5598  *	Inherited from caller.
5599  */
5600 void ata_dev_init(struct ata_device *dev)
5601 {
5602 	struct ata_link *link = ata_dev_phys_link(dev);
5603 	struct ata_port *ap = link->ap;
5604 	unsigned long flags;
5605 
5606 	/* SATA spd limit is bound to the attached device, reset together */
5607 	link->sata_spd_limit = link->hw_sata_spd_limit;
5608 	link->sata_spd = 0;
5609 
5610 	/* High bits of dev->flags are used to record warm plug
5611 	 * requests which occur asynchronously.  Synchronize using
5612 	 * host lock.
5613 	 */
5614 	spin_lock_irqsave(ap->lock, flags);
5615 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5616 	dev->horkage = 0;
5617 	spin_unlock_irqrestore(ap->lock, flags);
5618 
5619 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5620 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5621 	dev->pio_mask = UINT_MAX;
5622 	dev->mwdma_mask = UINT_MAX;
5623 	dev->udma_mask = UINT_MAX;
5624 }
5625 
5626 /**
5627  *	ata_link_init - Initialize an ata_link structure
5628  *	@ap: ATA port link is attached to
5629  *	@link: Link structure to initialize
5630  *	@pmp: Port multiplier port number
5631  *
5632  *	Initialize @link.
5633  *
5634  *	LOCKING:
5635  *	Kernel thread context (may sleep)
5636  */
5637 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5638 {
5639 	int i;
5640 
5641 	/* clear everything except for devices */
5642 	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5643 	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5644 
5645 	link->ap = ap;
5646 	link->pmp = pmp;
5647 	link->active_tag = ATA_TAG_POISON;
5648 	link->hw_sata_spd_limit = UINT_MAX;
5649 
5650 	/* can't use iterator, ap isn't initialized yet */
5651 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5652 		struct ata_device *dev = &link->device[i];
5653 
5654 		dev->link = link;
5655 		dev->devno = dev - link->device;
5656 #ifdef CONFIG_ATA_ACPI
5657 		dev->gtf_filter = ata_acpi_gtf_filter;
5658 #endif
5659 		ata_dev_init(dev);
5660 	}
5661 }
5662 
5663 /**
5664  *	sata_link_init_spd - Initialize link->sata_spd_limit
5665  *	@link: Link to configure sata_spd_limit for
5666  *
5667  *	Initialize @link->[hw_]sata_spd_limit to the currently
5668  *	configured value.
5669  *
5670  *	LOCKING:
5671  *	Kernel thread context (may sleep).
5672  *
5673  *	RETURNS:
5674  *	0 on success, -errno on failure.
5675  */
5676 int sata_link_init_spd(struct ata_link *link)
5677 {
5678 	u8 spd;
5679 	int rc;
5680 
5681 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5682 	if (rc)
5683 		return rc;
5684 
5685 	spd = (link->saved_scontrol >> 4) & 0xf;
5686 	if (spd)
5687 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5688 
5689 	ata_force_link_limits(link);
5690 
5691 	link->sata_spd_limit = link->hw_sata_spd_limit;
5692 
5693 	return 0;
5694 }
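/*
 * Worked example (editorial note): SControl bits 7:4 hold the configured
 * speed limit.  If SPD == 2 (limit to 3.0 Gbps), (1 << 2) - 1 == 0x3, so
 * only the Gen1 (1.5 Gbps) and Gen2 (3.0 Gbps) bits survive in
 * link->hw_sata_spd_limit; SPD == 0 means no limit and the mask is left
 * untouched.
 */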
5695 
5696 /**
5697  *	ata_port_alloc - allocate and initialize basic ATA port resources
5698  *	@host: ATA host this allocated port belongs to
5699  *
5700  *	Allocate and initialize basic ATA port resources.
5701  *
5702  *	RETURNS:
5703  *	Allocate ATA port on success, NULL on failure.
5704  *
5705  *	LOCKING:
5706  *	Inherited from calling layer (may sleep).
5707  */
5708 struct ata_port *ata_port_alloc(struct ata_host *host)
5709 {
5710 	struct ata_port *ap;
5711 
5712 	DPRINTK("ENTER\n");
5713 
5714 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5715 	if (!ap)
5716 		return NULL;
5717 
5718 	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5719 	ap->lock = &host->lock;
5720 	ap->print_id = -1;
5721 	ap->local_port_no = -1;
5722 	ap->host = host;
5723 	ap->dev = host->dev;
5724 
5725 #if defined(ATA_VERBOSE_DEBUG)
5726 	/* turn on all debugging levels */
5727 	ap->msg_enable = 0x00FF;
5728 #elif defined(ATA_DEBUG)
5729 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5730 #else
5731 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5732 #endif
5733 
5734 	mutex_init(&ap->scsi_scan_mutex);
5735 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5736 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5737 	INIT_LIST_HEAD(&ap->eh_done_q);
5738 	init_waitqueue_head(&ap->eh_wait_q);
5739 	init_completion(&ap->park_req_pending);
5740 	init_timer_deferrable(&ap->fastdrain_timer);
5741 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5742 	ap->fastdrain_timer.data = (unsigned long)ap;
5743 
5744 	ap->cbl = ATA_CBL_NONE;
5745 
5746 	ata_link_init(ap, &ap->link, 0);
5747 
5748 #ifdef ATA_IRQ_TRAP
5749 	ap->stats.unhandled_irq = 1;
5750 	ap->stats.idle_irq = 1;
5751 #endif
5752 	ata_sff_port_init(ap);
5753 
5754 	return ap;
5755 }
5756 
5757 static void ata_host_release(struct device *gendev, void *res)
5758 {
5759 	struct ata_host *host = dev_get_drvdata(gendev);
5760 	int i;
5761 
5762 	for (i = 0; i < host->n_ports; i++) {
5763 		struct ata_port *ap = host->ports[i];
5764 
5765 		if (!ap)
5766 			continue;
5767 
5768 		if (ap->scsi_host)
5769 			scsi_host_put(ap->scsi_host);
5770 
5771 		kfree(ap->pmp_link);
5772 		kfree(ap->slave_link);
5773 		kfree(ap);
5774 		host->ports[i] = NULL;
5775 	}
5776 
5777 	dev_set_drvdata(gendev, NULL);
5778 }
5779 
5780 /**
5781  *	ata_host_alloc - allocate and init basic ATA host resources
5782  *	@dev: generic device this host is associated with
5783  *	@max_ports: maximum number of ATA ports associated with this host
5784  *
5785  *	Allocate and initialize basic ATA host resources.  An LLD calls
5786  *	this function to allocate a host, then initializes it fully and
5787  *	attaches it using ata_host_register().
5788  *
5789  *	@max_ports ports are allocated and host->n_ports is
5790  *	initialized to @max_ports.  The caller is allowed to decrease
5791  *	host->n_ports before calling ata_host_register().  The unused
5792  *	ports will be automatically freed on registration.
5793  *
5794  *	RETURNS:
5795  *	Allocate ATA host on success, NULL on failure.
5796  *
5797  *	LOCKING:
5798  *	Inherited from calling layer (may sleep).
5799  */
5800 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5801 {
5802 	struct ata_host *host;
5803 	size_t sz;
5804 	int i;
5805 
5806 	DPRINTK("ENTER\n");
5807 
5808 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5809 		return NULL;
5810 
5811 	/* alloc a container for our list of ATA ports (buses) */
5812 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5814 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5815 	if (!host)
5816 		goto err_out;
5817 
5818 	devres_add(dev, host);
5819 	dev_set_drvdata(dev, host);
5820 
5821 	spin_lock_init(&host->lock);
5822 	mutex_init(&host->eh_mutex);
5823 	host->dev = dev;
5824 	host->n_ports = max_ports;
5825 
5826 	/* allocate ports bound to this host */
5827 	for (i = 0; i < max_ports; i++) {
5828 		struct ata_port *ap;
5829 
5830 		ap = ata_port_alloc(host);
5831 		if (!ap)
5832 			goto err_out;
5833 
5834 		ap->port_no = i;
5835 		host->ports[i] = ap;
5836 	}
5837 
5838 	devres_remove_group(dev, NULL);
5839 	return host;
5840 
5841  err_out:
5842 	devres_release_group(dev, NULL);
5843 	return NULL;
5844 }
5845 
5846 /**
5847  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5848  *	@dev: generic device this host is associated with
5849  *	@ppi: array of ATA port_info to initialize host with
5850  *	@n_ports: number of ATA ports attached to this host
5851  *
5852  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5853  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5854  *	last entry will be used for the remaining ports.
5855  *
5856  *	RETURNS:
5857  *	Allocate ATA host on success, NULL on failure.
5858  *
5859  *	LOCKING:
5860  *	Inherited from calling layer (may sleep).
5861  */
5862 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5863 				      const struct ata_port_info * const * ppi,
5864 				      int n_ports)
5865 {
5866 	const struct ata_port_info *pi;
5867 	struct ata_host *host;
5868 	int i, j;
5869 
5870 	host = ata_host_alloc(dev, n_ports);
5871 	if (!host)
5872 		return NULL;
5873 
5874 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5875 		struct ata_port *ap = host->ports[i];
5876 
5877 		if (ppi[j])
5878 			pi = ppi[j++];
5879 
5880 		ap->pio_mask = pi->pio_mask;
5881 		ap->mwdma_mask = pi->mwdma_mask;
5882 		ap->udma_mask = pi->udma_mask;
5883 		ap->flags |= pi->flags;
5884 		ap->link.flags |= pi->link_flags;
5885 		ap->ops = pi->port_ops;
5886 
5887 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5888 			host->ops = pi->port_ops;
5889 	}
5890 
5891 	return host;
5892 }
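/*
 * Example (editor's illustrative sketch): a NULL-terminated @ppi with a
 * single entry initializes every port of a two-port host from that entry.
 * "my_pi" is a hypothetical port_info; the flags and masks shown use
 * standard libata constants.
 *
 *	static const struct ata_port_info my_pi = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &sata_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_pi, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 */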
5893 
5894 /**
5895  *	ata_slave_link_init - initialize slave link
5896  *	@ap: port to initialize slave link for
5897  *
5898  *	Create and initialize slave link for @ap.  This enables slave
5899  *	link handling on the port.
5900  *
5901  *	In libata, a port contains links and a link contains devices.
5902  *	There is a single host link but if a PMP is attached to it,
5903  *	there can be multiple fan-out links.  On SATA, there's usually
5904  *	a single device connected to a link but PATA and SATA
5905  *	controllers emulating TF based interface can have two - master
5906  *	and slave.
5907  *
5908  *	However, there are a few controllers which don't fit into this
5909  *	abstraction too well - SATA controllers which emulate TF
5910  *	interface with both master and slave devices but also have
5911  *	separate SCR register sets for each device.  These controllers
5912  *	need separate links for physical link handling
5913  *	(e.g. onlineness, link speed) but should be treated like a
5914  *	traditional M/S controller for everything else (e.g. command
5915  *	issue, softreset).
5916  *
5917  *	slave_link is libata's way of handling this class of
5918  *	controllers without impacting core layer too much.  For
5919  *	anything other than physical link handling, the default host
5920  *	link is used for both master and slave.  For physical link
5921  *	handling, separate @ap->slave_link is used.  All dirty details
5922  *	are implemented inside libata core layer.  From LLD's POV, the
5923  *	only difference is that prereset, hardreset and postreset are
5924  *	called once more for the slave link, so the reset sequence
5925  *	looks like the following.
5926  *
5927  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5928  *	softreset(M) -> postreset(M) -> postreset(S)
5929  *
5930  *	Note that softreset is called only for the master.  Softreset
5931  *	resets both M/S by definition, so SRST on master should handle
5932  *	both (the standard method will work just fine).
5933  *
5934  *	LOCKING:
5935  *	Should be called before host is registered.
5936  *
5937  *	RETURNS:
5938  *	0 on success, -errno on failure.
5939  */
5940 int ata_slave_link_init(struct ata_port *ap)
5941 {
5942 	struct ata_link *link;
5943 
5944 	WARN_ON(ap->slave_link);
5945 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5946 
5947 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5948 	if (!link)
5949 		return -ENOMEM;
5950 
5951 	ata_link_init(ap, link, 1);
5952 	ap->slave_link = link;
5953 	return 0;
5954 }
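/*
 * Example (editor's illustrative sketch): an LLD driving a TF-emulating
 * SATA controller with per-device SCR sets would enable slave link
 * handling while setting up the port, before the host is registered:
 *
 *	rc = ata_slave_link_init(ap);
 *	if (rc)
 *		return rc;
 */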
5955 
5956 static void ata_host_stop(struct device *gendev, void *res)
5957 {
5958 	struct ata_host *host = dev_get_drvdata(gendev);
5959 	int i;
5960 
5961 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5962 
5963 	for (i = 0; i < host->n_ports; i++) {
5964 		struct ata_port *ap = host->ports[i];
5965 
5966 		if (ap->ops->port_stop)
5967 			ap->ops->port_stop(ap);
5968 	}
5969 
5970 	if (host->ops->host_stop)
5971 		host->ops->host_stop(host);
5972 }
5973 
5974 /**
5975  *	ata_finalize_port_ops - finalize ata_port_operations
5976  *	@ops: ata_port_operations to finalize
5977  *
5978  *	An ata_port_operations can inherit from another ops and that
5979  *	ops can again inherit from another.  This can go on as many
5980  *	times as necessary as long as there is no loop in the
5981  *	inheritance chain.
5982  *
5983  *	Ops tables are finalized when the host is started.  NULL or
5984  *	unspecified entries are inherited from the closest ancestor
5985  *	which has the method and the entry is populated with it.
5986  *	After finalization, the ops table directly points to all the
5987  *	methods and ->inherits is no longer necessary and cleared.
5988  *
5989  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5990  *
5991  *	LOCKING:
5992  *	None.
5993  */
5994 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5995 {
5996 	static DEFINE_SPINLOCK(lock);
5997 	const struct ata_port_operations *cur;
5998 	void **begin = (void **)ops;
5999 	void **end = (void **)&ops->inherits;
6000 	void **pp;
6001 
6002 	if (!ops || !ops->inherits)
6003 		return;
6004 
6005 	spin_lock(&lock);
6006 
6007 	for (cur = ops->inherits; cur; cur = cur->inherits) {
6008 		void **inherit = (void **)cur;
6009 
6010 		for (pp = begin; pp < end; pp++, inherit++)
6011 			if (!*pp)
6012 				*pp = *inherit;
6013 	}
6014 
6015 	for (pp = begin; pp < end; pp++)
6016 		if (IS_ERR(*pp))
6017 			*pp = NULL;
6018 
6019 	ops->inherits = NULL;
6020 
6021 	spin_unlock(&lock);
6022 }
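/*
 * Example (editor's illustrative sketch): an inheriting ops table as an
 * LLD would declare it.  "my_hardreset" is a hypothetical LLD callback;
 * every method left unspecified is filled in from &sata_port_ops when the
 * host is started.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *	};
 */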
6023 
6024 /**
6025  *	ata_host_start - start and freeze ports of an ATA host
6026  *	@host: ATA host to start ports for
6027  *
6028  *	Start and then freeze ports of @host.  Started status is
6029  *	recorded in host->flags, so this function can be called
6030  *	multiple times.  Ports are guaranteed to get started only
6031  *	once.  If host->ops isn't initialized yet, it's set to the
6032  *	first non-dummy port ops.
6033  *
6034  *	LOCKING:
6035  *	Inherited from calling layer (may sleep).
6036  *
6037  *	RETURNS:
6038  *	0 if all ports are started successfully, -errno otherwise.
6039  */
6040 int ata_host_start(struct ata_host *host)
6041 {
6042 	int have_stop = 0;
6043 	void *start_dr = NULL;
6044 	int i, rc;
6045 
6046 	if (host->flags & ATA_HOST_STARTED)
6047 		return 0;
6048 
6049 	ata_finalize_port_ops(host->ops);
6050 
6051 	for (i = 0; i < host->n_ports; i++) {
6052 		struct ata_port *ap = host->ports[i];
6053 
6054 		ata_finalize_port_ops(ap->ops);
6055 
6056 		if (!host->ops && !ata_port_is_dummy(ap))
6057 			host->ops = ap->ops;
6058 
6059 		if (ap->ops->port_stop)
6060 			have_stop = 1;
6061 	}
6062 
6063 	if (host->ops && host->ops->host_stop)
6064 		have_stop = 1;
6065 
6066 	if (have_stop) {
6067 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6068 		if (!start_dr)
6069 			return -ENOMEM;
6070 	}
6071 
6072 	for (i = 0; i < host->n_ports; i++) {
6073 		struct ata_port *ap = host->ports[i];
6074 
6075 		if (ap->ops->port_start) {
6076 			rc = ap->ops->port_start(ap);
6077 			if (rc) {
6078 				if (rc != -ENODEV)
6079 					dev_err(host->dev,
6080 						"failed to start port %d (errno=%d)\n",
6081 						i, rc);
6082 				goto err_out;
6083 			}
6084 		}
6085 		ata_eh_freeze_port(ap);
6086 	}
6087 
6088 	if (start_dr)
6089 		devres_add(host->dev, start_dr);
6090 	host->flags |= ATA_HOST_STARTED;
6091 	return 0;
6092 
6093  err_out:
6094 	while (--i >= 0) {
6095 		struct ata_port *ap = host->ports[i];
6096 
6097 		if (ap->ops->port_stop)
6098 			ap->ops->port_stop(ap);
6099 	}
6100 	devres_free(start_dr);
6101 	return rc;
6102 }
6103 
6104 /**
6105  *	ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
6106  *	@host:	host to initialize
6107  *	@dev:	device host is attached to
6108  *	@ops:	port_ops
6109  *
6110  */
6111 void ata_host_init(struct ata_host *host, struct device *dev,
6112 		   struct ata_port_operations *ops)
6113 {
6114 	spin_lock_init(&host->lock);
6115 	mutex_init(&host->eh_mutex);
6116 	host->n_tags = ATA_MAX_QUEUE - 1;
6117 	host->dev = dev;
6118 	host->ops = ops;
6119 }
6120 
6121 void __ata_port_probe(struct ata_port *ap)
6122 {
6123 	struct ata_eh_info *ehi = &ap->link.eh_info;
6124 	unsigned long flags;
6125 
6126 	/* kick EH for boot probing */
6127 	spin_lock_irqsave(ap->lock, flags);
6128 
6129 	ehi->probe_mask |= ATA_ALL_DEVICES;
6130 	ehi->action |= ATA_EH_RESET;
6131 	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6132 
6133 	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6134 	ap->pflags |= ATA_PFLAG_LOADING;
6135 	ata_port_schedule_eh(ap);
6136 
6137 	spin_unlock_irqrestore(ap->lock, flags);
6138 }
6139 
6140 int ata_port_probe(struct ata_port *ap)
6141 {
6142 	int rc = 0;
6143 
6144 	if (ap->ops->error_handler) {
6145 		__ata_port_probe(ap);
6146 		ata_port_wait_eh(ap);
6147 	} else {
6148 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6149 		rc = ata_bus_probe(ap);
6150 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6151 	}
6152 	return rc;
6153 }
6154 
6155 
6156 static void async_port_probe(void *data, async_cookie_t cookie)
6157 {
6158 	struct ata_port *ap = data;
6159 
6160 	/*
6161 	 * If we're not allowed to scan this host in parallel,
6162 	 * we need to wait until all previous scans have completed
6163 	 * before going further.
6164 	 * Jeff Garzik says this is only within a controller, so we
6165 	 * don't need to wait for port 0, only for later ports.
6166 	 */
6167 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6168 		async_synchronize_cookie(cookie);
6169 
6170 	(void)ata_port_probe(ap);
6171 
6172 	/* in order to keep device order, we need to synchronize at this point */
6173 	async_synchronize_cookie(cookie);
6174 
6175 	ata_scsi_scan_host(ap, 1);
6176 }
6177 
6178 /**
6179  *	ata_host_register - register initialized ATA host
6180  *	@host: ATA host to register
6181  *	@sht: template for SCSI host
6182  *
6183  *	Register initialized ATA host.  @host is allocated using
6184  *	ata_host_alloc() and fully initialized by LLD.  This function
6185  *	starts ports, registers @host with ATA and SCSI layers and
6186  *	probe registered devices.
6187  *
6188  *	LOCKING:
6189  *	Inherited from calling layer (may sleep).
6190  *
6191  *	RETURNS:
6192  *	0 on success, -errno otherwise.
6193  */
6194 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6195 {
6196 	int i, rc;
6197 
6198 	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6199 
6200 	/* host must have been started */
6201 	if (!(host->flags & ATA_HOST_STARTED)) {
6202 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6203 		WARN_ON(1);
6204 		return -EINVAL;
6205 	}
6206 
6207 	/* Blow away unused ports.  This happens when LLD can't
6208 	 * determine the exact number of ports to allocate at
6209 	 * allocation time.
6210 	 */
6211 	for (i = host->n_ports; host->ports[i]; i++)
6212 		kfree(host->ports[i]);
6213 
6214 	/* give ports names and add SCSI hosts */
6215 	for (i = 0; i < host->n_ports; i++) {
6216 		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6217 		host->ports[i]->local_port_no = i + 1;
6218 	}
6219 
6220 	/* Create associated sysfs transport objects  */
6221 	for (i = 0; i < host->n_ports; i++) {
6222 		rc = ata_tport_add(host->dev,host->ports[i]);
6223 		if (rc) {
6224 			goto err_tadd;
6225 		}
6226 	}
6227 
6228 	rc = ata_scsi_add_hosts(host, sht);
6229 	if (rc)
6230 		goto err_tadd;
6231 
6232 	/* set cable, sata_spd_limit and report */
6233 	for (i = 0; i < host->n_ports; i++) {
6234 		struct ata_port *ap = host->ports[i];
6235 		unsigned long xfer_mask;
6236 
6237 		/* set SATA cable type if still unset */
6238 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6239 			ap->cbl = ATA_CBL_SATA;
6240 
6241 		/* init sata_spd_limit to the current value */
6242 		sata_link_init_spd(&ap->link);
6243 		if (ap->slave_link)
6244 			sata_link_init_spd(ap->slave_link);
6245 
6246 		/* print per-port info to dmesg */
6247 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6248 					      ap->udma_mask);
6249 
6250 		if (!ata_port_is_dummy(ap)) {
6251 			ata_port_info(ap, "%cATA max %s %s\n",
6252 				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6253 				      ata_mode_string(xfer_mask),
6254 				      ap->link.eh_info.desc);
6255 			ata_ehi_clear_desc(&ap->link.eh_info);
6256 		} else
6257 			ata_port_info(ap, "DUMMY\n");
6258 	}
6259 
6260 	/* perform each probe asynchronously */
6261 	for (i = 0; i < host->n_ports; i++) {
6262 		struct ata_port *ap = host->ports[i];
6263 		ap->cookie = async_schedule(async_port_probe, ap);
6264 	}
6265 
6266 	return 0;
6267 
6268  err_tadd:
6269 	while (--i >= 0) {
6270 		ata_tport_delete(host->ports[i]);
6271 	}
6272 	return rc;
6273 
6274 }
6275 
6276 /**
6277  *	ata_host_activate - start host, request IRQ and register it
6278  *	@host: target ATA host
6279  *	@irq: IRQ to request
6280  *	@irq_handler: irq_handler used when requesting IRQ
6281  *	@irq_flags: irq_flags used when requesting IRQ
6282  *	@sht: scsi_host_template to use when registering the host
6283  *
6284  *	After allocating an ATA host and initializing it, most libata
6285  *	LLDs perform three steps to activate the host - start host,
6286  *	request IRQ and register it.  This helper takes necessary
6287  *	arguments and performs the three steps in one go.
6288  *
6289  *	An invalid IRQ skips the IRQ registration and expects the host to
6290  *	have set polling mode on the port. In this case, @irq_handler
6291  *	should be NULL.
6292  *
6293  *	LOCKING:
6294  *	Inherited from calling layer (may sleep).
6295  *
6296  *	RETURNS:
6297  *	0 on success, -errno otherwise.
6298  */
6299 int ata_host_activate(struct ata_host *host, int irq,
6300 		      irq_handler_t irq_handler, unsigned long irq_flags,
6301 		      struct scsi_host_template *sht)
6302 {
6303 	int i, rc;
6304 
6305 	rc = ata_host_start(host);
6306 	if (rc)
6307 		return rc;
6308 
6309 	/* Special case for polling mode */
6310 	if (!irq) {
6311 		WARN_ON(irq_handler);
6312 		return ata_host_register(host, sht);
6313 	}
6314 
6315 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6316 			      dev_name(host->dev), host);
6317 	if (rc)
6318 		return rc;
6319 
6320 	for (i = 0; i < host->n_ports; i++)
6321 		ata_port_desc(host->ports[i], "irq %d", irq);
6322 
6323 	rc = ata_host_register(host, sht);
6324 	/* if failed, just free the IRQ and leave ports alone */
6325 	if (rc)
6326 		devm_free_irq(host->dev, irq, host);
6327 
6328 	return rc;
6329 }
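/*
 * Example (editor's illustrative sketch): the typical activation sequence
 * in a PCI LLD probe routine.  "ppi", "my_interrupt" and "my_sht" are
 * hypothetical names; BAR mapping and port address setup are elided.
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	(map BARs and fill in each port's ioaddr here)
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */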
6330 
6331 /**
6332  *	ata_port_detach - Detach ATA port in preparation for device removal
6333  *	@ap: ATA port to be detached
6334  *
6335  *	Detach all ATA devices and the associated SCSI devices of @ap;
6336  *	then, remove the associated SCSI host.  @ap is guaranteed to
6337  *	be quiescent on return from this function.
6338  *
6339  *	LOCKING:
6340  *	Kernel thread context (may sleep).
6341  */
6342 static void ata_port_detach(struct ata_port *ap)
6343 {
6344 	unsigned long flags;
6345 	struct ata_link *link;
6346 	struct ata_device *dev;
6347 
6348 	if (!ap->ops->error_handler)
6349 		goto skip_eh;
6350 
6351 	/* tell EH we're leaving & flush EH */
6352 	spin_lock_irqsave(ap->lock, flags);
6353 	ap->pflags |= ATA_PFLAG_UNLOADING;
6354 	ata_port_schedule_eh(ap);
6355 	spin_unlock_irqrestore(ap->lock, flags);
6356 
6357 	/* wait till EH commits suicide */
6358 	ata_port_wait_eh(ap);
6359 
6360 	/* it better be dead now */
6361 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6362 
6363 	cancel_delayed_work_sync(&ap->hotplug_task);
6364 
6365  skip_eh:
6366 	/* clean up zpodd on port removal */
6367 	ata_for_each_link(link, ap, HOST_FIRST) {
6368 		ata_for_each_dev(dev, link, ALL) {
6369 			if (zpodd_dev_enabled(dev))
6370 				zpodd_exit(dev);
6371 		}
6372 	}
6373 	if (ap->pmp_link) {
6374 		int i;
6375 		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6376 			ata_tlink_delete(&ap->pmp_link[i]);
6377 	}
6378 	/* remove the associated SCSI host */
6379 	scsi_remove_host(ap->scsi_host);
6380 	ata_tport_delete(ap);
6381 }
6382 
6383 /**
6384  *	ata_host_detach - Detach all ports of an ATA host
6385  *	@host: Host to detach
6386  *
6387  *	Detach all ports of @host.
6388  *
6389  *	LOCKING:
6390  *	Kernel thread context (may sleep).
6391  */
6392 void ata_host_detach(struct ata_host *host)
6393 {
6394 	int i;
6395 
6396 	for (i = 0; i < host->n_ports; i++) {
6397 		/* Ensure ata_port probe has completed */
6398 		async_synchronize_cookie(host->ports[i]->cookie + 1);
6399 		ata_port_detach(host->ports[i]);
6400 	}
6401 
6402 	/* the host is dead now, dissociate ACPI */
6403 	ata_acpi_dissociate(host);
6404 }
6405 
6406 #ifdef CONFIG_PCI
6407 
6408 /**
6409  *	ata_pci_remove_one - PCI layer callback for device removal
6410  *	@pdev: PCI device that was removed
6411  *
6412  *	PCI layer indicates to libata via this hook that hot-unplug or
6413  *	module unload event has occurred.  Detach all ports.  Resource
6414  *	release is handled via devres.
6415  *
6416  *	LOCKING:
6417  *	Inherited from PCI layer (may sleep).
6418  */
6419 void ata_pci_remove_one(struct pci_dev *pdev)
6420 {
6421 	struct ata_host *host = pci_get_drvdata(pdev);
6422 
6423 	ata_host_detach(host);
6424 }
6425 
6426 /* move to PCI subsystem */
6427 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6428 {
6429 	unsigned long tmp = 0;
6430 
6431 	switch (bits->width) {
6432 	case 1: {
6433 		u8 tmp8 = 0;
6434 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6435 		tmp = tmp8;
6436 		break;
6437 	}
6438 	case 2: {
6439 		u16 tmp16 = 0;
6440 		pci_read_config_word(pdev, bits->reg, &tmp16);
6441 		tmp = tmp16;
6442 		break;
6443 	}
6444 	case 4: {
6445 		u32 tmp32 = 0;
6446 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6447 		tmp = tmp32;
6448 		break;
6449 	}
6450 
6451 	default:
6452 		return -EINVAL;
6453 	}
6454 
6455 	tmp &= bits->mask;
6456 
6457 	return (tmp == bits->val) ? 1 : 0;
6458 }
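/*
 * Example (editor's illustrative sketch): testing whether a hypothetical
 * controller's primary channel is enabled via a single byte-wide config
 * register bit; the initializer order is reg, width in bytes, mask, val.
 *
 *	static const struct pci_bits my_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;
 */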
6459 
6460 #ifdef CONFIG_PM
6461 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6462 {
6463 	pci_save_state(pdev);
6464 	pci_disable_device(pdev);
6465 
6466 	if (mesg.event & PM_EVENT_SLEEP)
6467 		pci_set_power_state(pdev, PCI_D3hot);
6468 }
6469 
6470 int ata_pci_device_do_resume(struct pci_dev *pdev)
6471 {
6472 	int rc;
6473 
6474 	pci_set_power_state(pdev, PCI_D0);
6475 	pci_restore_state(pdev);
6476 
6477 	rc = pcim_enable_device(pdev);
6478 	if (rc) {
6479 		dev_err(&pdev->dev,
6480 			"failed to enable device after resume (%d)\n", rc);
6481 		return rc;
6482 	}
6483 
6484 	pci_set_master(pdev);
6485 	return 0;
6486 }
6487 
6488 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6489 {
6490 	struct ata_host *host = pci_get_drvdata(pdev);
6491 	int rc = 0;
6492 
6493 	rc = ata_host_suspend(host, mesg);
6494 	if (rc)
6495 		return rc;
6496 
6497 	ata_pci_device_do_suspend(pdev, mesg);
6498 
6499 	return 0;
6500 }
6501 
6502 int ata_pci_device_resume(struct pci_dev *pdev)
6503 {
6504 	struct ata_host *host = pci_get_drvdata(pdev);
6505 	int rc;
6506 
6507 	rc = ata_pci_device_do_resume(pdev);
6508 	if (rc == 0)
6509 		ata_host_resume(host);
6510 	return rc;
6511 }
6512 #endif /* CONFIG_PM */
6513 
6514 #endif /* CONFIG_PCI */
6515 
6516 /**
6517  *	ata_platform_remove_one - Platform layer callback for device removal
6518  *	@pdev: Platform device that was removed
6519  *
6520  *	Platform layer indicates to libata via this hook that hot-unplug or
6521  *	module unload event has occurred.  Detach all ports.  Resource
6522  *	release is handled via devres.
6523  *
6524  *	LOCKING:
6525  *	Inherited from platform layer (may sleep).
6526  */
6527 int ata_platform_remove_one(struct platform_device *pdev)
6528 {
6529 	struct ata_host *host = platform_get_drvdata(pdev);
6530 
6531 	ata_host_detach(host);
6532 
6533 	return 0;
6534 }
6535 
6536 static int __init ata_parse_force_one(char **cur,
6537 				      struct ata_force_ent *force_ent,
6538 				      const char **reason)
6539 {
6540 	static const struct ata_force_param force_tbl[] __initconst = {
6541 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6542 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6543 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6544 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6545 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6546 		{ "sata",	.cbl		= ATA_CBL_SATA },
6547 		{ "1.5Gbps",	.spd_limit	= 1 },
6548 		{ "3.0Gbps",	.spd_limit	= 2 },
6549 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6550 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6551 		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
6552 		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
6553 		{ "noncqati",	.horkage_on	= ATA_HORKAGE_NO_NCQ_ON_ATI },
6554 		{ "ncqati",	.horkage_off	= ATA_HORKAGE_NO_NCQ_ON_ATI },
6555 		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6556 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6557 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6558 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6559 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6560 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6561 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6562 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6563 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6564 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6565 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6566 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6567 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6568 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6569 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6570 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6571 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6572 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6573 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6574 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6575 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6576 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6577 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6578 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6579 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6580 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6581 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6582 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6583 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6584 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6585 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6586 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6587 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6588 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6589 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6590 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6591 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6592 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6593 		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6594 		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6595 		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6596 	};
6597 	char *start = *cur, *p = *cur;
6598 	char *id, *val, *endp;
6599 	const struct ata_force_param *match_fp = NULL;
6600 	int nr_matches = 0, i;
6601 
6602 	/* find where this param ends and update *cur */
6603 	while (*p != '\0' && *p != ',')
6604 		p++;
6605 
6606 	if (*p == '\0')
6607 		*cur = p;
6608 	else
6609 		*cur = p + 1;
6610 
6611 	*p = '\0';
6612 
6613 	/* parse */
6614 	p = strchr(start, ':');
6615 	if (!p) {
6616 		val = strstrip(start);
6617 		goto parse_val;
6618 	}
6619 	*p = '\0';
6620 
6621 	id = strstrip(start);
6622 	val = strstrip(p + 1);
6623 
6624 	/* parse id */
6625 	p = strchr(id, '.');
6626 	if (p) {
6627 		*p++ = '\0';
6628 		force_ent->device = simple_strtoul(p, &endp, 10);
6629 		if (p == endp || *endp != '\0') {
6630 			*reason = "invalid device";
6631 			return -EINVAL;
6632 		}
6633 	}
6634 
6635 	force_ent->port = simple_strtoul(id, &endp, 10);
6636 	if (p == endp || *endp != '\0') {
6637 		*reason = "invalid port/link";
6638 		return -EINVAL;
6639 	}
6640 
6641  parse_val:
6642 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6643 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6644 		const struct ata_force_param *fp = &force_tbl[i];
6645 
6646 		if (strncasecmp(val, fp->name, strlen(val)))
6647 			continue;
6648 
6649 		nr_matches++;
6650 		match_fp = fp;
6651 
6652 		if (strcasecmp(val, fp->name) == 0) {
6653 			nr_matches = 1;
6654 			break;
6655 		}
6656 	}
6657 
6658 	if (!nr_matches) {
6659 		*reason = "unknown value";
6660 		return -EINVAL;
6661 	}
6662 	if (nr_matches > 1) {
6663 		*reason = "ambigious value";
6664 		return -EINVAL;
6665 	}
6666 
6667 	force_ent->param = *match_fp;
6668 
6669 	return 0;
6670 }
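/*
 * Example (editorial note): libata.force takes comma-separated entries of
 * the form [PORT[.DEVICE]:]VALUE, so a boot line such as
 *
 *	libata.force=3.0Gbps,1.00:noncq,2:disable
 *
 * limits all links to 3.0 Gbps, turns off NCQ for device 0 on port 1 and
 * disables every device on port 2 (port numbers are the ata print IDs).
 */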
6671 
6672 static void __init ata_parse_force_param(void)
6673 {
6674 	int idx = 0, size = 1;
6675 	int last_port = -1, last_device = -1;
6676 	char *p, *cur, *next;
6677 
6678 	/* calculate maximum number of params and allocate force_tbl */
6679 	for (p = ata_force_param_buf; *p; p++)
6680 		if (*p == ',')
6681 			size++;
6682 
6683 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6684 	if (!ata_force_tbl) {
6685 		printk(KERN_WARNING "ata: failed to extend force table, "
6686 		       "libata.force ignored\n");
6687 		return;
6688 	}
6689 
6690 	/* parse and populate the table */
6691 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6692 		const char *reason = "";
6693 		struct ata_force_ent te = { .port = -1, .device = -1 };
6694 
6695 		next = cur;
6696 		if (ata_parse_force_one(&next, &te, &reason)) {
6697 			printk(KERN_WARNING "ata: failed to parse force "
6698 			       "parameter \"%s\" (%s)\n",
6699 			       cur, reason);
6700 			continue;
6701 		}
6702 
6703 		if (te.port == -1) {
6704 			te.port = last_port;
6705 			te.device = last_device;
6706 		}
6707 
6708 		ata_force_tbl[idx++] = te;
6709 
6710 		last_port = te.port;
6711 		last_device = te.device;
6712 	}
6713 
6714 	ata_force_tbl_size = idx;
6715 }
6716 
6717 static int __init ata_init(void)
6718 {
6719 	int rc;
6720 
6721 	ata_parse_force_param();
6722 
6723 	rc = ata_sff_init();
6724 	if (rc) {
6725 		kfree(ata_force_tbl);
6726 		return rc;
6727 	}
6728 
6729 	libata_transport_init();
6730 	ata_scsi_transport_template = ata_attach_transport();
6731 	if (!ata_scsi_transport_template) {
6732 		ata_sff_exit();
6733 		rc = -ENOMEM;
6734 		goto err_out;
6735 	}
6736 
6737 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6738 	return 0;
6739 
6740 err_out:
6741 	return rc;
6742 }
6743 
6744 static void __exit ata_exit(void)
6745 {
6746 	ata_release_transport(ata_scsi_transport_template);
6747 	libata_transport_exit();
6748 	ata_sff_exit();
6749 	kfree(ata_force_tbl);
6750 }
6751 
6752 subsys_initcall(ata_init);
6753 module_exit(ata_exit);
6754 
6755 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6756 
6757 int ata_ratelimit(void)
6758 {
6759 	return __ratelimit(&ratelimit);
6760 }
6761 
6762 /**
6763  *	ata_msleep - ATA EH owner aware msleep
6764  *	@ap: ATA port to attribute the sleep to
6765  *	@msecs: duration to sleep in milliseconds
6766  *
6767  *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6768  *	ownership is released before going to sleep and reacquired
6769  *	after the sleep is complete.  IOW, other ports sharing the
6770  *	@ap->host will be allowed to own the EH while this task is
6771  *	sleeping.
6772  *
6773  *	LOCKING:
6774  *	Might sleep.
6775  */
6776 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6777 {
6778 	bool owns_eh = ap && ap->host->eh_owner == current;
6779 
6780 	if (owns_eh)
6781 		ata_eh_release(ap);
6782 
6783 	msleep(msecs);
6784 
6785 	if (owns_eh)
6786 		ata_eh_acquire(ap);
6787 }
6788 
6789 /**
6790  *	ata_wait_register - wait until register value changes
6791  *	@ap: ATA port to wait register for, can be NULL
6792  *	@reg: IO-mapped register
6793  *	@mask: Mask to apply to read register value
6794  *	@val: Wait condition
6795  *	@interval: polling interval in milliseconds
6796  *	@timeout: timeout in milliseconds
6797  *
6798  *	Waiting for some bits of register to change is a common
6799  *	operation for ATA controllers.  This function reads 32bit LE
6800  *	IO-mapped register @reg and tests for the following condition.
6801  *
6802  *	(*@reg & mask) != val
6803  *
6804  *	If the condition is met, it returns; otherwise, the process is
6805  *	repeated after @interval until timeout.
6806  *
6807  *	LOCKING:
6808  *	Kernel thread context (may sleep)
6809  *
6810  *	RETURNS:
6811  *	The final register value.
6812  */
6813 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6814 		      unsigned long interval, unsigned long timeout)
6815 {
6816 	unsigned long deadline;
6817 	u32 tmp;
6818 
6819 	tmp = ioread32(reg);
6820 
6821 	/* Calculate timeout _after_ the first read to make sure
6822 	 * preceding writes reach the controller before starting to
6823 	 * eat away the timeout.
6824 	 */
6825 	deadline = ata_deadline(jiffies, timeout);
6826 
6827 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6828 		ata_msleep(ap, interval);
6829 		tmp = ioread32(reg);
6830 	}
6831 
6832 	return tmp;
6833 }
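/*
 * Example (editor's illustrative sketch): polling a hypothetical MMIO
 * status register until its BSY bit clears, checking every 10 ms for at
 * most one second.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(ap, mmio + MY_STATUS, ATA_BUSY, ATA_BUSY,
 *				   10, 1000);
 *	if (status & ATA_BUSY)
 *		return -EBUSY;
 */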
6834 
6835 /**
6836  *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
6837  *	@link: Link receiving the event
6838  *
6839  *	Test whether the received PHY event has to be ignored or not.
6840  *
6841  *	LOCKING:
6842  *	None.
6843  *
6844  *	RETURNS:
6845  *	True if the event has to be ignored.
6846  */
6847 bool sata_lpm_ignore_phy_events(struct ata_link *link)
6848 {
6849 	unsigned long lpm_timeout = link->last_lpm_change +
6850 				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6851 
6852 	/* if LPM is enabled, PHYRDY doesn't mean anything */
6853 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
6854 		return true;
6855 
6856 	/* ignore the first PHY event after the LPM policy changed
6857 	 * as it might be spurious
6858 	 */
6859 	if ((link->flags & ATA_LFLAG_CHANGED) &&
6860 	    time_before(jiffies, lpm_timeout))
6861 		return true;
6862 
6863 	return false;
6864 }
6865 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
6866 
6867 /*
6868  * Dummy port_ops
6869  */
6870 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6871 {
6872 	return AC_ERR_SYSTEM;
6873 }
6874 
6875 static void ata_dummy_error_handler(struct ata_port *ap)
6876 {
6877 	/* truly dummy */
6878 }
6879 
6880 struct ata_port_operations ata_dummy_port_ops = {
6881 	.qc_prep		= ata_noop_qc_prep,
6882 	.qc_issue		= ata_dummy_qc_issue,
6883 	.error_handler		= ata_dummy_error_handler,
6884 	.sched_eh		= ata_std_sched_eh,
6885 	.end_eh			= ata_std_end_eh,
6886 };
6887 
6888 const struct ata_port_info ata_dummy_port_info = {
6889 	.port_ops		= &ata_dummy_port_ops,
6890 };
6891 
6892 /*
6893  * Utility print functions
6894  */
6895 void ata_port_printk(const struct ata_port *ap, const char *level,
6896 		     const char *fmt, ...)
6897 {
6898 	struct va_format vaf;
6899 	va_list args;
6900 
6901 	va_start(args, fmt);
6902 
6903 	vaf.fmt = fmt;
6904 	vaf.va = &args;
6905 
6906 	printk("%sata%u: %pV", level, ap->print_id, &vaf);
6907 
6908 	va_end(args);
6909 }
6910 EXPORT_SYMBOL(ata_port_printk);
6911 
6912 void ata_link_printk(const struct ata_link *link, const char *level,
6913 		     const char *fmt, ...)
6914 {
6915 	struct va_format vaf;
6916 	va_list args;
6917 
6918 	va_start(args, fmt);
6919 
6920 	vaf.fmt = fmt;
6921 	vaf.va = &args;
6922 
6923 	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6924 		printk("%sata%u.%02u: %pV",
6925 		       level, link->ap->print_id, link->pmp, &vaf);
6926 	else
6927 		printk("%sata%u: %pV",
6928 		       level, link->ap->print_id, &vaf);
6929 
6930 	va_end(args);
6931 }
6932 EXPORT_SYMBOL(ata_link_printk);
6933 
6934 void ata_dev_printk(const struct ata_device *dev, const char *level,
6935 		    const char *fmt, ...)
6936 {
6937 	struct va_format vaf;
6938 	va_list args;
6939 
6940 	va_start(args, fmt);
6941 
6942 	vaf.fmt = fmt;
6943 	vaf.va = &args;
6944 
6945 	printk("%sata%u.%02u: %pV",
6946 	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6947 	       &vaf);
6948 
6949 	va_end(args);
6950 }
6951 EXPORT_SYMBOL(ata_dev_printk);
6952 
6953 void ata_print_version(const struct device *dev, const char *version)
6954 {
6955 	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6956 }
6957 EXPORT_SYMBOL(ata_print_version);
6958 
6959 /*
6960  * libata is essentially a library of internal helper functions for
6961  * low-level ATA host controller drivers.  As such, the API/ABI is
6962  * likely to change as new drivers are added and updated.
6963  * Do not depend on ABI/API stability.
6964  */
6965 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6966 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6967 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6968 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6969 EXPORT_SYMBOL_GPL(sata_port_ops);
6970 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6971 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6972 EXPORT_SYMBOL_GPL(ata_link_next);
6973 EXPORT_SYMBOL_GPL(ata_dev_next);
6974 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6975 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6976 EXPORT_SYMBOL_GPL(ata_host_init);
6977 EXPORT_SYMBOL_GPL(ata_host_alloc);
6978 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6979 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6980 EXPORT_SYMBOL_GPL(ata_host_start);
6981 EXPORT_SYMBOL_GPL(ata_host_register);
6982 EXPORT_SYMBOL_GPL(ata_host_activate);
6983 EXPORT_SYMBOL_GPL(ata_host_detach);
6984 EXPORT_SYMBOL_GPL(ata_sg_init);
6985 EXPORT_SYMBOL_GPL(ata_qc_complete);
6986 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6987 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6988 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6989 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6990 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6991 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6992 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6993 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6994 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6995 EXPORT_SYMBOL_GPL(ata_mode_string);
6996 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6997 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6998 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6999 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7000 EXPORT_SYMBOL_GPL(ata_dev_disable);
7001 EXPORT_SYMBOL_GPL(sata_set_spd);
7002 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7003 EXPORT_SYMBOL_GPL(sata_link_debounce);
7004 EXPORT_SYMBOL_GPL(sata_link_resume);
7005 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7006 EXPORT_SYMBOL_GPL(ata_std_prereset);
7007 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7008 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7009 EXPORT_SYMBOL_GPL(ata_std_postreset);
7010 EXPORT_SYMBOL_GPL(ata_dev_classify);
7011 EXPORT_SYMBOL_GPL(ata_dev_pair);
7012 EXPORT_SYMBOL_GPL(ata_ratelimit);
7013 EXPORT_SYMBOL_GPL(ata_msleep);
7014 EXPORT_SYMBOL_GPL(ata_wait_register);
7015 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7016 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7017 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7018 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7019 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7020 EXPORT_SYMBOL_GPL(sata_scr_valid);
7021 EXPORT_SYMBOL_GPL(sata_scr_read);
7022 EXPORT_SYMBOL_GPL(sata_scr_write);
7023 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7024 EXPORT_SYMBOL_GPL(ata_link_online);
7025 EXPORT_SYMBOL_GPL(ata_link_offline);
7026 #ifdef CONFIG_PM
7027 EXPORT_SYMBOL_GPL(ata_host_suspend);
7028 EXPORT_SYMBOL_GPL(ata_host_resume);
7029 #endif /* CONFIG_PM */
7030 EXPORT_SYMBOL_GPL(ata_id_string);
7031 EXPORT_SYMBOL_GPL(ata_id_c_string);
7032 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7033 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7034 
7035 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7036 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7037 EXPORT_SYMBOL_GPL(ata_timing_compute);
7038 EXPORT_SYMBOL_GPL(ata_timing_merge);
7039 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7040 
7041 #ifdef CONFIG_PCI
7042 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7043 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7044 #ifdef CONFIG_PM
7045 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7046 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7047 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7048 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7049 #endif /* CONFIG_PM */
7050 #endif /* CONFIG_PCI */
7051 
7052 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7053 
7054 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7055 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7056 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7057 EXPORT_SYMBOL_GPL(ata_port_desc);
7058 #ifdef CONFIG_PCI
7059 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7060 #endif /* CONFIG_PCI */
7061 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7062 EXPORT_SYMBOL_GPL(ata_link_abort);
7063 EXPORT_SYMBOL_GPL(ata_port_abort);
7064 EXPORT_SYMBOL_GPL(ata_port_freeze);
7065 EXPORT_SYMBOL_GPL(sata_async_notification);
7066 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7067 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7068 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7069 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7070 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7071 EXPORT_SYMBOL_GPL(ata_do_eh);
7072 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7073 
7074 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7075 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7076 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7077 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7078 EXPORT_SYMBOL_GPL(ata_cable_sata);
7079