1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <linux/glob.h>
63 #include <scsi/scsi.h>
64 #include <scsi/scsi_cmnd.h>
65 #include <scsi/scsi_host.h>
66 #include <linux/libata.h>
67 #include <asm/byteorder.h>
68 #include <linux/cdrom.h>
69 #include <linux/ratelimit.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/platform_device.h>
72 
73 #include "libata.h"
74 #include "libata-transport.h"
75 
76 /* debounce timing parameters in msecs { interval, duration, timeout } */
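/*
 * Each triplet is handed to sata_link_debounce()/sata_link_resume():
 * SStatus is sampled roughly every interval ms, must read back stable
 * for duration ms, and the wait is abandoned after timeout ms.
 */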
77 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
78 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
79 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
80 
81 const struct ata_port_operations ata_base_port_ops = {
82 	.prereset		= ata_std_prereset,
83 	.postreset		= ata_std_postreset,
84 	.error_handler		= ata_std_error_handler,
85 	.sched_eh		= ata_std_sched_eh,
86 	.end_eh			= ata_std_end_eh,
87 };
88 
89 const struct ata_port_operations sata_port_ops = {
90 	.inherits		= &ata_base_port_ops,
91 
92 	.qc_defer		= ata_std_qc_defer,
93 	.hardreset		= sata_std_hardreset,
94 };
95 
96 static unsigned int ata_dev_init_params(struct ata_device *dev,
97 					u16 heads, u16 sectors);
98 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
99 static void ata_dev_xfermask(struct ata_device *dev);
100 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
101 
102 atomic_t ata_print_id = ATOMIC_INIT(0);
103 
104 struct ata_force_param {
105 	const char	*name;
106 	unsigned int	cbl;
107 	int		spd_limit;
108 	unsigned long	xfer_mask;
109 	unsigned int	horkage_on;
110 	unsigned int	horkage_off;
111 	unsigned int	lflags;
112 };
113 
114 struct ata_force_ent {
115 	int			port;
116 	int			device;
117 	struct ata_force_param	param;
118 };
119 
120 static struct ata_force_ent *ata_force_tbl;
121 static int ata_force_tbl_size;
122 
123 static char ata_force_param_buf[PAGE_SIZE] __initdata;
124 /* param_buf is thrown away after initialization, disallow read */
125 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
126 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
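/*
 * Example (matching rules are in the ata_force_*() helpers below):
 * booting with libata.force=1.00:40c,udma4 marks port 1 as having a
 * 40-conductor cable and forces device 1.00 to UDMA4.
 */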
127 
128 static int atapi_enabled = 1;
129 module_param(atapi_enabled, int, 0444);
130 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
131 
132 static int atapi_dmadir = 0;
133 module_param(atapi_dmadir, int, 0444);
134 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
135 
136 int atapi_passthru16 = 1;
137 module_param(atapi_passthru16, int, 0444);
138 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
139 
140 int libata_fua = 0;
141 module_param_named(fua, libata_fua, int, 0444);
142 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
143 
144 static int ata_ignore_hpa;
145 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
146 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
147 
148 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
149 module_param_named(dma, libata_dma_mask, int, 0444);
150 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
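/* e.g. libata.dma=5 (0x1|0x4) keeps DMA for ATA disks and CF but disables it for ATAPI */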
151 
152 static int ata_probe_timeout;
153 module_param(ata_probe_timeout, int, 0444);
154 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
155 
156 int libata_noacpi = 0;
157 module_param_named(noacpi, libata_noacpi, int, 0444);
158 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
159 
160 int libata_allow_tpm = 0;
161 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
162 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
163 
164 static int atapi_an;
165 module_param(atapi_an, int, 0444);
166 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
167 
168 MODULE_AUTHOR("Jeff Garzik");
169 MODULE_DESCRIPTION("Library module for ATA devices");
170 MODULE_LICENSE("GPL");
171 MODULE_VERSION(DRV_VERSION);
172 
173 
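/*
 * SStatus DET (bits 3:0) == 0x3 means a device is present and Phy
 * communication is established.
 */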
174 static bool ata_sstatus_online(u32 sstatus)
175 {
176 	return (sstatus & 0xf) == 0x3;
177 }
178 
179 /**
180  *	ata_link_next - link iteration helper
181  *	@link: the previous link, NULL to start
182  *	@ap: ATA port containing links to iterate
183  *	@mode: iteration mode, one of ATA_LITER_*
184  *
185  *	LOCKING:
186  *	Host lock or EH context.
187  *
188  *	RETURNS:
189  *	Pointer to the next link.
190  */
191 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
192 			       enum ata_link_iter_mode mode)
193 {
194 	BUG_ON(mode != ATA_LITER_EDGE &&
195 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
196 
197 	/* NULL link indicates start of iteration */
198 	if (!link)
199 		switch (mode) {
200 		case ATA_LITER_EDGE:
201 		case ATA_LITER_PMP_FIRST:
202 			if (sata_pmp_attached(ap))
203 				return ap->pmp_link;
204 			/* fall through */
205 		case ATA_LITER_HOST_FIRST:
206 			return &ap->link;
207 		}
208 
209 	/* we just iterated over the host link, what's next? */
210 	if (link == &ap->link)
211 		switch (mode) {
212 		case ATA_LITER_HOST_FIRST:
213 			if (sata_pmp_attached(ap))
214 				return ap->pmp_link;
215 			/* fall through */
216 		case ATA_LITER_PMP_FIRST:
217 			if (unlikely(ap->slave_link))
218 				return ap->slave_link;
219 			/* fall through */
220 		case ATA_LITER_EDGE:
221 			return NULL;
222 		}
223 
224 	/* slave_link excludes PMP */
225 	if (unlikely(link == ap->slave_link))
226 		return NULL;
227 
228 	/* we were over a PMP link */
229 	if (++link < ap->pmp_link + ap->nr_pmp_links)
230 		return link;
231 
232 	if (mode == ATA_LITER_PMP_FIRST)
233 		return &ap->link;
234 
235 	return NULL;
236 }
237 
238 /**
239  *	ata_dev_next - device iteration helper
240  *	@dev: the previous device, NULL to start
241  *	@link: ATA link containing devices to iterate
242  *	@mode: iteration mode, one of ATA_DITER_*
243  *
244  *	LOCKING:
245  *	Host lock or EH context.
246  *
247  *	RETURNS:
248  *	Pointer to the next device.
249  */
250 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
251 				enum ata_dev_iter_mode mode)
252 {
253 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
254 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
255 
256 	/* NULL dev indicates start of iteration */
257 	if (!dev)
258 		switch (mode) {
259 		case ATA_DITER_ENABLED:
260 		case ATA_DITER_ALL:
261 			dev = link->device;
262 			goto check;
263 		case ATA_DITER_ENABLED_REVERSE:
264 		case ATA_DITER_ALL_REVERSE:
265 			dev = link->device + ata_link_max_devices(link) - 1;
266 			goto check;
267 		}
268 
269  next:
270 	/* move to the next one */
271 	switch (mode) {
272 	case ATA_DITER_ENABLED:
273 	case ATA_DITER_ALL:
274 		if (++dev < link->device + ata_link_max_devices(link))
275 			goto check;
276 		return NULL;
277 	case ATA_DITER_ENABLED_REVERSE:
278 	case ATA_DITER_ALL_REVERSE:
279 		if (--dev >= link->device)
280 			goto check;
281 		return NULL;
282 	}
283 
284  check:
285 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
286 	    !ata_dev_enabled(dev))
287 		goto next;
288 	return dev;
289 }
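/*
 * ata_link_next() and ata_dev_next() are normally used through the
 * ata_for_each_link() and ata_for_each_dev() wrappers defined in
 * <linux/libata.h> rather than being called directly.
 */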
290 
291 /**
292  *	ata_dev_phys_link - find physical link for a device
293  *	@dev: ATA device to look up physical link for
294  *
295  *	Look up physical link which @dev is attached to.  Note that
296  *	this is different from @dev->link only when @dev is on slave
297  *	link.  For all other cases, it's the same as @dev->link.
298  *
299  *	LOCKING:
300  *	Don't care.
301  *
302  *	RETURNS:
303  *	Pointer to the found physical link.
304  */
305 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
306 {
307 	struct ata_port *ap = dev->link->ap;
308 
309 	if (!ap->slave_link)
310 		return dev->link;
311 	if (!dev->devno)
312 		return &ap->link;
313 	return ap->slave_link;
314 }
315 
316 /**
317  *	ata_force_cbl - force cable type according to libata.force
318  *	@ap: ATA port of interest
319  *
320  *	Force cable type according to libata.force and whine about it.
321  *	The last entry which has matching port number is used, so it
322  *	can be specified as part of device force parameters.  For
323  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
324  *	same effect.
325  *
326  *	LOCKING:
327  *	EH context.
328  */
329 void ata_force_cbl(struct ata_port *ap)
330 {
331 	int i;
332 
333 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
334 		const struct ata_force_ent *fe = &ata_force_tbl[i];
335 
336 		if (fe->port != -1 && fe->port != ap->print_id)
337 			continue;
338 
339 		if (fe->param.cbl == ATA_CBL_NONE)
340 			continue;
341 
342 		ap->cbl = fe->param.cbl;
343 		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
344 		return;
345 	}
346 }
347 
348 /**
349  *	ata_force_link_limits - force link limits according to libata.force
350  *	@link: ATA link of interest
351  *
352  *	Force link flags and SATA spd limit according to libata.force
353  *	and whine about it.  When only the port part is specified
354  *	(e.g. 1:), the limit applies to all links connected to both
355  *	the host link and all fan-out ports connected via PMP.  If the
356  *	device part is specified as 0 (e.g. 1.00:), it specifies the
357  *	first fan-out link not the host link.  Device number 15 always
358  *	points to the host link whether PMP is attached or not.  If the
359  *	controller has slave link, device number 16 points to it.
360  *
361  *	LOCKING:
362  *	EH context.
363  */
364 static void ata_force_link_limits(struct ata_link *link)
365 {
366 	bool did_spd = false;
367 	int linkno = link->pmp;
368 	int i;
369 
370 	if (ata_is_host_link(link))
371 		linkno += 15;
372 
373 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
374 		const struct ata_force_ent *fe = &ata_force_tbl[i];
375 
376 		if (fe->port != -1 && fe->port != link->ap->print_id)
377 			continue;
378 
379 		if (fe->device != -1 && fe->device != linkno)
380 			continue;
381 
382 		/* only honor the first spd limit */
383 		if (!did_spd && fe->param.spd_limit) {
384 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
385 			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
386 					fe->param.name);
387 			did_spd = true;
388 		}
389 
390 		/* let lflags stack */
391 		if (fe->param.lflags) {
392 			link->flags |= fe->param.lflags;
393 			ata_link_notice(link,
394 					"FORCE: link flag 0x%x forced -> 0x%x\n",
395 					fe->param.lflags, link->flags);
396 		}
397 	}
398 }
399 
400 /**
401  *	ata_force_xfermask - force xfermask according to libata.force
402  *	@dev: ATA device of interest
403  *
404  *	Force xfer_mask according to libata.force and whine about it.
405  *	For consistency with link selection, device number 15 selects
406  *	the first device connected to the host link.
407  *
408  *	LOCKING:
409  *	EH context.
410  */
411 static void ata_force_xfermask(struct ata_device *dev)
412 {
413 	int devno = dev->link->pmp + dev->devno;
414 	int alt_devno = devno;
415 	int i;
416 
417 	/* allow n.15/16 for devices attached to host port */
418 	if (ata_is_host_link(dev->link))
419 		alt_devno += 15;
420 
421 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
422 		const struct ata_force_ent *fe = &ata_force_tbl[i];
423 		unsigned long pio_mask, mwdma_mask, udma_mask;
424 
425 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
426 			continue;
427 
428 		if (fe->device != -1 && fe->device != devno &&
429 		    fe->device != alt_devno)
430 			continue;
431 
432 		if (!fe->param.xfer_mask)
433 			continue;
434 
435 		ata_unpack_xfermask(fe->param.xfer_mask,
436 				    &pio_mask, &mwdma_mask, &udma_mask);
437 		if (udma_mask)
438 			dev->udma_mask = udma_mask;
439 		else if (mwdma_mask) {
440 			dev->udma_mask = 0;
441 			dev->mwdma_mask = mwdma_mask;
442 		} else {
443 			dev->udma_mask = 0;
444 			dev->mwdma_mask = 0;
445 			dev->pio_mask = pio_mask;
446 		}
447 
448 		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
449 			       fe->param.name);
450 		return;
451 	}
452 }
453 
454 /**
455  *	ata_force_horkage - force horkage according to libata.force
456  *	@dev: ATA device of interest
457  *
458  *	Force horkage according to libata.force and whine about it.
459  *	For consistency with link selection, device number 15 selects
460  *	the first device connected to the host link.
461  *
462  *	LOCKING:
463  *	EH context.
464  */
465 static void ata_force_horkage(struct ata_device *dev)
466 {
467 	int devno = dev->link->pmp + dev->devno;
468 	int alt_devno = devno;
469 	int i;
470 
471 	/* allow n.15/16 for devices attached to host port */
472 	if (ata_is_host_link(dev->link))
473 		alt_devno += 15;
474 
475 	for (i = 0; i < ata_force_tbl_size; i++) {
476 		const struct ata_force_ent *fe = &ata_force_tbl[i];
477 
478 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
479 			continue;
480 
481 		if (fe->device != -1 && fe->device != devno &&
482 		    fe->device != alt_devno)
483 			continue;
484 
485 		if (!(~dev->horkage & fe->param.horkage_on) &&
486 		    !(dev->horkage & fe->param.horkage_off))
487 			continue;
488 
489 		dev->horkage |= fe->param.horkage_on;
490 		dev->horkage &= ~fe->param.horkage_off;
491 
492 		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
493 			       fe->param.name);
494 	}
495 }
496 
497 /**
498  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
499  *	@opcode: SCSI opcode
500  *
501  *	Determine ATAPI command type from @opcode.
502  *
503  *	LOCKING:
504  *	None.
505  *
506  *	RETURNS:
507  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
508  */
509 int atapi_cmd_type(u8 opcode)
510 {
511 	switch (opcode) {
512 	case GPCMD_READ_10:
513 	case GPCMD_READ_12:
514 		return ATAPI_READ;
515 
516 	case GPCMD_WRITE_10:
517 	case GPCMD_WRITE_12:
518 	case GPCMD_WRITE_AND_VERIFY_10:
519 		return ATAPI_WRITE;
520 
521 	case GPCMD_READ_CD:
522 	case GPCMD_READ_CD_MSF:
523 		return ATAPI_READ_CD;
524 
525 	case ATA_16:
526 	case ATA_12:
527 		if (atapi_passthru16)
528 			return ATAPI_PASS_THRU;
529 		/* fall thru */
530 	default:
531 		return ATAPI_MISC;
532 	}
533 }
534 
535 /**
536  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
537  *	@tf: Taskfile to convert
538  *	@pmp: Port multiplier port
539  *	@is_cmd: This FIS is for command
540  *	@fis: Buffer into which data will output
541  *
542  *	Converts a standard ATA taskfile to a Serial ATA
543  *	FIS structure (Register - Host to Device).
544  *
545  *	LOCKING:
546  *	Inherited from caller.
547  */
548 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
549 {
550 	fis[0] = 0x27;			/* Register - Host to Device FIS */
551 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
552 	if (is_cmd)
553 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
554 
555 	fis[2] = tf->command;
556 	fis[3] = tf->feature;
557 
558 	fis[4] = tf->lbal;
559 	fis[5] = tf->lbam;
560 	fis[6] = tf->lbah;
561 	fis[7] = tf->device;
562 
563 	fis[8] = tf->hob_lbal;
564 	fis[9] = tf->hob_lbam;
565 	fis[10] = tf->hob_lbah;
566 	fis[11] = tf->hob_feature;
567 
568 	fis[12] = tf->nsect;
569 	fis[13] = tf->hob_nsect;
570 	fis[14] = 0;
571 	fis[15] = tf->ctl;
572 
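	/* DWord 4 (bytes 16-19) is the auxiliary field, reserved before SATA 3.x */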
573 	fis[16] = tf->auxiliary & 0xff;
574 	fis[17] = (tf->auxiliary >> 8) & 0xff;
575 	fis[18] = (tf->auxiliary >> 16) & 0xff;
576 	fis[19] = (tf->auxiliary >> 24) & 0xff;
577 }
578 
579 /**
580  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
581  *	@fis: Buffer from which data will be input
582  *	@tf: Taskfile to output
583  *
584  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
585  *
586  *	LOCKING:
587  *	Inherited from caller.
588  */
589 
590 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
591 {
592 	tf->command	= fis[2];	/* status */
593 	tf->feature	= fis[3];	/* error */
594 
595 	tf->lbal	= fis[4];
596 	tf->lbam	= fis[5];
597 	tf->lbah	= fis[6];
598 	tf->device	= fis[7];
599 
600 	tf->hob_lbal	= fis[8];
601 	tf->hob_lbam	= fis[9];
602 	tf->hob_lbah	= fis[10];
603 
604 	tf->nsect	= fis[12];
605 	tf->hob_nsect	= fis[13];
606 }
607 
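/*
 * ata_rw_cmds[] is indexed as base + fua + lba48 + write: base is 0 for
 * PIO multi, 8 for PIO and 16 for DMA; fua adds 4, lba48 adds 2 and
 * write adds 1 (see how ata_rwcmd_protocol() below computes the index).
 * Zero entries mark combinations that have no corresponding command.
 */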
608 static const u8 ata_rw_cmds[] = {
609 	/* pio multi */
610 	ATA_CMD_READ_MULTI,
611 	ATA_CMD_WRITE_MULTI,
612 	ATA_CMD_READ_MULTI_EXT,
613 	ATA_CMD_WRITE_MULTI_EXT,
614 	0,
615 	0,
616 	0,
617 	ATA_CMD_WRITE_MULTI_FUA_EXT,
618 	/* pio */
619 	ATA_CMD_PIO_READ,
620 	ATA_CMD_PIO_WRITE,
621 	ATA_CMD_PIO_READ_EXT,
622 	ATA_CMD_PIO_WRITE_EXT,
623 	0,
624 	0,
625 	0,
626 	0,
627 	/* dma */
628 	ATA_CMD_READ,
629 	ATA_CMD_WRITE,
630 	ATA_CMD_READ_EXT,
631 	ATA_CMD_WRITE_EXT,
632 	0,
633 	0,
634 	0,
635 	ATA_CMD_WRITE_FUA_EXT
636 };
637 
638 /**
639  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
640  *	@tf: command to examine and configure
641  *	@dev: device tf belongs to
642  *
643  *	Examine the device configuration and tf->flags to calculate
644  *	the proper read/write commands and protocol to use.
645  *
646  *	LOCKING:
647  *	caller.
648  */
649 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
650 {
651 	u8 cmd;
652 
653 	int index, fua, lba48, write;
654 
655 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
656 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
657 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
658 
659 	if (dev->flags & ATA_DFLAG_PIO) {
660 		tf->protocol = ATA_PROT_PIO;
661 		index = dev->multi_count ? 0 : 8;
662 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
663 		/* Unable to use DMA due to host limitation */
664 		tf->protocol = ATA_PROT_PIO;
665 		index = dev->multi_count ? 0 : 8;
666 	} else {
667 		tf->protocol = ATA_PROT_DMA;
668 		index = 16;
669 	}
670 
671 	cmd = ata_rw_cmds[index + fua + lba48 + write];
672 	if (cmd) {
673 		tf->command = cmd;
674 		return 0;
675 	}
676 	return -1;
677 }
678 
679 /**
680  *	ata_tf_read_block - Read block address from ATA taskfile
681  *	@tf: ATA taskfile of interest
682  *	@dev: ATA device @tf belongs to
683  *
684  *	LOCKING:
685  *	None.
686  *
687  *	Read block address from @tf.  This function can handle all
688  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
689  *	flags select the address format to use.
690  *
691  *	RETURNS:
692  *	Block address read from @tf.
693  */
694 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
695 {
696 	u64 block = 0;
697 
698 	if (tf->flags & ATA_TFLAG_LBA) {
699 		if (tf->flags & ATA_TFLAG_LBA48) {
700 			block |= (u64)tf->hob_lbah << 40;
701 			block |= (u64)tf->hob_lbam << 32;
702 			block |= (u64)tf->hob_lbal << 24;
703 		} else
704 			block |= (tf->device & 0xf) << 24;
705 
706 		block |= tf->lbah << 16;
707 		block |= tf->lbam << 8;
708 		block |= tf->lbal;
709 	} else {
710 		u32 cyl, head, sect;
711 
712 		cyl = tf->lbam | (tf->lbah << 8);
713 		head = tf->device & 0xf;
714 		sect = tf->lbal;
715 
716 		if (!sect) {
717 			ata_dev_warn(dev,
718 				     "device reported invalid CHS sector 0\n");
719 			sect = 1; /* oh well */
720 		}
721 
722 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
723 	}
724 
725 	return block;
726 }
727 
728 /**
729  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
730  *	@tf: Target ATA taskfile
731  *	@dev: ATA device @tf belongs to
732  *	@block: Block address
733  *	@n_block: Number of blocks
734  *	@tf_flags: RW/FUA etc...
735  *	@tag: tag
736  *
737  *	LOCKING:
738  *	None.
739  *
740  *	Build ATA taskfile @tf for read/write request described by
741  *	@block, @n_block, @tf_flags and @tag on @dev.
742  *
743  *	RETURNS:
744  *
745  *	0 on success, -ERANGE if the request is too large for @dev,
746  *	-EINVAL if the request is invalid.
747  */
748 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
749 		    u64 block, u32 n_block, unsigned int tf_flags,
750 		    unsigned int tag)
751 {
752 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
753 	tf->flags |= tf_flags;
754 
755 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
756 		/* yay, NCQ */
757 		if (!lba_48_ok(block, n_block))
758 			return -ERANGE;
759 
760 		tf->protocol = ATA_PROT_NCQ;
761 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
762 
763 		if (tf->flags & ATA_TFLAG_WRITE)
764 			tf->command = ATA_CMD_FPDMA_WRITE;
765 		else
766 			tf->command = ATA_CMD_FPDMA_READ;
767 
768 		tf->nsect = tag << 3;
769 		tf->hob_feature = (n_block >> 8) & 0xff;
770 		tf->feature = n_block & 0xff;
771 
772 		tf->hob_lbah = (block >> 40) & 0xff;
773 		tf->hob_lbam = (block >> 32) & 0xff;
774 		tf->hob_lbal = (block >> 24) & 0xff;
775 		tf->lbah = (block >> 16) & 0xff;
776 		tf->lbam = (block >> 8) & 0xff;
777 		tf->lbal = block & 0xff;
778 
779 		tf->device = ATA_LBA;
780 		if (tf->flags & ATA_TFLAG_FUA)
781 			tf->device |= 1 << 7;
782 	} else if (dev->flags & ATA_DFLAG_LBA) {
783 		tf->flags |= ATA_TFLAG_LBA;
784 
785 		if (lba_28_ok(block, n_block)) {
786 			/* use LBA28 */
787 			tf->device |= (block >> 24) & 0xf;
788 		} else if (lba_48_ok(block, n_block)) {
789 			if (!(dev->flags & ATA_DFLAG_LBA48))
790 				return -ERANGE;
791 
792 			/* use LBA48 */
793 			tf->flags |= ATA_TFLAG_LBA48;
794 
795 			tf->hob_nsect = (n_block >> 8) & 0xff;
796 
797 			tf->hob_lbah = (block >> 40) & 0xff;
798 			tf->hob_lbam = (block >> 32) & 0xff;
799 			tf->hob_lbal = (block >> 24) & 0xff;
800 		} else
801 			/* request too large even for LBA48 */
802 			return -ERANGE;
803 
804 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
805 			return -EINVAL;
806 
807 		tf->nsect = n_block & 0xff;
808 
809 		tf->lbah = (block >> 16) & 0xff;
810 		tf->lbam = (block >> 8) & 0xff;
811 		tf->lbal = block & 0xff;
812 
813 		tf->device |= ATA_LBA;
814 	} else {
815 		/* CHS */
816 		u32 sect, head, cyl, track;
817 
818 		/* The request -may- be too large for CHS addressing. */
819 		if (!lba_28_ok(block, n_block))
820 			return -ERANGE;
821 
822 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
823 			return -EINVAL;
824 
825 		/* Convert LBA to CHS */
826 		track = (u32)block / dev->sectors;
827 		cyl   = track / dev->heads;
828 		head  = track % dev->heads;
829 		sect  = (u32)block % dev->sectors + 1;
830 
831 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
832 			(u32)block, track, cyl, head, sect);
833 
834 		/* Check whether the converted CHS can fit.
835 		   Cylinder: 0-65535
836 		   Head: 0-15
837 		   Sector: 1-255 */
838 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
839 			return -ERANGE;
840 
841 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
842 		tf->lbal = sect;
843 		tf->lbam = cyl;
844 		tf->lbah = cyl >> 8;
845 		tf->device |= head;
846 	}
847 
848 	return 0;
849 }
850 
851 /**
852  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
853  *	@pio_mask: pio_mask
854  *	@mwdma_mask: mwdma_mask
855  *	@udma_mask: udma_mask
856  *
857  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
858  *	unsigned int xfer_mask.
859  *
860  *	LOCKING:
861  *	None.
862  *
863  *	RETURNS:
864  *	Packed xfer_mask.
865  */
866 unsigned long ata_pack_xfermask(unsigned long pio_mask,
867 				unsigned long mwdma_mask,
868 				unsigned long udma_mask)
869 {
870 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
871 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
872 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
873 }
874 
875 /**
876  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
877  *	@xfer_mask: xfer_mask to unpack
878  *	@pio_mask: resulting pio_mask
879  *	@mwdma_mask: resulting mwdma_mask
880  *	@udma_mask: resulting udma_mask
881  *
882  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
883  *	Any NULL destination masks will be ignored.
884  */
885 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
886 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
887 {
888 	if (pio_mask)
889 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
890 	if (mwdma_mask)
891 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
892 	if (udma_mask)
893 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
894 }
895 
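/*
 * Each ata_xfer_tbl entry maps one contiguous bit range of the packed
 * xfer_mask (PIO, MWDMA, UDMA) to its base XFER_* mode number; the
 * lookup helpers below walk this table in both directions.
 */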
896 static const struct ata_xfer_ent {
897 	int shift, bits;
898 	u8 base;
899 } ata_xfer_tbl[] = {
900 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
901 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
902 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
903 	{ -1, },
904 };
905 
906 /**
907  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
908  *	@xfer_mask: xfer_mask of interest
909  *
910  *	Return matching XFER_* value for @xfer_mask.  Only the highest
911  *	bit of @xfer_mask is considered.
912  *
913  *	LOCKING:
914  *	None.
915  *
916  *	RETURNS:
917  *	Matching XFER_* value, 0xff if no match found.
918  */
919 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
920 {
921 	int highbit = fls(xfer_mask) - 1;
922 	const struct ata_xfer_ent *ent;
923 
924 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
925 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
926 			return ent->base + highbit - ent->shift;
927 	return 0xff;
928 }
929 
930 /**
931  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
932  *	@xfer_mode: XFER_* of interest
933  *
934  *	Return matching xfer_mask for @xfer_mode.
935  *
936  *	LOCKING:
937  *	None.
938  *
939  *	RETURNS:
940  *	Matching xfer_mask, 0 if no match found.
941  */
942 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
943 {
944 	const struct ata_xfer_ent *ent;
945 
946 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
947 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
948 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
949 				& ~((1 << ent->shift) - 1);
950 	return 0;
951 }
952 
953 /**
954  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
955  *	@xfer_mode: XFER_* of interest
956  *
957  *	Return matching xfer_shift for @xfer_mode.
958  *
959  *	LOCKING:
960  *	None.
961  *
962  *	RETURNS:
963  *	Matching xfer_shift, -1 if no match found.
964  */
965 int ata_xfer_mode2shift(unsigned long xfer_mode)
966 {
967 	const struct ata_xfer_ent *ent;
968 
969 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
970 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
971 			return ent->shift;
972 	return -1;
973 }
974 
975 /**
976  *	ata_mode_string - convert xfer_mask to string
977  *	@xfer_mask: mask of bits supported; only highest bit counts.
978  *
979  *	Determine string which represents the highest speed
980  *	(highest bit in @xfer_mask).
981  *
982  *	LOCKING:
983  *	None.
984  *
985  *	RETURNS:
986  *	Constant C string representing highest speed listed in
987  *	@xfer_mask, or the constant C string "<n/a>".
988  */
989 const char *ata_mode_string(unsigned long xfer_mask)
990 {
991 	static const char * const xfer_mode_str[] = {
992 		"PIO0",
993 		"PIO1",
994 		"PIO2",
995 		"PIO3",
996 		"PIO4",
997 		"PIO5",
998 		"PIO6",
999 		"MWDMA0",
1000 		"MWDMA1",
1001 		"MWDMA2",
1002 		"MWDMA3",
1003 		"MWDMA4",
1004 		"UDMA/16",
1005 		"UDMA/25",
1006 		"UDMA/33",
1007 		"UDMA/44",
1008 		"UDMA/66",
1009 		"UDMA/100",
1010 		"UDMA/133",
1011 		"UDMA7",
1012 	};
1013 	int highbit;
1014 
1015 	highbit = fls(xfer_mask) - 1;
1016 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1017 		return xfer_mode_str[highbit];
1018 	return "<n/a>";
1019 }
1020 
1021 const char *sata_spd_string(unsigned int spd)
1022 {
1023 	static const char * const spd_str[] = {
1024 		"1.5 Gbps",
1025 		"3.0 Gbps",
1026 		"6.0 Gbps",
1027 	};
1028 
1029 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1030 		return "<unknown>";
1031 	return spd_str[spd - 1];
1032 }
1033 
1034 /**
1035  *	ata_dev_classify - determine device type based on ATA-spec signature
1036  *	@tf: ATA taskfile register set for device to be identified
1037  *
1038  *	Determine from taskfile register contents whether a device is
1039  *	ATA or ATAPI, as per "Signature and persistence" section
1040  *	of ATA/PI spec (volume 1, sect 5.14).
1041  *
1042  *	LOCKING:
1043  *	None.
1044  *
1045  *	RETURNS:
1046  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1047  *	%ATA_DEV_UNKNOWN in the event of failure.
1048  */
1049 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1050 {
1051 	/* Apple's open source Darwin code hints that some devices only
1052 	 * put a proper signature into the LBA mid/high registers,
1053 	 * so we only check those.  It's sufficient for uniqueness.
1054 	 *
1055 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1056 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1057 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1058 	 * spec has never mentioned using different signatures
1059 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1060 	 * Multiplier specification began to use 0x69/0x96 to identify
1061 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1062 	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1063 	 * 0x69/0x96 shortly and described them as reserved for
1064 	 * SerialATA.
1065 	 *
1066 	 * We follow the current spec and consider that 0x69/0x96
1067 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1068 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1069 	 * SEMB signature.  This is worked around in
1070 	 * ata_dev_read_id().
1071 	 */
1072 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1073 		DPRINTK("found ATA device by sig\n");
1074 		return ATA_DEV_ATA;
1075 	}
1076 
1077 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1078 		DPRINTK("found ATAPI device by sig\n");
1079 		return ATA_DEV_ATAPI;
1080 	}
1081 
1082 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1083 		DPRINTK("found PMP device by sig\n");
1084 		return ATA_DEV_PMP;
1085 	}
1086 
1087 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1088 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1089 		return ATA_DEV_SEMB;
1090 	}
1091 
1092 	DPRINTK("unknown device\n");
1093 	return ATA_DEV_UNKNOWN;
1094 }
1095 
1096 /**
1097  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1098  *	@id: IDENTIFY DEVICE results we will examine
1099  *	@s: string into which data is output
1100  *	@ofs: offset into identify device page
1101  *	@len: length of string to return. must be an even number.
1102  *
1103  *	The strings in the IDENTIFY DEVICE page are broken up into
1104  *	16-bit chunks.  Run through the string, and output each
1105  *	8-bit chunk linearly, regardless of platform.
1106  *
1107  *	LOCKING:
1108  *	caller.
1109  */
1110 
1111 void ata_id_string(const u16 *id, unsigned char *s,
1112 		   unsigned int ofs, unsigned int len)
1113 {
1114 	unsigned int c;
1115 
1116 	BUG_ON(len & 1);
1117 
1118 	while (len > 0) {
1119 		c = id[ofs] >> 8;
1120 		*s = c;
1121 		s++;
1122 
1123 		c = id[ofs] & 0xff;
1124 		*s = c;
1125 		s++;
1126 
1127 		ofs++;
1128 		len -= 2;
1129 	}
1130 }
1131 
1132 /**
1133  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1134  *	@id: IDENTIFY DEVICE results we will examine
1135  *	@s: string into which data is output
1136  *	@ofs: offset into identify device page
1137  *	@len: length of string to return. must be an odd number.
1138  *
1139  *	This function is identical to ata_id_string except that it
1140  *	trims trailing spaces and terminates the resulting string with
1141  *	null.  @len must be actual maximum length (even number) + 1.
1142  *
1143  *	LOCKING:
1144  *	caller.
1145  */
1146 void ata_id_c_string(const u16 *id, unsigned char *s,
1147 		     unsigned int ofs, unsigned int len)
1148 {
1149 	unsigned char *p;
1150 
1151 	ata_id_string(id, s, ofs, len - 1);
1152 
1153 	p = s + strnlen(s, len - 1);
1154 	while (p > s && p[-1] == ' ')
1155 		p--;
1156 	*p = '\0';
1157 }
1158 
1159 static u64 ata_id_n_sectors(const u16 *id)
1160 {
1161 	if (ata_id_has_lba(id)) {
1162 		if (ata_id_has_lba48(id))
1163 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1164 		else
1165 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1166 	} else {
1167 		if (ata_id_current_chs_valid(id))
1168 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1169 			       id[ATA_ID_CUR_SECTORS];
1170 		else
1171 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1172 			       id[ATA_ID_SECTORS];
1173 	}
1174 }
1175 
1176 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1177 {
1178 	u64 sectors = 0;
1179 
1180 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1181 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1182 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1183 	sectors |= (tf->lbah & 0xff) << 16;
1184 	sectors |= (tf->lbam & 0xff) << 8;
1185 	sectors |= (tf->lbal & 0xff);
1186 
1187 	return sectors;
1188 }
1189 
1190 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1191 {
1192 	u64 sectors = 0;
1193 
1194 	sectors |= (tf->device & 0x0f) << 24;
1195 	sectors |= (tf->lbah & 0xff) << 16;
1196 	sectors |= (tf->lbam & 0xff) << 8;
1197 	sectors |= (tf->lbal & 0xff);
1198 
1199 	return sectors;
1200 }
1201 
1202 /**
1203  *	ata_read_native_max_address - Read native max address
1204  *	@dev: target device
1205  *	@max_sectors: out parameter for the result native max address
1206  *
1207  *	Perform an LBA48 or LBA28 native size query upon the device in
1208  *	question.
1209  *
1210  *	RETURNS:
1211  *	0 on success, -EACCES if command is aborted by the drive.
1212  *	-EIO on other errors.
1213  */
1214 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1215 {
1216 	unsigned int err_mask;
1217 	struct ata_taskfile tf;
1218 	int lba48 = ata_id_has_lba48(dev->id);
1219 
1220 	ata_tf_init(dev, &tf);
1221 
1222 	/* always clear all address registers */
1223 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1224 
1225 	if (lba48) {
1226 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1227 		tf.flags |= ATA_TFLAG_LBA48;
1228 	} else
1229 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1230 
1231 	tf.protocol |= ATA_PROT_NODATA;
1232 	tf.device |= ATA_LBA;
1233 
1234 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1235 	if (err_mask) {
1236 		ata_dev_warn(dev,
1237 			     "failed to read native max address (err_mask=0x%x)\n",
1238 			     err_mask);
1239 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1240 			return -EACCES;
1241 		return -EIO;
1242 	}
1243 
1244 	if (lba48)
1245 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1246 	else
1247 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1248 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1249 		(*max_sectors)--;
1250 	return 0;
1251 }
1252 
1253 /**
1254  *	ata_set_max_sectors - Set max sectors
1255  *	@dev: target device
1256  *	@new_sectors: new max sectors value to set for the device
1257  *
1258  *	Set max sectors of @dev to @new_sectors.
1259  *
1260  *	RETURNS:
1261  *	0 on success, -EACCES if command is aborted or denied (due to
1262  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1263  *	errors.
1264  */
1265 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1266 {
1267 	unsigned int err_mask;
1268 	struct ata_taskfile tf;
1269 	int lba48 = ata_id_has_lba48(dev->id);
1270 
1271 	new_sectors--;
1272 
1273 	ata_tf_init(dev, &tf);
1274 
1275 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1276 
1277 	if (lba48) {
1278 		tf.command = ATA_CMD_SET_MAX_EXT;
1279 		tf.flags |= ATA_TFLAG_LBA48;
1280 
1281 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1282 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1283 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1284 	} else {
1285 		tf.command = ATA_CMD_SET_MAX;
1286 
1287 		tf.device |= (new_sectors >> 24) & 0xf;
1288 	}
1289 
1290 	tf.protocol |= ATA_PROT_NODATA;
1291 	tf.device |= ATA_LBA;
1292 
1293 	tf.lbal = (new_sectors >> 0) & 0xff;
1294 	tf.lbam = (new_sectors >> 8) & 0xff;
1295 	tf.lbah = (new_sectors >> 16) & 0xff;
1296 
1297 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1298 	if (err_mask) {
1299 		ata_dev_warn(dev,
1300 			     "failed to set max address (err_mask=0x%x)\n",
1301 			     err_mask);
1302 		if (err_mask == AC_ERR_DEV &&
1303 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1304 			return -EACCES;
1305 		return -EIO;
1306 	}
1307 
1308 	return 0;
1309 }
1310 
1311 /**
1312  *	ata_hpa_resize		-	Resize a device with an HPA set
1313  *	@dev: Device to resize
1314  *
1315  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1316  *	it if required to the full size of the media. The caller must check
1317  *	the drive has the HPA feature set enabled.
1318  *
1319  *	RETURNS:
1320  *	0 on success, -errno on failure.
1321  */
1322 static int ata_hpa_resize(struct ata_device *dev)
1323 {
1324 	struct ata_eh_context *ehc = &dev->link->eh_context;
1325 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1326 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1327 	u64 sectors = ata_id_n_sectors(dev->id);
1328 	u64 native_sectors;
1329 	int rc;
1330 
1331 	/* do we need to do it? */
1332 	if (dev->class != ATA_DEV_ATA ||
1333 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1334 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1335 		return 0;
1336 
1337 	/* read native max address */
1338 	rc = ata_read_native_max_address(dev, &native_sectors);
1339 	if (rc) {
1340 		/* If device aborted the command or HPA isn't going to
1341 		 * be unlocked, skip HPA resizing.
1342 		 */
1343 		if (rc == -EACCES || !unlock_hpa) {
1344 			ata_dev_warn(dev,
1345 				     "HPA support seems broken, skipping HPA handling\n");
1346 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1347 
1348 			/* we can continue if device aborted the command */
1349 			if (rc == -EACCES)
1350 				rc = 0;
1351 		}
1352 
1353 		return rc;
1354 	}
1355 	dev->n_native_sectors = native_sectors;
1356 
1357 	/* nothing to do? */
1358 	if (native_sectors <= sectors || !unlock_hpa) {
1359 		if (!print_info || native_sectors == sectors)
1360 			return 0;
1361 
1362 		if (native_sectors > sectors)
1363 			ata_dev_info(dev,
1364 				"HPA detected: current %llu, native %llu\n",
1365 				(unsigned long long)sectors,
1366 				(unsigned long long)native_sectors);
1367 		else if (native_sectors < sectors)
1368 			ata_dev_warn(dev,
1369 				"native sectors (%llu) is smaller than sectors (%llu)\n",
1370 				(unsigned long long)native_sectors,
1371 				(unsigned long long)sectors);
1372 		return 0;
1373 	}
1374 
1375 	/* let's unlock HPA */
1376 	rc = ata_set_max_sectors(dev, native_sectors);
1377 	if (rc == -EACCES) {
1378 		/* if device aborted the command, skip HPA resizing */
1379 		ata_dev_warn(dev,
1380 			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1381 			     (unsigned long long)sectors,
1382 			     (unsigned long long)native_sectors);
1383 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1384 		return 0;
1385 	} else if (rc)
1386 		return rc;
1387 
1388 	/* re-read IDENTIFY data */
1389 	rc = ata_dev_reread_id(dev, 0);
1390 	if (rc) {
1391 		ata_dev_err(dev,
1392 			    "failed to re-read IDENTIFY data after HPA resizing\n");
1393 		return rc;
1394 	}
1395 
1396 	if (print_info) {
1397 		u64 new_sectors = ata_id_n_sectors(dev->id);
1398 		ata_dev_info(dev,
1399 			"HPA unlocked: %llu -> %llu, native %llu\n",
1400 			(unsigned long long)sectors,
1401 			(unsigned long long)new_sectors,
1402 			(unsigned long long)native_sectors);
1403 	}
1404 
1405 	return 0;
1406 }
1407 
1408 /**
1409  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1410  *	@id: IDENTIFY DEVICE page to dump
1411  *
1412  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1413  *	page.
1414  *
1415  *	LOCKING:
1416  *	caller.
1417  */
1418 
1419 static inline void ata_dump_id(const u16 *id)
1420 {
1421 	DPRINTK("49==0x%04x  "
1422 		"53==0x%04x  "
1423 		"63==0x%04x  "
1424 		"64==0x%04x  "
1425 		"75==0x%04x  \n",
1426 		id[49],
1427 		id[53],
1428 		id[63],
1429 		id[64],
1430 		id[75]);
1431 	DPRINTK("80==0x%04x  "
1432 		"81==0x%04x  "
1433 		"82==0x%04x  "
1434 		"83==0x%04x  "
1435 		"84==0x%04x  \n",
1436 		id[80],
1437 		id[81],
1438 		id[82],
1439 		id[83],
1440 		id[84]);
1441 	DPRINTK("88==0x%04x  "
1442 		"93==0x%04x\n",
1443 		id[88],
1444 		id[93]);
1445 }
1446 
1447 /**
1448  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1449  *	@id: IDENTIFY data to compute xfer mask from
1450  *
1451  *	Compute the xfermask for this device. This is not as trivial
1452  *	as it seems if we must consider early devices correctly.
1453  *
1454  *	FIXME: pre IDE drive timing (do we care ?).
1455  *
1456  *	LOCKING:
1457  *	None.
1458  *
1459  *	RETURNS:
1460  *	Computed xfermask
1461  */
1462 unsigned long ata_id_xfermask(const u16 *id)
1463 {
1464 	unsigned long pio_mask, mwdma_mask, udma_mask;
1465 
1466 	/* Usual case. Word 53 indicates word 64 is valid */
1467 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1468 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1469 		pio_mask <<= 3;
1470 		pio_mask |= 0x7;
1471 	} else {
1472 		/* If word 64 isn't valid then Word 51 high byte holds
1473 		 * the PIO timing number for the maximum. Turn it into
1474 		 * a mask.
1475 		 */
1476 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1477 		if (mode < 5)	/* Valid PIO range */
1478 			pio_mask = (2 << mode) - 1;
1479 		else
1480 			pio_mask = 1;
1481 
1482 		/* But wait.. there's more. Design your standards by
1483 		 * committee and you too can get a free iordy field to
1484 		 * process.  However it's the speeds, not the modes, that
1485 		 * are supported... Note drivers using the timing API
1486 		 * will get this right anyway
1487 		 */
1488 	}
1489 
1490 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1491 
1492 	if (ata_id_is_cfa(id)) {
1493 		/*
1494 		 *	Process compact flash extended modes
1495 		 */
1496 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1497 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1498 
1499 		if (pio)
1500 			pio_mask |= (1 << 5);
1501 		if (pio > 1)
1502 			pio_mask |= (1 << 6);
1503 		if (dma)
1504 			mwdma_mask |= (1 << 3);
1505 		if (dma > 1)
1506 			mwdma_mask |= (1 << 4);
1507 	}
1508 
1509 	udma_mask = 0;
1510 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1511 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1512 
1513 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1514 }
1515 
1516 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1517 {
1518 	struct completion *waiting = qc->private_data;
1519 
1520 	complete(waiting);
1521 }
1522 
1523 /**
1524  *	ata_exec_internal_sg - execute libata internal command
1525  *	@dev: Device to which the command is sent
1526  *	@tf: Taskfile registers for the command and the result
1527  *	@cdb: CDB for packet command
1528  *	@dma_dir: Data transfer direction of the command
1529  *	@sgl: sg list for the data buffer of the command
1530  *	@n_elem: Number of sg entries
1531  *	@timeout: Timeout in msecs (0 for default)
1532  *
1533  *	Executes libata internal command with timeout.  @tf contains
1534  *	command on entry and result on return.  Timeout and error
1535  *	conditions are reported via return value.  No recovery action
1536  *	is taken after a command times out.  It's caller's duty to
1537  *	clean up after timeout.
1538  *
1539  *	LOCKING:
1540  *	None.  Should be called with kernel context, might sleep.
1541  *
1542  *	RETURNS:
1543  *	Zero on success, AC_ERR_* mask on failure
1544  */
1545 unsigned ata_exec_internal_sg(struct ata_device *dev,
1546 			      struct ata_taskfile *tf, const u8 *cdb,
1547 			      int dma_dir, struct scatterlist *sgl,
1548 			      unsigned int n_elem, unsigned long timeout)
1549 {
1550 	struct ata_link *link = dev->link;
1551 	struct ata_port *ap = link->ap;
1552 	u8 command = tf->command;
1553 	int auto_timeout = 0;
1554 	struct ata_queued_cmd *qc;
1555 	unsigned int tag, preempted_tag;
1556 	u32 preempted_sactive, preempted_qc_active;
1557 	int preempted_nr_active_links;
1558 	DECLARE_COMPLETION_ONSTACK(wait);
1559 	unsigned long flags;
1560 	unsigned int err_mask;
1561 	int rc;
1562 
1563 	spin_lock_irqsave(ap->lock, flags);
1564 
1565 	/* no internal command while frozen */
1566 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1567 		spin_unlock_irqrestore(ap->lock, flags);
1568 		return AC_ERR_SYSTEM;
1569 	}
1570 
1571 	/* initialize internal qc */
1572 
1573 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1574 	 * drivers choke if any other tag is given.  This breaks
1575 	 * ata_tag_internal() test for those drivers.  Don't use new
1576 	 * EH stuff without converting to it.
1577 	 */
1578 	if (ap->ops->error_handler)
1579 		tag = ATA_TAG_INTERNAL;
1580 	else
1581 		tag = 0;
1582 
1583 	if (test_and_set_bit(tag, &ap->qc_allocated))
1584 		BUG();
1585 	qc = __ata_qc_from_tag(ap, tag);
1586 
1587 	qc->tag = tag;
1588 	qc->scsicmd = NULL;
1589 	qc->ap = ap;
1590 	qc->dev = dev;
1591 	ata_qc_reinit(qc);
1592 
1593 	preempted_tag = link->active_tag;
1594 	preempted_sactive = link->sactive;
1595 	preempted_qc_active = ap->qc_active;
1596 	preempted_nr_active_links = ap->nr_active_links;
1597 	link->active_tag = ATA_TAG_POISON;
1598 	link->sactive = 0;
1599 	ap->qc_active = 0;
1600 	ap->nr_active_links = 0;
1601 
1602 	/* prepare & issue qc */
1603 	qc->tf = *tf;
1604 	if (cdb)
1605 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1606 
1607 	/* some SATA bridges need us to indicate data xfer direction */
1608 	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1609 	    dma_dir == DMA_FROM_DEVICE)
1610 		qc->tf.feature |= ATAPI_DMADIR;
1611 
1612 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1613 	qc->dma_dir = dma_dir;
1614 	if (dma_dir != DMA_NONE) {
1615 		unsigned int i, buflen = 0;
1616 		struct scatterlist *sg;
1617 
1618 		for_each_sg(sgl, sg, n_elem, i)
1619 			buflen += sg->length;
1620 
1621 		ata_sg_init(qc, sgl, n_elem);
1622 		qc->nbytes = buflen;
1623 	}
1624 
1625 	qc->private_data = &wait;
1626 	qc->complete_fn = ata_qc_complete_internal;
1627 
1628 	ata_qc_issue(qc);
1629 
1630 	spin_unlock_irqrestore(ap->lock, flags);
1631 
1632 	if (!timeout) {
1633 		if (ata_probe_timeout)
1634 			timeout = ata_probe_timeout * 1000;
1635 		else {
1636 			timeout = ata_internal_cmd_timeout(dev, command);
1637 			auto_timeout = 1;
1638 		}
1639 	}
1640 
1641 	if (ap->ops->error_handler)
1642 		ata_eh_release(ap);
1643 
1644 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1645 
1646 	if (ap->ops->error_handler)
1647 		ata_eh_acquire(ap);
1648 
1649 	ata_sff_flush_pio_task(ap);
1650 
1651 	if (!rc) {
1652 		spin_lock_irqsave(ap->lock, flags);
1653 
1654 		/* We're racing with irq here.  If we lose, the
1655 		 * following test prevents us from completing the qc
1656 		 * twice.  If we win, the port is frozen and will be
1657 		 * cleaned up by ->post_internal_cmd().
1658 		 */
1659 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1660 			qc->err_mask |= AC_ERR_TIMEOUT;
1661 
1662 			if (ap->ops->error_handler)
1663 				ata_port_freeze(ap);
1664 			else
1665 				ata_qc_complete(qc);
1666 
1667 			if (ata_msg_warn(ap))
1668 				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1669 					     command);
1670 		}
1671 
1672 		spin_unlock_irqrestore(ap->lock, flags);
1673 	}
1674 
1675 	/* do post_internal_cmd */
1676 	if (ap->ops->post_internal_cmd)
1677 		ap->ops->post_internal_cmd(qc);
1678 
1679 	/* perform minimal error analysis */
1680 	if (qc->flags & ATA_QCFLAG_FAILED) {
1681 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1682 			qc->err_mask |= AC_ERR_DEV;
1683 
1684 		if (!qc->err_mask)
1685 			qc->err_mask |= AC_ERR_OTHER;
1686 
1687 		if (qc->err_mask & ~AC_ERR_OTHER)
1688 			qc->err_mask &= ~AC_ERR_OTHER;
1689 	}
1690 
1691 	/* finish up */
1692 	spin_lock_irqsave(ap->lock, flags);
1693 
1694 	*tf = qc->result_tf;
1695 	err_mask = qc->err_mask;
1696 
1697 	ata_qc_free(qc);
1698 	link->active_tag = preempted_tag;
1699 	link->sactive = preempted_sactive;
1700 	ap->qc_active = preempted_qc_active;
1701 	ap->nr_active_links = preempted_nr_active_links;
1702 
1703 	spin_unlock_irqrestore(ap->lock, flags);
1704 
1705 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1706 		ata_internal_cmd_timed_out(dev, command);
1707 
1708 	return err_mask;
1709 }
1710 
1711 /**
1712  *	ata_exec_internal - execute libata internal command
1713  *	@dev: Device to which the command is sent
1714  *	@tf: Taskfile registers for the command and the result
1715  *	@cdb: CDB for packet command
1716  *	@dma_dir: Data transfer direction of the command
1717  *	@buf: Data buffer of the command
1718  *	@buflen: Length of data buffer
1719  *	@timeout: Timeout in msecs (0 for default)
1720  *
1721  *	Wrapper around ata_exec_internal_sg() which takes simple
1722  *	buffer instead of sg list.
1723  *
1724  *	LOCKING:
1725  *	None.  Should be called with kernel context, might sleep.
1726  *
1727  *	RETURNS:
1728  *	Zero on success, AC_ERR_* mask on failure
1729  */
1730 unsigned ata_exec_internal(struct ata_device *dev,
1731 			   struct ata_taskfile *tf, const u8 *cdb,
1732 			   int dma_dir, void *buf, unsigned int buflen,
1733 			   unsigned long timeout)
1734 {
1735 	struct scatterlist *psg = NULL, sg;
1736 	unsigned int n_elem = 0;
1737 
1738 	if (dma_dir != DMA_NONE) {
1739 		WARN_ON(!buf);
1740 		sg_init_one(&sg, buf, buflen);
1741 		psg = &sg;
1742 		n_elem++;
1743 	}
1744 
1745 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1746 				    timeout);
1747 }
1748 
1749 /**
1750  *	ata_do_simple_cmd - execute simple internal command
1751  *	@dev: Device to which the command is sent
1752  *	@cmd: Opcode to execute
1753  *
1754  *	Execute a 'simple' command, that only consists of the opcode
1755  *	'cmd' itself, without filling any other registers
1756  *
1757  *	LOCKING:
1758  *	Kernel thread context (may sleep).
1759  *
1760  *	RETURNS:
1761  *	Zero on success, AC_ERR_* mask on failure
1762  */
1763 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1764 {
1765 	struct ata_taskfile tf;
1766 
1767 	ata_tf_init(dev, &tf);
1768 
1769 	tf.command = cmd;
1770 	tf.flags |= ATA_TFLAG_DEVICE;
1771 	tf.protocol = ATA_PROT_NODATA;
1772 
1773 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1774 }
1775 
1776 /**
1777  *	ata_pio_need_iordy	-	check if iordy needed
1778  *	@adev: ATA device
1779  *
1780  *	Check if the current speed of the device requires IORDY. Used
1781  *	by various controllers for chip configuration.
1782  */
1783 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1784 {
1785 	/* Don't set IORDY if we're preparing for reset.  IORDY may
1786 	 * lead to controller lock up on certain controllers if the
1787 	 * port is not occupied.  See bko#11703 for details.
1788 	 */
1789 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1790 		return 0;
1791 	/* Controller doesn't support IORDY.  Probably a pointless
1792 	 * check as the caller should know this.
1793 	 */
1794 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1795 		return 0;
1796 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1797 	if (ata_id_is_cfa(adev->id)
1798 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1799 		return 0;
1800 	/* PIO3 and higher it is mandatory */
1801 	if (adev->pio_mode > XFER_PIO_2)
1802 		return 1;
1803 	/* We turn it on when possible */
1804 	if (ata_id_has_iordy(adev->id))
1805 		return 1;
1806 	return 0;
1807 }
1808 
1809 /**
1810  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1811  *	@adev: ATA device
1812  *
1813  *	Compute the highest mode possible if we are not using iordy. Return
1814  *	-1 if no iordy mode is available.
1815  */
1816 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1817 {
1818 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1819 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1820 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1821 		/* Is the speed faster than the drive allows non IORDY ? */
1822 		if (pio) {
1823 			/* This is cycle times not frequency - watch the logic! */
1824 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1825 				return 3 << ATA_SHIFT_PIO;
1826 			return 7 << ATA_SHIFT_PIO;
1827 		}
1828 	}
1829 	return 3 << ATA_SHIFT_PIO;
1830 }
1831 
1832 /**
1833  *	ata_do_dev_read_id		-	default ID read method
1834  *	@dev: device
1835  *	@tf: proposed taskfile
1836  *	@id: data buffer
1837  *
1838  *	Issue the identify taskfile and hand back the buffer containing
1839  *	identify data. For some RAID controllers and for pre ATA devices
1840  *	this function is wrapped or replaced by the driver
1841  */
1842 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1843 					struct ata_taskfile *tf, u16 *id)
1844 {
1845 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1846 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1847 }
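
/*
 * Illustrative sketch (assumption): a low-level driver that must patch
 * up broken IDENTIFY data can point its ->read_id hook at a thin
 * wrapper around this default and fix the words it cares about after
 * the transfer:
 *
 *	static unsigned int foo_read_id(struct ata_device *adev,
 *					struct ata_taskfile *tf, u16 *id)
 *	{
 *		unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
 *
 *		if (!err_mask)
 *			foo_fixup_id_words(adev, id);
 *		return err_mask;
 *	}
 *
 * foo_read_id/foo_fixup_id_words are hypothetical; as noted above, RAID
 * firmwares and pre-ATA bridges are the usual reason for such wrappers.
 */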
1848 
1849 /**
1850  *	ata_dev_read_id - Read ID data from the specified device
1851  *	@dev: target device
1852  *	@p_class: pointer to class of the target device (may be changed)
1853  *	@flags: ATA_READID_* flags
1854  *	@id: buffer to read IDENTIFY data into
1855  *
1856  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1857  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1858  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1859  *	for pre-ATA4 drives.
1860  *
1861  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1862  *	now we abort if we hit that case.
1863  *
1864  *	LOCKING:
1865  *	Kernel thread context (may sleep)
1866  *
1867  *	RETURNS:
1868  *	0 on success, -errno otherwise.
1869  */
1870 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1871 		    unsigned int flags, u16 *id)
1872 {
1873 	struct ata_port *ap = dev->link->ap;
1874 	unsigned int class = *p_class;
1875 	struct ata_taskfile tf;
1876 	unsigned int err_mask = 0;
1877 	const char *reason;
1878 	bool is_semb = class == ATA_DEV_SEMB;
1879 	int may_fallback = 1, tried_spinup = 0;
1880 	int rc;
1881 
1882 	if (ata_msg_ctl(ap))
1883 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1884 
1885 retry:
1886 	ata_tf_init(dev, &tf);
1887 
1888 	switch (class) {
1889 	case ATA_DEV_SEMB:
1890 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1891 	case ATA_DEV_ATA:
1892 		tf.command = ATA_CMD_ID_ATA;
1893 		break;
1894 	case ATA_DEV_ATAPI:
1895 		tf.command = ATA_CMD_ID_ATAPI;
1896 		break;
1897 	default:
1898 		rc = -ENODEV;
1899 		reason = "unsupported class";
1900 		goto err_out;
1901 	}
1902 
1903 	tf.protocol = ATA_PROT_PIO;
1904 
1905 	/* Some devices choke if TF registers contain garbage.  Make
1906 	 * sure those are properly initialized.
1907 	 */
1908 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1909 
1910 	/* Device presence detection is unreliable on some
1911 	 * controllers.  Always poll IDENTIFY if available.
1912 	 */
1913 	tf.flags |= ATA_TFLAG_POLLING;
1914 
1915 	if (ap->ops->read_id)
1916 		err_mask = ap->ops->read_id(dev, &tf, id);
1917 	else
1918 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1919 
1920 	if (err_mask) {
1921 		if (err_mask & AC_ERR_NODEV_HINT) {
1922 			ata_dev_dbg(dev, "NODEV after polling detection\n");
1923 			return -ENOENT;
1924 		}
1925 
1926 		if (is_semb) {
1927 			ata_dev_info(dev,
1928 		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1929 			/* SEMB is not supported yet */
1930 			*p_class = ATA_DEV_SEMB_UNSUP;
1931 			return 0;
1932 		}
1933 
1934 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1935 			/* Device or controller might have reported
1936 			 * the wrong device class.  Give a shot at the
1937 			 * other IDENTIFY if the current one is
1938 			 * aborted by the device.
1939 			 */
1940 			if (may_fallback) {
1941 				may_fallback = 0;
1942 
1943 				if (class == ATA_DEV_ATA)
1944 					class = ATA_DEV_ATAPI;
1945 				else
1946 					class = ATA_DEV_ATA;
1947 				goto retry;
1948 			}
1949 
1950 			/* Control reaches here iff the device aborted
1951 			 * both flavors of IDENTIFYs which happens
1952 			 * sometimes with phantom devices.
1953 			 */
1954 			ata_dev_dbg(dev,
1955 				    "both IDENTIFYs aborted, assuming NODEV\n");
1956 			return -ENOENT;
1957 		}
1958 
1959 		rc = -EIO;
1960 		reason = "I/O error";
1961 		goto err_out;
1962 	}
1963 
1964 	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1965 		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1966 			    "class=%d may_fallback=%d tried_spinup=%d\n",
1967 			    class, may_fallback, tried_spinup);
1968 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1969 			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1970 	}
1971 
1972 	/* Falling back doesn't make sense if ID data was read
1973 	 * successfully at least once.
1974 	 */
1975 	may_fallback = 0;
1976 
1977 	swap_buf_le16(id, ATA_ID_WORDS);
1978 
1979 	/* sanity check */
1980 	rc = -EINVAL;
1981 	reason = "device reports invalid type";
1982 
1983 	if (class == ATA_DEV_ATA) {
1984 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1985 			goto err_out;
1986 		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1987 							ata_id_is_ata(id)) {
1988 			ata_dev_dbg(dev,
1989 				"host indicates ignore ATA devices, ignored\n");
1990 			return -ENOENT;
1991 		}
1992 	} else {
1993 		if (ata_id_is_ata(id))
1994 			goto err_out;
1995 	}
1996 
1997 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1998 		tried_spinup = 1;
1999 		/*
2000 		 * Drive powered-up in standby mode, and requires a specific
2001 		 * SET_FEATURES spin-up subcommand before it will accept
2002 		 * anything other than the original IDENTIFY command.
2003 		 */
2004 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2005 		if (err_mask && id[2] != 0x738c) {
2006 			rc = -EIO;
2007 			reason = "SPINUP failed";
2008 			goto err_out;
2009 		}
2010 		/*
2011 		 * If the drive initially returned incomplete IDENTIFY info,
2012 		 * we now must reissue the IDENTIFY command.
2013 		 */
2014 		if (id[2] == 0x37c8)
2015 			goto retry;
2016 	}
2017 
2018 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2019 		/*
2020 		 * The exact sequence expected by certain pre-ATA4 drives is:
2021 		 * SRST RESET
2022 		 * IDENTIFY (optional in early ATA)
2023 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2024 		 * anything else..
2025 		 * Some drives were very specific about that exact sequence.
2026 		 *
2027 		 * Note that ATA4 says lba is mandatory so the second check
2028 		 * should never trigger.
2029 		 */
2030 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2031 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2032 			if (err_mask) {
2033 				rc = -EIO;
2034 				reason = "INIT_DEV_PARAMS failed";
2035 				goto err_out;
2036 			}
2037 
2038 			/* current CHS translation info (id[53-58]) might be
2039 			 * changed. reread the identify device info.
2040 			 */
2041 			flags &= ~ATA_READID_POSTRESET;
2042 			goto retry;
2043 		}
2044 	}
2045 
2046 	*p_class = class;
2047 
2048 	return 0;
2049 
2050  err_out:
2051 	if (ata_msg_warn(ap))
2052 		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2053 			     reason, err_mask);
2054 	return rc;
2055 }
2056 
2057 static int ata_do_link_spd_horkage(struct ata_device *dev)
2058 {
2059 	struct ata_link *plink = ata_dev_phys_link(dev);
2060 	u32 target, target_limit;
2061 
2062 	if (!sata_scr_valid(plink))
2063 		return 0;
2064 
2065 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2066 		target = 1;
2067 	else
2068 		return 0;
2069 
2070 	target_limit = (1 << target) - 1;
2071 
2072 	/* if already on stricter limit, no need to push further */
2073 	if (plink->sata_spd_limit <= target_limit)
2074 		return 0;
2075 
2076 	plink->sata_spd_limit = target_limit;
2077 
2078 	/* Request another EH round by returning -EAGAIN if link is
2079 	 * going faster than the target speed.  Forward progress is
2080 	 * guaranteed by setting sata_spd_limit to target_limit above.
2081 	 */
2082 	if (plink->sata_spd > target) {
2083 		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2084 			     sata_spd_string(target));
2085 		return -EAGAIN;
2086 	}
2087 	return 0;
2088 }
2089 
2090 static inline u8 ata_dev_knobble(struct ata_device *dev)
2091 {
2092 	struct ata_port *ap = dev->link->ap;
2093 
2094 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2095 		return 0;
2096 
2097 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2098 }
2099 
2100 static int ata_dev_config_ncq(struct ata_device *dev,
2101 			       char *desc, size_t desc_sz)
2102 {
2103 	struct ata_port *ap = dev->link->ap;
2104 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2105 	unsigned int err_mask;
2106 	char *aa_desc = "";
2107 
2108 	if (!ata_id_has_ncq(dev->id)) {
2109 		desc[0] = '\0';
2110 		return 0;
2111 	}
2112 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2113 		snprintf(desc, desc_sz, "NCQ (not used)");
2114 		return 0;
2115 	}
2116 	if (ap->flags & ATA_FLAG_NCQ) {
2117 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2118 		dev->flags |= ATA_DFLAG_NCQ;
2119 	}
2120 
2121 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2122 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2123 		ata_id_has_fpdma_aa(dev->id)) {
2124 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2125 			SATA_FPDMA_AA);
2126 		if (err_mask) {
2127 			ata_dev_err(dev,
2128 				    "failed to enable AA (error_mask=0x%x)\n",
2129 				    err_mask);
2130 			if (err_mask != AC_ERR_DEV) {
2131 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2132 				return -EIO;
2133 			}
2134 		} else
2135 			aa_desc = ", AA";
2136 	}
2137 
2138 	if (hdepth >= ddepth)
2139 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2140 	else
2141 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2142 			ddepth, aa_desc);
2143 
2144 	if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
2145 	    ata_id_has_ncq_send_and_recv(dev->id)) {
2146 		err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2147 					     0, ap->sector_buf, 1);
2148 		if (err_mask) {
2149 			ata_dev_dbg(dev,
2150 				    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2151 				    err_mask);
2152 		} else {
2153 			u8 *cmds = dev->ncq_send_recv_cmds;
2154 
2155 			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2156 			memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2157 
2158 			if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2159 				ata_dev_dbg(dev, "disabling queued TRIM support\n");
2160 				cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2161 					~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2162 			}
2163 		}
2164 	}
2165 
2166 	return 0;
2167 }
2168 
2169 /**
2170  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2171  *	@dev: Target device to configure
2172  *
2173  *	Configure @dev according to @dev->id.  Generic and low-level
2174  *	driver specific fixups are also applied.
2175  *
2176  *	LOCKING:
2177  *	Kernel thread context (may sleep)
2178  *
2179  *	RETURNS:
2180  *	0 on success, -errno otherwise
2181  */
2182 int ata_dev_configure(struct ata_device *dev)
2183 {
2184 	struct ata_port *ap = dev->link->ap;
2185 	struct ata_eh_context *ehc = &dev->link->eh_context;
2186 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2187 	const u16 *id = dev->id;
2188 	unsigned long xfer_mask;
2189 	unsigned int err_mask;
2190 	char revbuf[7];		/* XYZ-99\0 */
2191 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2192 	char modelbuf[ATA_ID_PROD_LEN+1];
2193 	int rc;
2194 
2195 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2196 		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2197 		return 0;
2198 	}
2199 
2200 	if (ata_msg_probe(ap))
2201 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2202 
2203 	/* set horkage */
2204 	dev->horkage |= ata_dev_blacklisted(dev);
2205 	ata_force_horkage(dev);
2206 
2207 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2208 		ata_dev_info(dev, "unsupported device, disabling\n");
2209 		ata_dev_disable(dev);
2210 		return 0;
2211 	}
2212 
2213 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2214 	    dev->class == ATA_DEV_ATAPI) {
2215 		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2216 			     atapi_enabled ? "not supported with this driver"
2217 			     : "disabled");
2218 		ata_dev_disable(dev);
2219 		return 0;
2220 	}
2221 
2222 	rc = ata_do_link_spd_horkage(dev);
2223 	if (rc)
2224 		return rc;
2225 
2226 	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2227 	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2228 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2229 		dev->horkage |= ATA_HORKAGE_NOLPM;
2230 
2231 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2232 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2233 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2234 	}
2235 
2236 	/* let ACPI work its magic */
2237 	rc = ata_acpi_on_devcfg(dev);
2238 	if (rc)
2239 		return rc;
2240 
2241 	/* massage HPA, do it early as it might change IDENTIFY data */
2242 	rc = ata_hpa_resize(dev);
2243 	if (rc)
2244 		return rc;
2245 
2246 	/* print device capabilities */
2247 	if (ata_msg_probe(ap))
2248 		ata_dev_dbg(dev,
2249 			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2250 			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2251 			    __func__,
2252 			    id[49], id[82], id[83], id[84],
2253 			    id[85], id[86], id[87], id[88]);
2254 
2255 	/* initialize to-be-configured parameters */
2256 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2257 	dev->max_sectors = 0;
2258 	dev->cdb_len = 0;
2259 	dev->n_sectors = 0;
2260 	dev->cylinders = 0;
2261 	dev->heads = 0;
2262 	dev->sectors = 0;
2263 	dev->multi_count = 0;
2264 
2265 	/*
2266 	 * common ATA, ATAPI feature tests
2267 	 */
2268 
2269 	/* find max transfer mode; for printk only */
2270 	xfer_mask = ata_id_xfermask(id);
2271 
2272 	if (ata_msg_probe(ap))
2273 		ata_dump_id(id);
2274 
2275 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2276 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2277 			sizeof(fwrevbuf));
2278 
2279 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2280 			sizeof(modelbuf));
2281 
2282 	/* ATA-specific feature tests */
2283 	if (dev->class == ATA_DEV_ATA) {
2284 		if (ata_id_is_cfa(id)) {
2285 			/* CPRM may make this media unusable */
2286 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2287 				ata_dev_warn(dev,
2288 	"supports DRM functions and may not be fully accessible\n");
2289 			snprintf(revbuf, 7, "CFA");
2290 		} else {
2291 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2292 			/* Warn the user if the device has TPM extensions */
2293 			if (ata_id_has_tpm(id))
2294 				ata_dev_warn(dev,
2295 	"supports DRM functions and may not be fully accessible\n");
2296 		}
2297 
2298 		dev->n_sectors = ata_id_n_sectors(id);
2299 
2300 		/* get current R/W Multiple count setting */
2301 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2302 			unsigned int max = dev->id[47] & 0xff;
2303 			unsigned int cnt = dev->id[59] & 0xff;
2304 			/* only recognize/allow powers of two here */
2305 			if (is_power_of_2(max) && is_power_of_2(cnt))
2306 				if (cnt <= max)
2307 					dev->multi_count = cnt;
2308 		}
2309 
2310 		if (ata_id_has_lba(id)) {
2311 			const char *lba_desc;
2312 			char ncq_desc[24];
2313 
2314 			lba_desc = "LBA";
2315 			dev->flags |= ATA_DFLAG_LBA;
2316 			if (ata_id_has_lba48(id)) {
2317 				dev->flags |= ATA_DFLAG_LBA48;
2318 				lba_desc = "LBA48";
2319 
2320 				if (dev->n_sectors >= (1UL << 28) &&
2321 				    ata_id_has_flush_ext(id))
2322 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2323 			}
2324 
2325 			/* config NCQ */
2326 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2327 			if (rc)
2328 				return rc;
2329 
2330 			/* print device info to dmesg */
2331 			if (ata_msg_drv(ap) && print_info) {
2332 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2333 					     revbuf, modelbuf, fwrevbuf,
2334 					     ata_mode_string(xfer_mask));
2335 				ata_dev_info(dev,
2336 					     "%llu sectors, multi %u: %s %s\n",
2337 					(unsigned long long)dev->n_sectors,
2338 					dev->multi_count, lba_desc, ncq_desc);
2339 			}
2340 		} else {
2341 			/* CHS */
2342 
2343 			/* Default translation */
2344 			dev->cylinders	= id[1];
2345 			dev->heads	= id[3];
2346 			dev->sectors	= id[6];
2347 
2348 			if (ata_id_current_chs_valid(id)) {
2349 				/* Current CHS translation is valid. */
2350 				dev->cylinders = id[54];
2351 				dev->heads     = id[55];
2352 				dev->sectors   = id[56];
2353 			}
2354 
2355 			/* print device info to dmesg */
2356 			if (ata_msg_drv(ap) && print_info) {
2357 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2358 					     revbuf,	modelbuf, fwrevbuf,
2359 					     ata_mode_string(xfer_mask));
2360 				ata_dev_info(dev,
2361 					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2362 					     (unsigned long long)dev->n_sectors,
2363 					     dev->multi_count, dev->cylinders,
2364 					     dev->heads, dev->sectors);
2365 			}
2366 		}
2367 
2368 		/* Check and mark DevSlp capability. Get DevSlp timing variables
2369 		 * from SATA Settings page of Identify Device Data Log.
2370 		 */
2371 		if (ata_id_has_devslp(dev->id)) {
2372 			u8 *sata_setting = ap->sector_buf;
2373 			int i, j;
2374 
2375 			dev->flags |= ATA_DFLAG_DEVSLP;
2376 			err_mask = ata_read_log_page(dev,
2377 						     ATA_LOG_SATA_ID_DEV_DATA,
2378 						     ATA_LOG_SATA_SETTINGS,
2379 						     sata_setting,
2380 						     1);
2381 			if (err_mask)
2382 				ata_dev_dbg(dev,
2383 					    "failed to get Identify Device Data, Emask 0x%x\n",
2384 					    err_mask);
2385 			else
2386 				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2387 					j = ATA_LOG_DEVSLP_OFFSET + i;
2388 					dev->devslp_timing[i] = sata_setting[j];
2389 				}
2390 		}
2391 
2392 		dev->cdb_len = 16;
2393 	}
2394 
2395 	/* ATAPI-specific feature tests */
2396 	else if (dev->class == ATA_DEV_ATAPI) {
2397 		const char *cdb_intr_string = "";
2398 		const char *atapi_an_string = "";
2399 		const char *dma_dir_string = "";
2400 		u32 sntf;
2401 
2402 		rc = atapi_cdb_len(id);
2403 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2404 			if (ata_msg_warn(ap))
2405 				ata_dev_warn(dev, "unsupported CDB len\n");
2406 			rc = -EINVAL;
2407 			goto err_out_nosup;
2408 		}
2409 		dev->cdb_len = (unsigned int) rc;
2410 
2411 		/* Enable ATAPI AN if both the host and device have
2412 		 * the support.  If PMP is attached, SNTF is required
2413 		 * to enable ATAPI AN to discern between PHY status
2414 		 * changed notifications and ATAPI ANs.
2415 		 */
2416 		if (atapi_an &&
2417 		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2418 		    (!sata_pmp_attached(ap) ||
2419 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2420 			/* issue SET feature command to turn this on */
2421 			err_mask = ata_dev_set_feature(dev,
2422 					SETFEATURES_SATA_ENABLE, SATA_AN);
2423 			if (err_mask)
2424 				ata_dev_err(dev,
2425 					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2426 					    err_mask);
2427 			else {
2428 				dev->flags |= ATA_DFLAG_AN;
2429 				atapi_an_string = ", ATAPI AN";
2430 			}
2431 		}
2432 
2433 		if (ata_id_cdb_intr(dev->id)) {
2434 			dev->flags |= ATA_DFLAG_CDB_INTR;
2435 			cdb_intr_string = ", CDB intr";
2436 		}
2437 
2438 		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2439 			dev->flags |= ATA_DFLAG_DMADIR;
2440 			dma_dir_string = ", DMADIR";
2441 		}
2442 
2443 		if (ata_id_has_da(dev->id)) {
2444 			dev->flags |= ATA_DFLAG_DA;
2445 			zpodd_init(dev);
2446 		}
2447 
2448 		/* print device info to dmesg */
2449 		if (ata_msg_drv(ap) && print_info)
2450 			ata_dev_info(dev,
2451 				     "ATAPI: %s, %s, max %s%s%s%s\n",
2452 				     modelbuf, fwrevbuf,
2453 				     ata_mode_string(xfer_mask),
2454 				     cdb_intr_string, atapi_an_string,
2455 				     dma_dir_string);
2456 	}
2457 
2458 	/* determine max_sectors */
2459 	dev->max_sectors = ATA_MAX_SECTORS;
2460 	if (dev->flags & ATA_DFLAG_LBA48)
2461 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2462 
2463 	/* Limit PATA drives behind SATA cable bridges to udma5 and the
2464 	   default ATA_MAX_SECTORS transfer size */
2465 	if (ata_dev_knobble(dev)) {
2466 		if (ata_msg_drv(ap) && print_info)
2467 			ata_dev_info(dev, "applying bridge limits\n");
2468 		dev->udma_mask &= ATA_UDMA5;
2469 		dev->max_sectors = ATA_MAX_SECTORS;
2470 	}
2471 
2472 	if ((dev->class == ATA_DEV_ATAPI) &&
2473 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2474 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2475 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2476 	}
2477 
2478 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2479 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2480 					 dev->max_sectors);
2481 
2482 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2483 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2484 
2485 	if (ap->ops->dev_config)
2486 		ap->ops->dev_config(dev);
2487 
2488 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2489 		/* Let the user know. We don't want to disallow opens for
2490 		   rescue purposes, or in case the vendor is just a blithering
2491 		   idiot. Do this after the dev_config call as some controllers
2492 		   with buggy firmware may want to avoid reporting false device
2493 		   bugs */
2494 
2495 		if (print_info) {
2496 			ata_dev_warn(dev,
2497 "Drive reports diagnostics failure. This may indicate a drive\n");
2498 			ata_dev_warn(dev,
2499 "fault or invalid emulation. Contact drive vendor for information.\n");
2500 		}
2501 	}
2502 
2503 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2504 		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2505 		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2506 	}
2507 
2508 	return 0;
2509 
2510 err_out_nosup:
2511 	if (ata_msg_probe(ap))
2512 		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2513 	return rc;
2514 }
2515 
2516 /**
2517  *	ata_cable_40wire	-	return 40 wire cable type
2518  *	@ap: port
2519  *
2520  *	Helper method for drivers which want to hardwire 40 wire cable
2521  *	detection.
2522  */
2523 
2524 int ata_cable_40wire(struct ata_port *ap)
2525 {
2526 	return ATA_CBL_PATA40;
2527 }
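
/*
 * Illustrative sketch (assumption): the cable helpers here are not
 * normally called by hand; a driver wires the appropriate one into its
 * port operations, e.g. for hardware known to ship only 40-wire cables:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= foo_set_piomode,
 *	};
 *
 * foo_port_ops/foo_set_piomode are placeholders; ata_bmdma_port_ops is
 * the usual base for BMDMA-style PATA drivers.
 */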
2528 
2529 /**
2530  *	ata_cable_80wire	-	return 80 wire cable type
2531  *	@ap: port
2532  *
2533  *	Helper method for drivers which want to hardwire 80 wire cable
2534  *	detection.
2535  */
2536 
2537 int ata_cable_80wire(struct ata_port *ap)
2538 {
2539 	return ATA_CBL_PATA80;
2540 }
2541 
2542 /**
2543  *	ata_cable_unknown	-	return unknown PATA cable.
2544  *	@ap: port
2545  *
2546  *	Helper method for drivers which have no PATA cable detection.
2547  */
2548 
2549 int ata_cable_unknown(struct ata_port *ap)
2550 {
2551 	return ATA_CBL_PATA_UNK;
2552 }
2553 
2554 /**
2555  *	ata_cable_ignore	-	return ignored PATA cable.
2556  *	@ap: port
2557  *
2558  *	Helper method for drivers which don't use cable type to limit
2559  *	transfer mode.
2560  */
2561 int ata_cable_ignore(struct ata_port *ap)
2562 {
2563 	return ATA_CBL_PATA_IGN;
2564 }
2565 
2566 /**
2567  *	ata_cable_sata	-	return SATA cable type
2568  *	@ap: port
2569  *
2570  *	Helper method for drivers which have SATA cables
2571  */
2572 
2573 int ata_cable_sata(struct ata_port *ap)
2574 {
2575 	return ATA_CBL_SATA;
2576 }
2577 
2578 /**
2579  *	ata_bus_probe - Reset and probe ATA bus
2580  *	@ap: Bus to probe
2581  *
2582  *	Master ATA bus probing function.  Initiates a hardware-dependent
2583  *	bus reset, then attempts to identify any devices found on
2584  *	the bus.
2585  *
2586  *	LOCKING:
2587  *	PCI/etc. bus probe sem.
2588  *
2589  *	RETURNS:
2590  *	Zero on success, negative errno otherwise.
2591  */
2592 
2593 int ata_bus_probe(struct ata_port *ap)
2594 {
2595 	unsigned int classes[ATA_MAX_DEVICES];
2596 	int tries[ATA_MAX_DEVICES];
2597 	int rc;
2598 	struct ata_device *dev;
2599 
2600 	ata_for_each_dev(dev, &ap->link, ALL)
2601 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2602 
2603  retry:
2604 	ata_for_each_dev(dev, &ap->link, ALL) {
2605 		/* If we issue an SRST then an ATA drive (not ATAPI)
2606 		 * may change configuration and be in PIO0 timing. If
2607 		 * we do a hard reset (or are coming from power on)
2608 		 * this is true for ATA or ATAPI. Until we've set a
2609 		 * suitable controller mode we should not touch the
2610 		 * bus as we may be talking too fast.
2611 		 */
2612 		dev->pio_mode = XFER_PIO_0;
2613 		dev->dma_mode = 0xff;
2614 
2615 		/* If the controller has a pio mode setup function
2616 		 * then use it to set the chipset to rights. Don't
2617 		 * touch the DMA setup as that will be dealt with when
2618 		 * configuring devices.
2619 		 */
2620 		if (ap->ops->set_piomode)
2621 			ap->ops->set_piomode(ap, dev);
2622 	}
2623 
2624 	/* reset and determine device classes */
2625 	ap->ops->phy_reset(ap);
2626 
2627 	ata_for_each_dev(dev, &ap->link, ALL) {
2628 		if (dev->class != ATA_DEV_UNKNOWN)
2629 			classes[dev->devno] = dev->class;
2630 		else
2631 			classes[dev->devno] = ATA_DEV_NONE;
2632 
2633 		dev->class = ATA_DEV_UNKNOWN;
2634 	}
2635 
2636 	/* read IDENTIFY page and configure devices. We have to do the identify
2637 	   specific sequence bass-ackwards so that PDIAG- is released by
2638 	   the slave device */
2639 
2640 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2641 		if (tries[dev->devno])
2642 			dev->class = classes[dev->devno];
2643 
2644 		if (!ata_dev_enabled(dev))
2645 			continue;
2646 
2647 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2648 				     dev->id);
2649 		if (rc)
2650 			goto fail;
2651 	}
2652 
2653 	/* Now ask for the cable type as PDIAG- should have been released */
2654 	if (ap->ops->cable_detect)
2655 		ap->cbl = ap->ops->cable_detect(ap);
2656 
2657 	/* We may have SATA bridge glue hiding here irrespective of
2658 	 * the reported cable types and sensed types.  When SATA
2659 	 * drives indicate we have a bridge, we don't know which end
2660 	 * of the link the bridge is on, which is a problem.
2661 	 */
2662 	ata_for_each_dev(dev, &ap->link, ENABLED)
2663 		if (ata_id_is_sata(dev->id))
2664 			ap->cbl = ATA_CBL_SATA;
2665 
2666 	/* After the identify sequence we can now set up the devices. We do
2667 	   this in the normal order so that the user doesn't get confused */
2668 
2669 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2670 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2671 		rc = ata_dev_configure(dev);
2672 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2673 		if (rc)
2674 			goto fail;
2675 	}
2676 
2677 	/* configure transfer mode */
2678 	rc = ata_set_mode(&ap->link, &dev);
2679 	if (rc)
2680 		goto fail;
2681 
2682 	ata_for_each_dev(dev, &ap->link, ENABLED)
2683 		return 0;
2684 
2685 	return -ENODEV;
2686 
2687  fail:
2688 	tries[dev->devno]--;
2689 
2690 	switch (rc) {
2691 	case -EINVAL:
2692 		/* eeek, something went very wrong, give up */
2693 		tries[dev->devno] = 0;
2694 		break;
2695 
2696 	case -ENODEV:
2697 		/* give it just one more chance */
2698 		tries[dev->devno] = min(tries[dev->devno], 1);
2699 	case -EIO:
2700 		if (tries[dev->devno] == 1) {
2701 			/* This is the last chance, better to slow
2702 			 * down than lose it.
2703 			 */
2704 			sata_down_spd_limit(&ap->link, 0);
2705 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2706 		}
2707 	}
2708 
2709 	if (!tries[dev->devno])
2710 		ata_dev_disable(dev);
2711 
2712 	goto retry;
2713 }
2714 
2715 /**
2716  *	sata_print_link_status - Print SATA link status
2717  *	@link: SATA link to printk link status about
2718  *
2719  *	This function prints link speed and status of a SATA link.
2720  *
2721  *	LOCKING:
2722  *	None.
2723  */
2724 static void sata_print_link_status(struct ata_link *link)
2725 {
2726 	u32 sstatus, scontrol, tmp;
2727 
2728 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2729 		return;
2730 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2731 
2732 	if (ata_phys_link_online(link)) {
2733 		tmp = (sstatus >> 4) & 0xf;
2734 		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2735 			      sata_spd_string(tmp), sstatus, scontrol);
2736 	} else {
2737 		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2738 			      sstatus, scontrol);
2739 	}
2740 }
2741 
2742 /**
2743  *	ata_dev_pair		-	return other device on cable
2744  *	@adev: device
2745  *
2746  *	Obtain the other device on the same cable, or if none is
2747  *	present NULL is returned
2748  */
2749 
2750 struct ata_device *ata_dev_pair(struct ata_device *adev)
2751 {
2752 	struct ata_link *link = adev->link;
2753 	struct ata_device *pair = &link->device[1 - adev->devno];
2754 	if (!ata_dev_enabled(pair))
2755 		return NULL;
2756 	return pair;
2757 }
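
/*
 * Illustrative sketch (assumption): ata_dev_pair() is mostly useful in
 * PATA timing code where both devices on a channel share a register and
 * the slower one must win:
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *	struct ata_timing t, p;
 *
 *	ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
 *	if (pair) {
 *		ata_timing_compute(pair, pair->pio_mode, &p, T, 0);
 *		ata_timing_merge(&p, &t, &t, ATA_TIMING_ALL);
 *	}
 *
 * T stands for the controller clock period as used by
 * ata_timing_compute() later in this file.
 */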
2758 
2759 /**
2760  *	sata_down_spd_limit - adjust SATA spd limit downward
2761  *	@link: Link to adjust SATA spd limit for
2762  *	@spd_limit: Additional limit
2763  *
2764  *	Adjust SATA spd limit of @link downward.  Note that this
2765  *	function only adjusts the limit.  The change must be applied
2766  *	using sata_set_spd().
2767  *
2768  *	If @spd_limit is non-zero, the speed is limited to equal to or
2769  *	lower than @spd_limit if such speed is supported.  If
2770  *	@spd_limit is slower than any supported speed, only the lowest
2771  *	supported speed is allowed.
2772  *
2773  *	LOCKING:
2774  *	Inherited from caller.
2775  *
2776  *	RETURNS:
2777  *	0 on success, negative errno on failure
2778  */
2779 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2780 {
2781 	u32 sstatus, spd, mask;
2782 	int rc, bit;
2783 
2784 	if (!sata_scr_valid(link))
2785 		return -EOPNOTSUPP;
2786 
2787 	/* If SCR can be read, use it to determine the current SPD.
2788 	 * If not, use cached value in link->sata_spd.
2789 	 */
2790 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2791 	if (rc == 0 && ata_sstatus_online(sstatus))
2792 		spd = (sstatus >> 4) & 0xf;
2793 	else
2794 		spd = link->sata_spd;
2795 
2796 	mask = link->sata_spd_limit;
2797 	if (mask <= 1)
2798 		return -EINVAL;
2799 
2800 	/* unconditionally mask off the highest bit */
2801 	bit = fls(mask) - 1;
2802 	mask &= ~(1 << bit);
2803 
2804 	/* Mask off all speeds higher than or equal to the current
2805 	 * one.  Force 1.5Gbps if current SPD is not available.
2806 	 */
2807 	if (spd > 1)
2808 		mask &= (1 << (spd - 1)) - 1;
2809 	else
2810 		mask &= 1;
2811 
2812 	/* were we already at the bottom? */
2813 	if (!mask)
2814 		return -EINVAL;
2815 
2816 	if (spd_limit) {
2817 		if (mask & ((1 << spd_limit) - 1))
2818 			mask &= (1 << spd_limit) - 1;
2819 		else {
2820 			bit = ffs(mask) - 1;
2821 			mask = 1 << bit;
2822 		}
2823 	}
2824 
2825 	link->sata_spd_limit = mask;
2826 
2827 	ata_link_warn(link, "limiting SATA link speed to %s\n",
2828 		      sata_spd_string(fls(mask)));
2829 
2830 	return 0;
2831 }
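
/*
 * Worked example of the mask arithmetic above (illustrative): with
 * link->sata_spd_limit == 0x7 (1.5, 3.0 and 6.0 Gbps all allowed) and
 * the link currently at spd == 3 (6.0 Gbps), dropping the highest bit
 * gives mask = 0x3, and masking off speeds >= the current one with
 * (1 << (spd - 1)) - 1 == 0x3 leaves 0x3, so the link is now limited
 * to 3.0 Gbps.  A further call once the link has renegotiated to
 * 3.0 Gbps (spd == 2) leaves only bit 0, i.e. 1.5 Gbps.
 */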
2832 
2833 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2834 {
2835 	struct ata_link *host_link = &link->ap->link;
2836 	u32 limit, target, spd;
2837 
2838 	limit = link->sata_spd_limit;
2839 
2840 	/* Don't configure downstream link faster than upstream link.
2841 	 * It doesn't speed up anything and some PMPs choke on such
2842 	 * configuration.
2843 	 */
2844 	if (!ata_is_host_link(link) && host_link->sata_spd)
2845 		limit &= (1 << host_link->sata_spd) - 1;
2846 
2847 	if (limit == UINT_MAX)
2848 		target = 0;
2849 	else
2850 		target = fls(limit);
2851 
2852 	spd = (*scontrol >> 4) & 0xf;
2853 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2854 
2855 	return spd != target;
2856 }
2857 
2858 /**
2859  *	sata_set_spd_needed - is SATA spd configuration needed
2860  *	@link: Link in question
2861  *
2862  *	Test whether the spd limit in SControl matches
2863  *	@link->sata_spd_limit.  This function is used to determine
2864  *	whether hardreset is necessary to apply SATA spd
2865  *	configuration.
2866  *
2867  *	LOCKING:
2868  *	Inherited from caller.
2869  *
2870  *	RETURNS:
2871  *	1 if SATA spd configuration is needed, 0 otherwise.
2872  */
2873 static int sata_set_spd_needed(struct ata_link *link)
2874 {
2875 	u32 scontrol;
2876 
2877 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2878 		return 1;
2879 
2880 	return __sata_set_spd_needed(link, &scontrol);
2881 }
2882 
2883 /**
2884  *	sata_set_spd - set SATA spd according to spd limit
2885  *	@link: Link to set SATA spd for
2886  *
2887  *	Set SATA spd of @link according to sata_spd_limit.
2888  *
2889  *	LOCKING:
2890  *	Inherited from caller.
2891  *
2892  *	RETURNS:
2893  *	0 if spd doesn't need to be changed, 1 if spd has been
2894  *	changed.  Negative errno if SCR registers are inaccessible.
2895  */
2896 int sata_set_spd(struct ata_link *link)
2897 {
2898 	u32 scontrol;
2899 	int rc;
2900 
2901 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2902 		return rc;
2903 
2904 	if (!__sata_set_spd_needed(link, &scontrol))
2905 		return 0;
2906 
2907 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2908 		return rc;
2909 
2910 	return 1;
2911 }
2912 
2913 /*
2914  * This mode timing computation functionality is ported over from
2915  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2916  */
2917 /*
2918  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2919  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2920  * for UDMA6, which is currently supported only by Maxtor drives.
2921  *
2922  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2923  */
2924 
2925 static const struct ata_timing ata_timing[] = {
2926 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
2927 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
2928 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
2929 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
2930 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
2931 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
2932 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
2933 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
2934 
2935 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
2936 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
2937 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
2938 
2939 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
2940 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
2941 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
2942 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
2943 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
2944 
2945 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
2946 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
2947 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
2948 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
2949 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
2950 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
2951 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
2952 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
2953 
2954 	{ 0xFF }
2955 };
2956 
2957 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2958 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2959 
2960 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2961 {
2962 	q->setup	= EZ(t->setup      * 1000,  T);
2963 	q->act8b	= EZ(t->act8b      * 1000,  T);
2964 	q->rec8b	= EZ(t->rec8b      * 1000,  T);
2965 	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
2966 	q->active	= EZ(t->active     * 1000,  T);
2967 	q->recover	= EZ(t->recover    * 1000,  T);
2968 	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
2969 	q->cycle	= EZ(t->cycle      * 1000,  T);
2970 	q->udma		= EZ(t->udma       * 1000, UT);
2971 }
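
/*
 * Worked example of the quantisation above (illustrative): drivers
 * typically pass T and UT as the bus clock period in picoseconds while
 * the table entries are in nanoseconds, hence the * 1000.  With a
 * roughly 33 MHz clock (T == 30000 ps), a 70 ns setup time becomes
 * ENOUGH(70 * 1000, 30000) == ((70000 - 1) / 30000) + 1 == 3 clocks;
 * the division always rounds up so the device never sees a shorter
 * time than the table demands.
 */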
2972 
2973 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2974 		      struct ata_timing *m, unsigned int what)
2975 {
2976 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2977 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2978 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2979 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2980 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2981 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2982 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2983 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2984 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2985 }
2986 
2987 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2988 {
2989 	const struct ata_timing *t = ata_timing;
2990 
2991 	while (xfer_mode > t->mode)
2992 		t++;
2993 
2994 	if (xfer_mode == t->mode)
2995 		return t;
2996 
2997 	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2998 			__func__, xfer_mode);
2999 
3000 	return NULL;
3001 }
3002 
3003 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3004 		       struct ata_timing *t, int T, int UT)
3005 {
3006 	const u16 *id = adev->id;
3007 	const struct ata_timing *s;
3008 	struct ata_timing p;
3009 
3010 	/*
3011 	 * Find the mode.
3012 	 */
3013 
3014 	if (!(s = ata_timing_find_mode(speed)))
3015 		return -EINVAL;
3016 
3017 	memcpy(t, s, sizeof(*s));
3018 
3019 	/*
3020 	 * If the drive is an EIDE drive, it can tell us it needs extended
3021 	 * PIO/MW_DMA cycle timing.
3022 	 */
3023 
3024 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3025 		memset(&p, 0, sizeof(p));
3026 
3027 		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3028 			if (speed <= XFER_PIO_2)
3029 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3030 			else if ((speed <= XFER_PIO_4) ||
3031 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3032 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3033 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3034 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3035 
3036 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3037 	}
3038 
3039 	/*
3040 	 * Convert the timing to bus clock counts.
3041 	 */
3042 
3043 	ata_timing_quantize(t, t, T, UT);
3044 
3045 	/*
3046 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3047  * S.M.A.R.T. and some other commands. We have to ensure that the
3048  * DMA cycle timing is no faster than the fastest PIO timing.
3049 	 */
3050 
3051 	if (speed > XFER_PIO_6) {
3052 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3053 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3054 	}
3055 
3056 	/*
3057 	 * Lengthen active & recovery time so that cycle time is correct.
3058 	 */
3059 
3060 	if (t->act8b + t->rec8b < t->cyc8b) {
3061 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3062 		t->rec8b = t->cyc8b - t->act8b;
3063 	}
3064 
3065 	if (t->active + t->recover < t->cycle) {
3066 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3067 		t->recover = t->cycle - t->active;
3068 	}
3069 
3070 	/* In a few cases quantisation may produce enough error to
3071 	   leave t->cycle too low for the sum of active and recovery;
3072 	   if so we must correct this */
3073 	if (t->active + t->recover > t->cycle)
3074 		t->cycle = t->active + t->recover;
3075 
3076 	return 0;
3077 }
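
/*
 * Illustrative sketch (assumption): a typical PATA ->set_piomode hook
 * derives the clock period, lets ata_timing_compute() do the math and
 * then programs the quantised values into chip registers:
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	(clock period in ps for ~33 MHz)
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, T, 0)) {
 *		foo_write_setup(ap, adev->devno, t.setup);
 *		foo_write_act_rec(ap, adev->devno, t.active, t.recover);
 *	}
 *
 * foo_write_setup/foo_write_act_rec are placeholders for driver-specific
 * register accessors.
 */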
3078 
3079 /**
3080  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3081  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3082  *	@cycle: cycle duration in ns
3083  *
3084  *	Return matching xfer mode for @cycle.  The returned mode is of
3085  *	the transfer type specified by @xfer_shift.  If @cycle is too
3086  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3087  *	than the fastest known mode, the fastest mode is returned.
3088  *
3089  *	LOCKING:
3090  *	None.
3091  *
3092  *	RETURNS:
3093  *	Matching xfer_mode, 0xff if no match found.
3094  */
3095 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3096 {
3097 	u8 base_mode = 0xff, last_mode = 0xff;
3098 	const struct ata_xfer_ent *ent;
3099 	const struct ata_timing *t;
3100 
3101 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3102 		if (ent->shift == xfer_shift)
3103 			base_mode = ent->base;
3104 
3105 	for (t = ata_timing_find_mode(base_mode);
3106 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3107 		unsigned short this_cycle;
3108 
3109 		switch (xfer_shift) {
3110 		case ATA_SHIFT_PIO:
3111 		case ATA_SHIFT_MWDMA:
3112 			this_cycle = t->cycle;
3113 			break;
3114 		case ATA_SHIFT_UDMA:
3115 			this_cycle = t->udma;
3116 			break;
3117 		default:
3118 			return 0xff;
3119 		}
3120 
3121 		if (cycle > this_cycle)
3122 			break;
3123 
3124 		last_mode = t->mode;
3125 	}
3126 
3127 	return last_mode;
3128 }
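
/*
 * Worked example (illustrative): ata_timing_cycle2mode(ATA_SHIFT_PIO, 120)
 * walks the table above from XFER_PIO_0 (600 ns cycle) towards faster
 * modes, remembers the last mode whose cycle is still >= 120 ns and
 * stops at XFER_PIO_5 (100 ns), so XFER_PIO_4 is returned.  A cycle
 * faster than every entry returns the fastest mode of that type, while
 * a cycle slower than even the base mode leaves the initial 0xff in
 * place.
 */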
3129 
3130 /**
3131  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3132  *	@dev: Device to adjust xfer masks
3133  *	@sel: ATA_DNXFER_* selector
3134  *
3135  *	Adjust xfer masks of @dev downward.  Note that this function
3136  *	does not apply the change.  Invoking ata_set_mode() afterwards
3137  *	will apply the limit.
3138  *
3139  *	LOCKING:
3140  *	Inherited from caller.
3141  *
3142  *	RETURNS:
3143  *	0 on success, negative errno on failure
3144  */
3145 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3146 {
3147 	char buf[32];
3148 	unsigned long orig_mask, xfer_mask;
3149 	unsigned long pio_mask, mwdma_mask, udma_mask;
3150 	int quiet, highbit;
3151 
3152 	quiet = !!(sel & ATA_DNXFER_QUIET);
3153 	sel &= ~ATA_DNXFER_QUIET;
3154 
3155 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3156 						  dev->mwdma_mask,
3157 						  dev->udma_mask);
3158 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3159 
3160 	switch (sel) {
3161 	case ATA_DNXFER_PIO:
3162 		highbit = fls(pio_mask) - 1;
3163 		pio_mask &= ~(1 << highbit);
3164 		break;
3165 
3166 	case ATA_DNXFER_DMA:
3167 		if (udma_mask) {
3168 			highbit = fls(udma_mask) - 1;
3169 			udma_mask &= ~(1 << highbit);
3170 			if (!udma_mask)
3171 				return -ENOENT;
3172 		} else if (mwdma_mask) {
3173 			highbit = fls(mwdma_mask) - 1;
3174 			mwdma_mask &= ~(1 << highbit);
3175 			if (!mwdma_mask)
3176 				return -ENOENT;
3177 		}
3178 		break;
3179 
3180 	case ATA_DNXFER_40C:
3181 		udma_mask &= ATA_UDMA_MASK_40C;
3182 		break;
3183 
3184 	case ATA_DNXFER_FORCE_PIO0:
3185 		pio_mask &= 1;
3186 	case ATA_DNXFER_FORCE_PIO:
3187 		mwdma_mask = 0;
3188 		udma_mask = 0;
3189 		break;
3190 
3191 	default:
3192 		BUG();
3193 	}
3194 
3195 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3196 
3197 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3198 		return -ENOENT;
3199 
3200 	if (!quiet) {
3201 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3202 			snprintf(buf, sizeof(buf), "%s:%s",
3203 				 ata_mode_string(xfer_mask),
3204 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3205 		else
3206 			snprintf(buf, sizeof(buf), "%s",
3207 				 ata_mode_string(xfer_mask));
3208 
3209 		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3210 	}
3211 
3212 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3213 			    &dev->udma_mask);
3214 
3215 	return 0;
3216 }
3217 
3218 static int ata_dev_set_mode(struct ata_device *dev)
3219 {
3220 	struct ata_port *ap = dev->link->ap;
3221 	struct ata_eh_context *ehc = &dev->link->eh_context;
3222 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3223 	const char *dev_err_whine = "";
3224 	int ign_dev_err = 0;
3225 	unsigned int err_mask = 0;
3226 	int rc;
3227 
3228 	dev->flags &= ~ATA_DFLAG_PIO;
3229 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3230 		dev->flags |= ATA_DFLAG_PIO;
3231 
3232 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3233 		dev_err_whine = " (SET_XFERMODE skipped)";
3234 	else {
3235 		if (nosetxfer)
3236 			ata_dev_warn(dev,
3237 				     "NOSETXFER but PATA detected - can't "
3238 				     "skip SETXFER, might malfunction\n");
3239 		err_mask = ata_dev_set_xfermode(dev);
3240 	}
3241 
3242 	if (err_mask & ~AC_ERR_DEV)
3243 		goto fail;
3244 
3245 	/* revalidate */
3246 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3247 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3248 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3249 	if (rc)
3250 		return rc;
3251 
3252 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3253 		/* Old CFA may refuse this command, which is just fine */
3254 		if (ata_id_is_cfa(dev->id))
3255 			ign_dev_err = 1;
3256 		/* Catch several broken garbage emulations plus some pre
3257 		   ATA devices */
3258 		if (ata_id_major_version(dev->id) == 0 &&
3259 					dev->pio_mode <= XFER_PIO_2)
3260 			ign_dev_err = 1;
3261 		/* Some very old devices and some bad newer ones fail
3262 		   any kind of SET_XFERMODE request but support PIO0-2
3263 		   timings and no IORDY */
3264 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3265 			ign_dev_err = 1;
3266 	}
3267 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3268 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3269 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3270 	    dev->dma_mode == XFER_MW_DMA_0 &&
3271 	    (dev->id[63] >> 8) & 1)
3272 		ign_dev_err = 1;
3273 
3274 	/* if the device is actually configured correctly, ignore dev err */
3275 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3276 		ign_dev_err = 1;
3277 
3278 	if (err_mask & AC_ERR_DEV) {
3279 		if (!ign_dev_err)
3280 			goto fail;
3281 		else
3282 			dev_err_whine = " (device error ignored)";
3283 	}
3284 
3285 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3286 		dev->xfer_shift, (int)dev->xfer_mode);
3287 
3288 	ata_dev_info(dev, "configured for %s%s\n",
3289 		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3290 		     dev_err_whine);
3291 
3292 	return 0;
3293 
3294  fail:
3295 	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3296 	return -EIO;
3297 }
3298 
3299 /**
3300  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3301  *	@link: link on which timings will be programmed
3302  *	@r_failed_dev: out parameter for failed device
3303  *
3304  *	Standard implementation of the function used to tune and set
3305  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3306  *	ata_dev_set_mode() fails, pointer to the failing device is
3307  *	returned in @r_failed_dev.
3308  *
3309  *	LOCKING:
3310  *	PCI/etc. bus probe sem.
3311  *
3312  *	RETURNS:
3313  *	0 on success, negative errno otherwise
3314  */
3315 
3316 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3317 {
3318 	struct ata_port *ap = link->ap;
3319 	struct ata_device *dev;
3320 	int rc = 0, used_dma = 0, found = 0;
3321 
3322 	/* step 1: calculate xfer_mask */
3323 	ata_for_each_dev(dev, link, ENABLED) {
3324 		unsigned long pio_mask, dma_mask;
3325 		unsigned int mode_mask;
3326 
3327 		mode_mask = ATA_DMA_MASK_ATA;
3328 		if (dev->class == ATA_DEV_ATAPI)
3329 			mode_mask = ATA_DMA_MASK_ATAPI;
3330 		else if (ata_id_is_cfa(dev->id))
3331 			mode_mask = ATA_DMA_MASK_CFA;
3332 
3333 		ata_dev_xfermask(dev);
3334 		ata_force_xfermask(dev);
3335 
3336 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3337 
3338 		if (libata_dma_mask & mode_mask)
3339 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3340 						     dev->udma_mask);
3341 		else
3342 			dma_mask = 0;
3343 
3344 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3345 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3346 
3347 		found = 1;
3348 		if (ata_dma_enabled(dev))
3349 			used_dma = 1;
3350 	}
3351 	if (!found)
3352 		goto out;
3353 
3354 	/* step 2: always set host PIO timings */
3355 	ata_for_each_dev(dev, link, ENABLED) {
3356 		if (dev->pio_mode == 0xff) {
3357 			ata_dev_warn(dev, "no PIO support\n");
3358 			rc = -EINVAL;
3359 			goto out;
3360 		}
3361 
3362 		dev->xfer_mode = dev->pio_mode;
3363 		dev->xfer_shift = ATA_SHIFT_PIO;
3364 		if (ap->ops->set_piomode)
3365 			ap->ops->set_piomode(ap, dev);
3366 	}
3367 
3368 	/* step 3: set host DMA timings */
3369 	ata_for_each_dev(dev, link, ENABLED) {
3370 		if (!ata_dma_enabled(dev))
3371 			continue;
3372 
3373 		dev->xfer_mode = dev->dma_mode;
3374 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3375 		if (ap->ops->set_dmamode)
3376 			ap->ops->set_dmamode(ap, dev);
3377 	}
3378 
3379 	/* step 4: update devices' xfer mode */
3380 	ata_for_each_dev(dev, link, ENABLED) {
3381 		rc = ata_dev_set_mode(dev);
3382 		if (rc)
3383 			goto out;
3384 	}
3385 
3386 	/* Record simplex status. If we selected DMA then the other
3387 	 * host channels are not permitted to do so.
3388 	 */
3389 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3390 		ap->host->simplex_claimed = ap;
3391 
3392  out:
3393 	if (rc)
3394 		*r_failed_dev = dev;
3395 	return rc;
3396 }
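
/*
 * Illustrative sketch (assumption): drivers needing extra work around
 * mode programming do not reimplement the four steps above; they wrap
 * this helper from their ->set_mode hook:
 *
 *	static int foo_set_mode(struct ata_link *link,
 *				struct ata_device **r_failed_dev)
 *	{
 *		foo_tune_chip_defaults(link->ap);
 *		return ata_do_set_mode(link, r_failed_dev);
 *	}
 *
 * foo_set_mode/foo_tune_chip_defaults are placeholders for a driver
 * that must poke its controller before the standard sequence runs.
 */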
3397 
3398 /**
3399  *	ata_wait_ready - wait for link to become ready
3400  *	@link: link to be waited on
3401  *	@deadline: deadline jiffies for the operation
3402  *	@check_ready: callback to check link readiness
3403  *
3404  *	Wait for @link to become ready.  @check_ready should return
3405  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3406  *	link doesn't seem to be occupied, other errno for other error
3407  *	conditions.
3408  *
3409  *	Transient -ENODEV conditions are allowed for
3410  *	ATA_TMOUT_FF_WAIT.
3411  *
3412  *	LOCKING:
3413  *	EH context.
3414  *
3415  *	RETURNS:
3416  *	0 if @link is ready before @deadline; otherwise, -errno.
3417  */
3418 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3419 		   int (*check_ready)(struct ata_link *link))
3420 {
3421 	unsigned long start = jiffies;
3422 	unsigned long nodev_deadline;
3423 	int warned = 0;
3424 
3425 	/* choose which 0xff timeout to use, read comment in libata.h */
3426 	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3427 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3428 	else
3429 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3430 
3431 	/* Slave readiness can't be tested separately from master.  On
3432 	 * M/S emulation configuration, this function should be called
3433 	 * only on the master and it will handle both master and slave.
3434 	 */
3435 	WARN_ON(link == link->ap->slave_link);
3436 
3437 	if (time_after(nodev_deadline, deadline))
3438 		nodev_deadline = deadline;
3439 
3440 	while (1) {
3441 		unsigned long now = jiffies;
3442 		int ready, tmp;
3443 
3444 		ready = tmp = check_ready(link);
3445 		if (ready > 0)
3446 			return 0;
3447 
3448 		/*
3449 		 * -ENODEV could be transient.  Ignore -ENODEV if link
3450 		 * is online.  Also, some SATA devices take a long
3451 		 * time to clear 0xff after reset.  Wait for
3452 		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3453 		 * offline.
3454 		 *
3455 		 * Note that some PATA controllers (pata_ali) explode
3456 		 * if status register is read more than once when
3457 		 * there's no device attached.
3458 		 */
3459 		if (ready == -ENODEV) {
3460 			if (ata_link_online(link))
3461 				ready = 0;
3462 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3463 				 !ata_link_offline(link) &&
3464 				 time_before(now, nodev_deadline))
3465 				ready = 0;
3466 		}
3467 
3468 		if (ready)
3469 			return ready;
3470 		if (time_after(now, deadline))
3471 			return -EBUSY;
3472 
3473 		if (!warned && time_after(now, start + 5 * HZ) &&
3474 		    (deadline - now > 3 * HZ)) {
3475 			ata_link_warn(link,
3476 				"link is slow to respond, please be patient "
3477 				"(ready=%d)\n", tmp);
3478 			warned = 1;
3479 		}
3480 
3481 		ata_msleep(link->ap, 50);
3482 	}
3483 }
3484 
3485 /**
3486  *	ata_wait_after_reset - wait for link to become ready after reset
3487  *	@link: link to be waited on
3488  *	@deadline: deadline jiffies for the operation
3489  *	@check_ready: callback to check link readiness
3490  *
3491  *	Wait for @link to become ready after reset.
3492  *
3493  *	LOCKING:
3494  *	EH context.
3495  *
3496  *	RETURNS:
3497  *	0 if @link is ready before @deadline; otherwise, -errno.
3498  */
3499 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3500 				int (*check_ready)(struct ata_link *link))
3501 {
3502 	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3503 
3504 	return ata_wait_ready(link, deadline, check_ready);
3505 }
3506 
3507 /**
3508  *	sata_link_debounce - debounce SATA phy status
3509  *	@link: ATA link to debounce SATA phy status for
3510  *	@params: timing parameters { interval, duration, timeout } in msec
3511  *	@deadline: deadline jiffies for the operation
3512  *
3513  *	Make sure SStatus of @link reaches stable state, determined by
3514  *	holding the same value where DET is not 1 for @duration polled
3515  *	every @interval, before @timeout.  The timeout constrains the
3516  *	beginning of the stable state.  Because DET gets stuck at 1 on
3517  *	some controllers after hot unplugging, this function waits
3518  *	until timeout then returns 0 if DET is stable at 1.
3519  *
3520  *	@timeout is further limited by @deadline.  The sooner of the
3521  *	two is used.
3522  *
3523  *	LOCKING:
3524  *	Kernel thread context (may sleep)
3525  *
3526  *	RETURNS:
3527  *	0 on success, -errno on failure.
3528  */
3529 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3530 		       unsigned long deadline)
3531 {
3532 	unsigned long interval = params[0];
3533 	unsigned long duration = params[1];
3534 	unsigned long last_jiffies, t;
3535 	u32 last, cur;
3536 	int rc;
3537 
3538 	t = ata_deadline(jiffies, params[2]);
3539 	if (time_before(t, deadline))
3540 		deadline = t;
3541 
3542 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3543 		return rc;
3544 	cur &= 0xf;
3545 
3546 	last = cur;
3547 	last_jiffies = jiffies;
3548 
3549 	while (1) {
3550 		ata_msleep(link->ap, interval);
3551 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3552 			return rc;
3553 		cur &= 0xf;
3554 
3555 		/* DET stable? */
3556 		if (cur == last) {
3557 			if (cur == 1 && time_before(jiffies, deadline))
3558 				continue;
3559 			if (time_after(jiffies,
3560 				       ata_deadline(last_jiffies, duration)))
3561 				return 0;
3562 			continue;
3563 		}
3564 
3565 		/* unstable, start over */
3566 		last = cur;
3567 		last_jiffies = jiffies;
3568 
3569 		/* Check deadline.  If debouncing failed, return
3570 		 * -EPIPE to tell upper layer to lower link speed.
3571 		 */
3572 		if (time_after(jiffies, deadline))
3573 			return -EPIPE;
3574 	}
3575 }
3576 
3577 /**
3578  *	sata_link_resume - resume SATA link
3579  *	@link: ATA link to resume SATA
3580  *	@params: timing parameters { interval, duration, timeout } in msec
3581  *	@deadline: deadline jiffies for the operation
3582  *
3583  *	Resume SATA phy @link and debounce it.
3584  *
3585  *	LOCKING:
3586  *	Kernel thread context (may sleep)
3587  *
3588  *	RETURNS:
3589  *	0 on success, -errno on failure.
3590  */
3591 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3592 		     unsigned long deadline)
3593 {
3594 	int tries = ATA_LINK_RESUME_TRIES;
3595 	u32 scontrol, serror;
3596 	int rc;
3597 
3598 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3599 		return rc;
3600 
3601 	/*
3602 	 * Writes to SControl sometimes get ignored under certain
3603 	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3604 	 * cleared.
3605 	 */
3606 	do {
3607 		scontrol = (scontrol & 0x0f0) | 0x300;
3608 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3609 			return rc;
3610 		/*
3611 		 * Some PHYs react badly if SStatus is pounded
3612 		 * immediately after resuming.  Delay 200ms before
3613 		 * debouncing.
3614 		 */
3615 		ata_msleep(link->ap, 200);
3616 
3617 		/* is SControl restored correctly? */
3618 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3619 			return rc;
3620 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3621 
3622 	if ((scontrol & 0xf0f) != 0x300) {
3623 		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3624 			     scontrol);
3625 		return 0;
3626 	}
3627 
3628 	if (tries < ATA_LINK_RESUME_TRIES)
3629 		ata_link_warn(link, "link resume succeeded after %d retries\n",
3630 			      ATA_LINK_RESUME_TRIES - tries);
3631 
3632 	if ((rc = sata_link_debounce(link, params, deadline)))
3633 		return rc;
3634 
3635 	/* clear SError, some PHYs require this even for SRST to work */
3636 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3637 		rc = sata_scr_write(link, SCR_ERROR, serror);
3638 
3639 	return rc != -EINVAL ? rc : 0;
3640 }
3641 
3642 /**
3643  *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3644  *	@link: ATA link to manipulate SControl for
3645  *	@policy: LPM policy to configure
3646  *	@spm_wakeup: initiate LPM transition to active state
3647  *
3648  *	Manipulate the IPM field of the SControl register of @link
3649  *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3650  *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3651  *	the link.  This function also clears PHYRDY_CHG before
3652  *	returning.
3653  *
3654  *	LOCKING:
3655  *	EH context.
3656  *
3657  *	RETURNS:
3658  *	0 on success, -errno otherwise.
3659  */
3660 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3661 		      bool spm_wakeup)
3662 {
3663 	struct ata_eh_context *ehc = &link->eh_context;
3664 	bool woken_up = false;
3665 	u32 scontrol;
3666 	int rc;
3667 
3668 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3669 	if (rc)
3670 		return rc;
3671 
3672 	switch (policy) {
3673 	case ATA_LPM_MAX_POWER:
3674 		/* disable all LPM transitions */
3675 		scontrol |= (0x7 << 8);
3676 		/* initiate transition to active state */
3677 		if (spm_wakeup) {
3678 			scontrol |= (0x4 << 12);
3679 			woken_up = true;
3680 		}
3681 		break;
3682 	case ATA_LPM_MED_POWER:
3683 		/* allow LPM to PARTIAL */
3684 		scontrol &= ~(0x1 << 8);
3685 		scontrol |= (0x6 << 8);
3686 		break;
3687 	case ATA_LPM_MIN_POWER:
3688 		if (ata_link_nr_enabled(link) > 0)
3689 			/* no restrictions on LPM transitions */
3690 			scontrol &= ~(0x7 << 8);
3691 		else {
3692 			/* empty port, power off */
3693 			scontrol &= ~0xf;
3694 			scontrol |= (0x1 << 2);
3695 		}
3696 		break;
3697 	default:
3698 		WARN_ON(1);
3699 	}
3700 
3701 	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3702 	if (rc)
3703 		return rc;
3704 
3705 	/* give the link time to transit out of LPM state */
3706 	if (woken_up)
3707 		msleep(10);
3708 
3709 	/* clear PHYRDY_CHG from SError */
3710 	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3711 	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3712 }
3713 
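/*
 * Usage sketch (illustration only): the EH LPM code applies a policy
 * with a call such as
 *
 *	rc = sata_link_scr_lpm(link, ATA_LPM_MAX_POWER, true);
 *
 * which disallows transitions to the low-power phy states and, because
 * @spm_wakeup is true, also asks the phy to return to the active state.
 */
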
3714 /**
3715  *	ata_std_prereset - prepare for reset
3716  *	@link: ATA link to be reset
3717  *	@deadline: deadline jiffies for the operation
3718  *
3719  *	@link is about to be reset.  Initialize it.  Failure from
3720  *	prereset makes libata abort whole reset sequence and give up
3721  *	that port, so prereset should be best-effort.  It does its
3722  *	best to prepare for reset sequence but if things go wrong, it
3723  *	should just whine, not fail.
3724  *
3725  *	LOCKING:
3726  *	Kernel thread context (may sleep)
3727  *
3728  *	RETURNS:
3729  *	0 on success, -errno otherwise.
3730  */
3731 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3732 {
3733 	struct ata_port *ap = link->ap;
3734 	struct ata_eh_context *ehc = &link->eh_context;
3735 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3736 	int rc;
3737 
3738 	/* if we're about to do hardreset, nothing more to do */
3739 	if (ehc->i.action & ATA_EH_HARDRESET)
3740 		return 0;
3741 
3742 	/* if SATA, resume link */
3743 	if (ap->flags & ATA_FLAG_SATA) {
3744 		rc = sata_link_resume(link, timing, deadline);
3745 		/* whine about phy resume failure but proceed */
3746 		if (rc && rc != -EOPNOTSUPP)
3747 			ata_link_warn(link,
3748 				      "failed to resume link for reset (errno=%d)\n",
3749 				      rc);
3750 	}
3751 
3752 	/* no point in trying softreset on offline link */
3753 	if (ata_phys_link_offline(link))
3754 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3755 
3756 	return 0;
3757 }
3758 
3759 /**
3760  *	sata_link_hardreset - reset link via SATA phy reset
3761  *	@link: link to reset
3762  *	@timing: timing parameters { interval, duration, timeout } in msec
3763  *	@deadline: deadline jiffies for the operation
3764  *	@online: optional out parameter indicating link onlineness
3765  *	@check_ready: optional callback to check link readiness
3766  *
3767  *	SATA phy-reset @link using DET bits of SControl register.
3768  *	After hardreset, link readiness is waited upon using
3769  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3770  *	allowed to not specify @check_ready and wait themselves after this
3771  *	function returns.  Device classification is LLD's
3772  *	responsibility.
3773  *
3774  *	*@online is set to one iff reset succeeded and @link is online
3775  *	after reset.
3776  *
3777  *	LOCKING:
3778  *	Kernel thread context (may sleep)
3779  *
3780  *	RETURNS:
3781  *	0 on success, -errno otherwise.
3782  */
3783 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3784 			unsigned long deadline,
3785 			bool *online, int (*check_ready)(struct ata_link *))
3786 {
3787 	u32 scontrol;
3788 	int rc;
3789 
3790 	DPRINTK("ENTER\n");
3791 
3792 	if (online)
3793 		*online = false;
3794 
3795 	if (sata_set_spd_needed(link)) {
3796 		/* SATA spec says nothing about how to reconfigure
3797 		 * spd.  To be on the safe side, turn off phy during
3798 		 * reconfiguration.  This works for at least ICH7 AHCI
3799 		 * and Sil3124.
3800 		 */
3801 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3802 			goto out;
3803 
3804 		scontrol = (scontrol & 0x0f0) | 0x304;
3805 
3806 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3807 			goto out;
3808 
3809 		sata_set_spd(link);
3810 	}
3811 
3812 	/* issue phy wake/reset */
3813 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3814 		goto out;
3815 
3816 	scontrol = (scontrol & 0x0f0) | 0x301;
3817 
3818 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3819 		goto out;
3820 
3821 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3822 	 * 10.4.2 says at least 1 ms.
3823 	 */
3824 	ata_msleep(link->ap, 1);
3825 
3826 	/* bring link back */
3827 	rc = sata_link_resume(link, timing, deadline);
3828 	if (rc)
3829 		goto out;
3830 	/* if link is offline nothing more to do */
3831 	if (ata_phys_link_offline(link))
3832 		goto out;
3833 
3834 	/* Link is online.  From this point, -ENODEV too is an error. */
3835 	if (online)
3836 		*online = true;
3837 
3838 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3839 		/* If PMP is supported, we have to do follow-up SRST.
3840 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3841 		 * the first port is empty.  Wait only for
3842 		 * ATA_TMOUT_PMP_SRST_WAIT.
3843 		 */
3844 		if (check_ready) {
3845 			unsigned long pmp_deadline;
3846 
3847 			pmp_deadline = ata_deadline(jiffies,
3848 						    ATA_TMOUT_PMP_SRST_WAIT);
3849 			if (time_after(pmp_deadline, deadline))
3850 				pmp_deadline = deadline;
3851 			ata_wait_ready(link, pmp_deadline, check_ready);
3852 		}
3853 		rc = -EAGAIN;
3854 		goto out;
3855 	}
3856 
3857 	rc = 0;
3858 	if (check_ready)
3859 		rc = ata_wait_ready(link, deadline, check_ready);
3860  out:
3861 	if (rc && rc != -EAGAIN) {
3862 		/* online is set iff link is online && reset succeeded */
3863 		if (online)
3864 			*online = false;
3865 		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3866 	}
3867 	DPRINTK("EXIT, rc=%d\n", rc);
3868 	return rc;
3869 }
3870 
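/*
 * Illustration (sketch only; my_hardreset(), my_check_ready() and
 * my_classify_device() are placeholders, not libata symbols): a
 * controller driver that can test device readiness typically builds
 * its ->hardreset method on top of this helper, roughly:
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online,
 *					 my_check_ready);
 *		if (online)
 *			*class = my_classify_device(link);  -- driver specific
 *		return rc;
 *	}
 */
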
3871 /**
3872  *	sata_std_hardreset - COMRESET w/o waiting or classification
3873  *	@link: link to reset
3874  *	@class: resulting class of attached device
3875  *	@deadline: deadline jiffies for the operation
3876  *
3877  *	Standard SATA COMRESET w/o waiting or classification.
3878  *
3879  *	LOCKING:
3880  *	Kernel thread context (may sleep)
3881  *
3882  *	RETURNS:
3883  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3884  */
3885 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3886 		       unsigned long deadline)
3887 {
3888 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3889 	bool online;
3890 	int rc;
3891 
3892 	/* do hardreset */
3893 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3894 	return online ? -EAGAIN : rc;
3895 }
3896 
3897 /**
3898  *	ata_std_postreset - standard postreset callback
3899  *	@link: the target ata_link
3900  *	@classes: classes of attached devices
3901  *
3902  *	This function is invoked after a successful reset.  Note that
3903  *	the device might have been reset more than once using
3904  *	different reset methods before postreset is invoked.
3905  *
3906  *	LOCKING:
3907  *	Kernel thread context (may sleep)
3908  */
3909 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3910 {
3911 	u32 serror;
3912 
3913 	DPRINTK("ENTER\n");
3914 
3915 	/* reset complete, clear SError */
3916 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3917 		sata_scr_write(link, SCR_ERROR, serror);
3918 
3919 	/* print link status */
3920 	sata_print_link_status(link);
3921 
3922 	DPRINTK("EXIT\n");
3923 }
3924 
3925 /**
3926  *	ata_dev_same_device - Determine whether new ID matches configured device
3927  *	@dev: device to compare against
3928  *	@new_class: class of the new device
3929  *	@new_id: IDENTIFY page of the new device
3930  *
3931  *	Compare @new_class and @new_id against @dev and determine
3932  *	whether @dev is the device indicated by @new_class and
3933  *	@new_id.
3934  *
3935  *	LOCKING:
3936  *	None.
3937  *
3938  *	RETURNS:
3939  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3940  */
3941 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3942 			       const u16 *new_id)
3943 {
3944 	const u16 *old_id = dev->id;
3945 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3946 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3947 
3948 	if (dev->class != new_class) {
3949 		ata_dev_info(dev, "class mismatch %d != %d\n",
3950 			     dev->class, new_class);
3951 		return 0;
3952 	}
3953 
3954 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3955 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3956 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3957 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3958 
3959 	if (strcmp(model[0], model[1])) {
3960 		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3961 			     model[0], model[1]);
3962 		return 0;
3963 	}
3964 
3965 	if (strcmp(serial[0], serial[1])) {
3966 		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3967 			     serial[0], serial[1]);
3968 		return 0;
3969 	}
3970 
3971 	return 1;
3972 }
3973 
3974 /**
3975  *	ata_dev_reread_id - Re-read IDENTIFY data
3976  *	@dev: target ATA device
3977  *	@readid_flags: read ID flags
3978  *
3979  *	Re-read IDENTIFY page and make sure @dev is still attached to
3980  *	the port.
3981  *
3982  *	LOCKING:
3983  *	Kernel thread context (may sleep)
3984  *
3985  *	RETURNS:
3986  *	0 on success, negative errno otherwise
3987  */
3988 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3989 {
3990 	unsigned int class = dev->class;
3991 	u16 *id = (void *)dev->link->ap->sector_buf;
3992 	int rc;
3993 
3994 	/* read ID data */
3995 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3996 	if (rc)
3997 		return rc;
3998 
3999 	/* is the device still there? */
4000 	if (!ata_dev_same_device(dev, class, id))
4001 		return -ENODEV;
4002 
4003 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4004 	return 0;
4005 }
4006 
4007 /**
4008  *	ata_dev_revalidate - Revalidate ATA device
4009  *	@dev: device to revalidate
4010  *	@new_class: new class code
4011  *	@readid_flags: read ID flags
4012  *
4013  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4014  *	port and reconfigure it according to the new IDENTIFY page.
4015  *
4016  *	LOCKING:
4017  *	Kernel thread context (may sleep)
4018  *
4019  *	RETURNS:
4020  *	0 on success, negative errno otherwise
4021  */
4022 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4023 		       unsigned int readid_flags)
4024 {
4025 	u64 n_sectors = dev->n_sectors;
4026 	u64 n_native_sectors = dev->n_native_sectors;
4027 	int rc;
4028 
4029 	if (!ata_dev_enabled(dev))
4030 		return -ENODEV;
4031 
4032 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4033 	if (ata_class_enabled(new_class) &&
4034 	    new_class != ATA_DEV_ATA &&
4035 	    new_class != ATA_DEV_ATAPI &&
4036 	    new_class != ATA_DEV_SEMB) {
4037 		ata_dev_info(dev, "class mismatch %u != %u\n",
4038 			     dev->class, new_class);
4039 		rc = -ENODEV;
4040 		goto fail;
4041 	}
4042 
4043 	/* re-read ID */
4044 	rc = ata_dev_reread_id(dev, readid_flags);
4045 	if (rc)
4046 		goto fail;
4047 
4048 	/* configure device according to the new ID */
4049 	rc = ata_dev_configure(dev);
4050 	if (rc)
4051 		goto fail;
4052 
4053 	/* verify n_sectors hasn't changed */
4054 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4055 	    dev->n_sectors == n_sectors)
4056 		return 0;
4057 
4058 	/* n_sectors has changed */
4059 	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4060 		     (unsigned long long)n_sectors,
4061 		     (unsigned long long)dev->n_sectors);
4062 
4063 	/*
4064 	 * Something could have caused HPA to be unlocked
4065 	 * involuntarily.  If n_native_sectors hasn't changed and the
4066 	 * new size matches it, keep the device.
4067 	 */
4068 	if (dev->n_native_sectors == n_native_sectors &&
4069 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4070 		ata_dev_warn(dev,
4071 			     "new n_sectors matches native, probably "
4072 			     "late HPA unlock, n_sectors updated\n");
4073 		/* use the larger n_sectors */
4074 		return 0;
4075 	}
4076 
4077 	/*
4078 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4079 	 * unlocking HPA in those cases.
4080 	 *
4081 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4082 	 */
4083 	if (dev->n_native_sectors == n_native_sectors &&
4084 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4085 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4086 		ata_dev_warn(dev,
4087 			     "old n_sectors matches native, probably "
4088 			     "late HPA lock, will try to unlock HPA\n");
4089 		/* try unlocking HPA */
4090 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4091 		rc = -EIO;
4092 	} else
4093 		rc = -ENODEV;
4094 
4095 	/* restore original n_[native_]sectors and fail */
4096 	dev->n_native_sectors = n_native_sectors;
4097 	dev->n_sectors = n_sectors;
4098  fail:
4099 	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4100 	return rc;
4101 }
4102 
4103 struct ata_blacklist_entry {
4104 	const char *model_num;
4105 	const char *model_rev;
4106 	unsigned long horkage;
4107 };
4108 
4109 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4110 	/* Devices with DMA related problems under Linux */
4111 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4112 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4113 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4114 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4115 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4116 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4117 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4118 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4119 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4120 	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4121 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4122 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4123 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4124 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4125 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4126 	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4127 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4128 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4129 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4130 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4131 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4132 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4133 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4134 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4135 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4136 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4137 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4138 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4139 	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4140 	/* Odd clown on sil3726/4726 PMPs */
4141 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4142 
4143 	/* Weird ATAPI devices */
4144 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4145 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4146 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4147 	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4148 
4149 	/* Devices we expect to fail diagnostics */
4150 
4151 	/* Devices where NCQ should be avoided */
4152 	/* NCQ is slow */
4153 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4154 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4155 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4156 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4157 	/* NCQ is broken */
4158 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4159 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4160 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4161 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4162 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4163 
4164 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4165 	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4166 						ATA_HORKAGE_FIRMWARE_WARN },
4167 
4168 	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4169 						ATA_HORKAGE_FIRMWARE_WARN },
4170 
4171 	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4172 						ATA_HORKAGE_FIRMWARE_WARN },
4173 
4174 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4175 						ATA_HORKAGE_FIRMWARE_WARN },
4176 
4177 	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4178 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4179 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4180 	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4181 
4182 	/* Blacklist entries taken from Silicon Image 3124/3132
4183 	   Windows driver .inf file - also several Linux problem reports */
4184 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4185 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4186 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4187 
4188 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4189 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4190 
4191 	/* devices which puke on READ_NATIVE_MAX */
4192 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4193 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4194 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4195 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4196 
4197 	/* this one allows HPA unlocking but fails IOs on the area */
4198 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4199 
4200 	/* Devices which report 1 sector over size HPA */
4201 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4202 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4203 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4204 
4205 	/* Devices which get the IVB wrong */
4206 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4207 	/* Maybe we should just blacklist TSSTcorp... */
4208 	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4209 
4210 	/* Devices that do not need bridging limits applied */
4211 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4212 	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4213 
4214 	/* Devices which aren't very happy with higher link speeds */
4215 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4216 	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4217 
4218 	/*
4219 	 * Devices which choke on SETXFER.  Applies only if both the
4220 	 * device and controller are SATA.
4221 	 */
4222 	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4223 	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4224 	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4225 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4226 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4227 
4228 	/* devices that don't properly handle queued TRIM commands */
4229 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4230 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4231 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4232 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4233 	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4234 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4235 	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4236 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4237 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4238 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4239 	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4240 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4241 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4242 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4243 
4244 	/* devices that don't properly handle TRIM commands */
4245 	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
4246 
4247 	/*
4248 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4249 	 * (Return Zero After Trim) flags in the ATA Command Set are
4250 	 * unreliable in the sense that they only define what happens if
4251 	 * the device successfully executed the DSM TRIM command. TRIM
4252 	 * is only advisory, however, and the device is free to silently
4253 	 * ignore all or parts of the request.
4254 	 *
4255 	 * Whitelist drives that are known to reliably return zeroes
4256 	 * after TRIM.
4257 	 */
4258 
4259 	/*
4260 	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4261 	 * that model before whitelisting all other intel SSDs.
4262 	 */
4263 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
4264 
4265 	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4266 	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4267 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4268 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4269 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4270 	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4271 	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4272 
4276 	/*
4277 	 * Some WD SATA-I drives spin up and down erratically when the link
4278 	 * is put into the slumber mode.  We don't have full list of the
4279 	 * affected devices.  Disable LPM if the device matches one of the
4280 	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4281 	 * lost too.
4282 	 *
4283 	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4284 	 */
4285 	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4286 	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4287 	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4288 	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4289 	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4290 	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4291 	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4292 
4293 	/* End Marker */
4294 	{ }
4295 };
4296 
4297 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4298 {
4299 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4300 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4301 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4302 
4303 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4304 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4305 
4306 	while (ad->model_num) {
4307 		if (glob_match(ad->model_num, model_num)) {
4308 			if (ad->model_rev == NULL)
4309 				return ad->horkage;
4310 			if (glob_match(ad->model_rev, model_rev))
4311 				return ad->horkage;
4312 		}
4313 		ad++;
4314 	}
4315 	return 0;
4316 }
4317 
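/*
 * Matching sketch: entries are glob patterns checked against the
 * IDENTIFY model and firmware revision strings, so a hypothetical
 * entry such as
 *
 *	{ "ExampleDisk 12*", "FW1[02]", ATA_HORKAGE_NONCQ },
 *
 * would apply to any "ExampleDisk 12..." model running firmware FW10
 * or FW12.  The model and revision above are made up for illustration;
 * real entries belong in ata_device_blacklist above.
 */
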
4318 static int ata_dma_blacklisted(const struct ata_device *dev)
4319 {
4320 	/* We don't support polling DMA.
4321 	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with
4322 	 * CDB-intr if the LLDD handles interrupts only in the HSM_ST_LAST state.
4323 	 */
4324 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4325 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4326 		return 1;
4327 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4328 }
4329 
4330 /**
4331  *	ata_is_40wire		-	check drive side detection
4332  *	@dev: device
4333  *
4334  *	Perform drive side detection decoding, allowing for device vendors
4335  *	who can't follow the documentation.
4336  */
4337 
4338 static int ata_is_40wire(struct ata_device *dev)
4339 {
4340 	if (dev->horkage & ATA_HORKAGE_IVB)
4341 		return ata_drive_40wire_relaxed(dev->id);
4342 	return ata_drive_40wire(dev->id);
4343 }
4344 
4345 /**
4346  *	cable_is_40wire		-	40/80/SATA decider
4347  *	@ap: port to consider
4348  *
4349  *	This function encapsulates the policy for speed management
4350  *	in one place. At the moment we don't cache the result but
4351  *	there is a good case for setting ap->cbl to the result when
4352  *	we are called with unknown cables (and figuring out if it
4353  *	impacts hotplug at all).
4354  *
4355  *	Return 1 if the cable appears to be 40 wire.
4356  */
4357 
4358 static int cable_is_40wire(struct ata_port *ap)
4359 {
4360 	struct ata_link *link;
4361 	struct ata_device *dev;
4362 
4363 	/* If the controller thinks we are 40 wire, we are. */
4364 	if (ap->cbl == ATA_CBL_PATA40)
4365 		return 1;
4366 
4367 	/* If the controller thinks we are 80 wire, we are. */
4368 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4369 		return 0;
4370 
4371 	/* If the system is known to be 40 wire short cable (eg
4372 	 * laptop), then we allow 80 wire modes even if the drive
4373 	 * isn't sure.
4374 	 */
4375 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4376 		return 0;
4377 
4378 	/* If the controller doesn't know, we scan.
4379 	 *
4380 	 * Note: We look for all 40 wire detects at this point.  Any
4381 	 *       80 wire detect is taken to be 80 wire cable because
4382 	 * - in many setups only the one drive (slave if present) will
4383 	 *   give a valid detect
4384 	 * - if you have a non detect capable drive you don't want it
4385 	 *   to colour the choice
4386 	 */
4387 	ata_for_each_link(link, ap, EDGE) {
4388 		ata_for_each_dev(dev, link, ENABLED) {
4389 			if (!ata_is_40wire(dev))
4390 				return 0;
4391 		}
4392 	}
4393 	return 1;
4394 }
4395 
4396 /**
4397  *	ata_dev_xfermask - Compute supported xfermask of the given device
4398  *	@dev: Device to compute xfermask for
4399  *
4400  *	Compute supported xfermask of @dev and store it in
4401  *	dev->*_mask.  This function is responsible for applying all
4402  *	known limits including host controller limits, device
4403  *	blacklist, etc...
4404  *
4405  *	LOCKING:
4406  *	None.
4407  */
4408 static void ata_dev_xfermask(struct ata_device *dev)
4409 {
4410 	struct ata_link *link = dev->link;
4411 	struct ata_port *ap = link->ap;
4412 	struct ata_host *host = ap->host;
4413 	unsigned long xfer_mask;
4414 
4415 	/* controller modes available */
4416 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4417 				      ap->mwdma_mask, ap->udma_mask);
4418 
4419 	/* drive modes available */
4420 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4421 				       dev->mwdma_mask, dev->udma_mask);
4422 	xfer_mask &= ata_id_xfermask(dev->id);
4423 
4424 	/*
4425 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4426 	 *	cable
4427 	 */
4428 	if (ata_dev_pair(dev)) {
4429 		/* No PIO5 or PIO6 */
4430 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4431 		/* No MWDMA3 or MWDMA 4 */
4432 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4433 	}
4434 
4435 	if (ata_dma_blacklisted(dev)) {
4436 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4437 		ata_dev_warn(dev,
4438 			     "device is on DMA blacklist, disabling DMA\n");
4439 	}
4440 
4441 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4442 	    host->simplex_claimed && host->simplex_claimed != ap) {
4443 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4444 		ata_dev_warn(dev,
4445 			     "simplex DMA is claimed by other device, disabling DMA\n");
4446 	}
4447 
4448 	if (ap->flags & ATA_FLAG_NO_IORDY)
4449 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4450 
4451 	if (ap->ops->mode_filter)
4452 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4453 
4454 	/* Apply cable rule here.  Don't apply it early because when
4455 	 * we handle hot plug the cable type can itself change.
4456 	 * Check this last so that we know if the transfer rate was
4457 	 * solely limited by the cable.
4458 	 * Unknown or 80 wire cables reported host side are checked
4459 	 * drive side as well. Cases where we know a 40wire cable
4460 	 * is used safely for 80 are not checked here.
4461 	 */
4462 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4463 		/* UDMA/44 or higher would be available */
4464 		if (cable_is_40wire(ap)) {
4465 			ata_dev_warn(dev,
4466 				     "limited to UDMA/33 due to 40-wire cable\n");
4467 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4468 		}
4469 
4470 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4471 			    &dev->mwdma_mask, &dev->udma_mask);
4472 }
4473 
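/*
 * Worked example of the mask arithmetic used above: each transfer mode
 * occupies one bit above its class shift, so bits 0..2 of the UDMA
 * field are UDMA/16, /25 and /33 while bits 3..7 are UDMA/44 and
 * faster.  Clearing "0xF8 << ATA_SHIFT_UDMA" therefore limits a device
 * on a 40-wire cable to UDMA/33 without touching PIO or MWDMA modes.
 */
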
4474 /**
4475  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4476  *	@dev: Device to which command will be sent
4477  *
4478  *	Issue SET FEATURES - XFER MODE command to device @dev
4479  *	on port @ap.
4480  *
4481  *	LOCKING:
4482  *	PCI/etc. bus probe sem.
4483  *
4484  *	RETURNS:
4485  *	0 on success, AC_ERR_* mask otherwise.
4486  */
4487 
4488 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4489 {
4490 	struct ata_taskfile tf;
4491 	unsigned int err_mask;
4492 
4493 	/* set up set-features taskfile */
4494 	DPRINTK("set features - xfer mode\n");
4495 
4496 	/* Some controllers and ATAPI devices show flaky interrupt
4497 	 * behavior after setting xfer mode.  Use polling instead.
4498 	 */
4499 	ata_tf_init(dev, &tf);
4500 	tf.command = ATA_CMD_SET_FEATURES;
4501 	tf.feature = SETFEATURES_XFER;
4502 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4503 	tf.protocol = ATA_PROT_NODATA;
4504 	/* If we are using IORDY we must send the mode setting command */
4505 	if (ata_pio_need_iordy(dev))
4506 		tf.nsect = dev->xfer_mode;
4507 	/* If the device has IORDY and the controller does not - turn it off */
4508  	else if (ata_id_has_iordy(dev->id))
4509 		tf.nsect = 0x01;
4510 	else /* In the ancient relic department - skip all of this */
4511 		return 0;
4512 
4513 	/* On some disks, this command causes spin-up, so we need longer timeout */
4514 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4515 
4516 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4517 	return err_mask;
4518 }
4519 
4520 /**
4521  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4522  *	@dev: Device to which command will be sent
4523  *	@enable: Whether to enable or disable the feature
4524  *	@feature: SATA feature code, passed in the sector count field
4525  *
4526  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4527  *	on port @ap with the sector count set to @feature.
4528  *
4529  *	LOCKING:
4530  *	PCI/etc. bus probe sem.
4531  *
4532  *	RETURNS:
4533  *	0 on success, AC_ERR_* mask otherwise.
4534  */
4535 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4536 {
4537 	struct ata_taskfile tf;
4538 	unsigned int err_mask;
4539 
4540 	/* set up set-features taskfile */
4541 	DPRINTK("set features - SATA features\n");
4542 
4543 	ata_tf_init(dev, &tf);
4544 	tf.command = ATA_CMD_SET_FEATURES;
4545 	tf.feature = enable;
4546 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4547 	tf.protocol = ATA_PROT_NODATA;
4548 	tf.nsect = feature;
4549 
4550 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4551 
4552 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4553 	return err_mask;
4554 }
4555 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4556 
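/*
 * Usage sketch: @enable selects the SET FEATURES subcommand and
 * @feature the SATA feature code, e.g. enabling asynchronous
 * notification looks roughly like
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 *
 * (illustration only; both constants come from <linux/ata.h>).
 */
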
4557 /**
4558  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4559  *	@dev: Device to which command will be sent
4560  *	@heads: Number of heads (taskfile parameter)
4561  *	@sectors: Number of sectors (taskfile parameter)
4562  *
4563  *	LOCKING:
4564  *	Kernel thread context (may sleep)
4565  *
4566  *	RETURNS:
4567  *	0 on success, AC_ERR_* mask otherwise.
4568  */
4569 static unsigned int ata_dev_init_params(struct ata_device *dev,
4570 					u16 heads, u16 sectors)
4571 {
4572 	struct ata_taskfile tf;
4573 	unsigned int err_mask;
4574 
4575 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4576 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4577 		return AC_ERR_INVALID;
4578 
4579 	/* set up init dev params taskfile */
4580 	DPRINTK("init dev params \n");
4581 
4582 	ata_tf_init(dev, &tf);
4583 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4584 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4585 	tf.protocol = ATA_PROT_NODATA;
4586 	tf.nsect = sectors;
4587 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4588 
4589 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4590 	/* A clean abort indicates an original or just out of spec drive
4591 	   and we should continue as we issue the setup based on the
4592 	   drive reported working geometry */
4593 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4594 		err_mask = 0;
4595 
4596 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4597 	return err_mask;
4598 }
4599 
4600 /**
4601  *	ata_sg_clean - Unmap DMA memory associated with command
4602  *	@qc: Command containing DMA memory to be released
4603  *
4604  *	Unmap all mapped DMA memory associated with this command.
4605  *
4606  *	LOCKING:
4607  *	spin_lock_irqsave(host lock)
4608  */
4609 void ata_sg_clean(struct ata_queued_cmd *qc)
4610 {
4611 	struct ata_port *ap = qc->ap;
4612 	struct scatterlist *sg = qc->sg;
4613 	int dir = qc->dma_dir;
4614 
4615 	WARN_ON_ONCE(sg == NULL);
4616 
4617 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4618 
4619 	if (qc->n_elem)
4620 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4621 
4622 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4623 	qc->sg = NULL;
4624 }
4625 
4626 /**
4627  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4628  *	@qc: Metadata associated with taskfile to check
4629  *
4630  *	Allow low-level driver to filter ATA PACKET commands, returning
4631  *	a status indicating whether or not it is OK to use DMA for the
4632  *	supplied PACKET command.
4633  *
4634  *	LOCKING:
4635  *	spin_lock_irqsave(host lock)
4636  *
4637  *	RETURNS: 0 when ATAPI DMA can be used
4638  *               nonzero otherwise
4639  */
4640 int atapi_check_dma(struct ata_queued_cmd *qc)
4641 {
4642 	struct ata_port *ap = qc->ap;
4643 
4644 	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4645 	 * few ATAPI devices choke on such DMA requests.
4646 	 */
4647 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4648 	    unlikely(qc->nbytes & 15))
4649 		return 1;
4650 
4651 	if (ap->ops->check_atapi_dma)
4652 		return ap->ops->check_atapi_dma(qc);
4653 
4654 	return 0;
4655 }
4656 
4657 /**
4658  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4659  *	@qc: ATA command in question
4660  *
4661  *	Non-NCQ commands cannot run with any other command, NCQ or
4662  *	not.  As upper layer only knows the queue depth, we are
4663  *	responsible for maintaining exclusion.  This function checks
4664  *	whether a new command @qc can be issued.
4665  *
4666  *	LOCKING:
4667  *	spin_lock_irqsave(host lock)
4668  *
4669  *	RETURNS:
4670  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4671  */
4672 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4673 {
4674 	struct ata_link *link = qc->dev->link;
4675 
4676 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4677 		if (!ata_tag_valid(link->active_tag))
4678 			return 0;
4679 	} else {
4680 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4681 			return 0;
4682 	}
4683 
4684 	return ATA_DEFER_LINK;
4685 }
4686 
4687 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4688 
4689 /**
4690  *	ata_sg_init - Associate command with scatter-gather table.
4691  *	@qc: Command to be associated
4692  *	@sg: Scatter-gather table.
4693  *	@n_elem: Number of elements in s/g table.
4694  *
4695  *	Initialize the data-related elements of queued_cmd @qc
4696  *	to point to a scatter-gather table @sg, containing @n_elem
4697  *	elements.
4698  *
4699  *	LOCKING:
4700  *	spin_lock_irqsave(host lock)
4701  */
4702 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4703 		 unsigned int n_elem)
4704 {
4705 	qc->sg = sg;
4706 	qc->n_elem = n_elem;
4707 	qc->cursg = qc->sg;
4708 }
4709 
4710 /**
4711  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4712  *	@qc: Command with scatter-gather table to be mapped.
4713  *
4714  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4715  *
4716  *	LOCKING:
4717  *	spin_lock_irqsave(host lock)
4718  *
4719  *	RETURNS:
4720  *	Zero on success, negative on error.
4721  *
4722  */
4723 static int ata_sg_setup(struct ata_queued_cmd *qc)
4724 {
4725 	struct ata_port *ap = qc->ap;
4726 	unsigned int n_elem;
4727 
4728 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4729 
4730 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4731 	if (n_elem < 1)
4732 		return -1;
4733 
4734 	DPRINTK("%d sg elements mapped\n", n_elem);
4735 	qc->orig_n_elem = qc->n_elem;
4736 	qc->n_elem = n_elem;
4737 	qc->flags |= ATA_QCFLAG_DMAMAP;
4738 
4739 	return 0;
4740 }
4741 
4742 /**
4743  *	swap_buf_le16 - swap halves of 16-bit words in place
4744  *	@buf:  Buffer to swap
4745  *	@buf_words:  Number of 16-bit words in buffer.
4746  *
4747  *	Swap halves of 16-bit words if needed to convert from
4748  *	little-endian byte order to native cpu byte order, or
4749  *	vice-versa.
4750  *
4751  *	LOCKING:
4752  *	Inherited from caller.
4753  */
4754 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4755 {
4756 #ifdef __BIG_ENDIAN
4757 	unsigned int i;
4758 
4759 	for (i = 0; i < buf_words; i++)
4760 		buf[i] = le16_to_cpu(buf[i]);
4761 #endif /* __BIG_ENDIAN */
4762 }
4763 
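/*
 * Example: IDENTIFY data arrives from the device as little-endian
 * 16-bit words, so callers on big-endian hosts convert the raw buffer
 * in place before parsing it:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On little-endian builds the call is a no-op.
 */
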
4764 /**
4765  *	ata_qc_new - Request an available ATA command, for queueing
4766  *	@ap: target port
4767  *
4768  *	Some ATA host controllers may implement a queue depth which is less
4769  *	than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
4770  *	the hardware limitation.
4771  *
4772  *	LOCKING:
4773  *	None.
4774  */
4775 
4776 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4777 {
4778 	struct ata_queued_cmd *qc = NULL;
4779 	unsigned int max_queue = ap->host->n_tags;
4780 	unsigned int i, tag;
4781 
4782 	/* no command while frozen */
4783 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4784 		return NULL;
4785 
4786 	for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
4787 		if (ap->flags & ATA_FLAG_LOWTAG)
4788 			tag = i;
4789 		else
4790 			tag = tag < max_queue ? tag : 0;
4791 
4792 		/* the last tag is reserved for internal command. */
4793 		if (tag == ATA_TAG_INTERNAL)
4794 			continue;
4795 
4796 		if (!test_and_set_bit(tag, &ap->qc_allocated)) {
4797 			qc = __ata_qc_from_tag(ap, tag);
4798 			qc->tag = tag;
4799 			ap->last_tag = tag;
4800 			break;
4801 		}
4802 	}
4803 
4804 	return qc;
4805 }
4806 
4807 /**
4808  *	ata_qc_new_init - Request an available ATA command, and initialize it
4809  *	@dev: Device from whom we request an available command structure
4810  *
4811  *	LOCKING:
4812  *	None.
4813  */
4814 
4815 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4816 {
4817 	struct ata_port *ap = dev->link->ap;
4818 	struct ata_queued_cmd *qc;
4819 
4820 	qc = ata_qc_new(ap);
4821 	if (qc) {
4822 		qc->scsicmd = NULL;
4823 		qc->ap = ap;
4824 		qc->dev = dev;
4825 
4826 		ata_qc_reinit(qc);
4827 	}
4828 
4829 	return qc;
4830 }
4831 
4832 /**
4833  *	ata_qc_free - free unused ata_queued_cmd
4834  *	@qc: Command to complete
4835  *
4836  *	Designed to free unused ata_queued_cmd object
4837  *	in case something prevents using it.
4838  *
4839  *	LOCKING:
4840  *	spin_lock_irqsave(host lock)
4841  */
4842 void ata_qc_free(struct ata_queued_cmd *qc)
4843 {
4844 	struct ata_port *ap;
4845 	unsigned int tag;
4846 
4847 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4848 	ap = qc->ap;
4849 
4850 	qc->flags = 0;
4851 	tag = qc->tag;
4852 	if (likely(ata_tag_valid(tag))) {
4853 		qc->tag = ATA_TAG_POISON;
4854 		clear_bit(tag, &ap->qc_allocated);
4855 	}
4856 }
4857 
4858 void __ata_qc_complete(struct ata_queued_cmd *qc)
4859 {
4860 	struct ata_port *ap;
4861 	struct ata_link *link;
4862 
4863 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4864 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4865 	ap = qc->ap;
4866 	link = qc->dev->link;
4867 
4868 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4869 		ata_sg_clean(qc);
4870 
4871 	/* command should be marked inactive atomically with qc completion */
4872 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4873 		link->sactive &= ~(1 << qc->tag);
4874 		if (!link->sactive)
4875 			ap->nr_active_links--;
4876 	} else {
4877 		link->active_tag = ATA_TAG_POISON;
4878 		ap->nr_active_links--;
4879 	}
4880 
4881 	/* clear exclusive status */
4882 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4883 		     ap->excl_link == link))
4884 		ap->excl_link = NULL;
4885 
4886 	/* atapi: mark qc as inactive to prevent the interrupt handler
4887 	 * from completing the command twice later, before the error handler
4888 	 * is called. (when rc != 0 and atapi request sense is needed)
4889 	 */
4890 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4891 	ap->qc_active &= ~(1 << qc->tag);
4892 
4893 	/* call completion callback */
4894 	qc->complete_fn(qc);
4895 }
4896 
4897 static void fill_result_tf(struct ata_queued_cmd *qc)
4898 {
4899 	struct ata_port *ap = qc->ap;
4900 
4901 	qc->result_tf.flags = qc->tf.flags;
4902 	ap->ops->qc_fill_rtf(qc);
4903 }
4904 
4905 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4906 {
4907 	struct ata_device *dev = qc->dev;
4908 
4909 	if (ata_is_nodata(qc->tf.protocol))
4910 		return;
4911 
4912 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4913 		return;
4914 
4915 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4916 }
4917 
4918 /**
4919  *	ata_qc_complete - Complete an active ATA command
4920  *	@qc: Command to complete
4921  *
4922  *	Indicate to the mid and upper layers that an ATA command has
4923  *	completed, with either an ok or not-ok status.
4924  *
4925  *	Refrain from calling this function multiple times when
4926  *	successfully completing multiple NCQ commands.
4927  *	ata_qc_complete_multiple() should be used instead, which will
4928  *	properly update IRQ expect state.
4929  *
4930  *	LOCKING:
4931  *	spin_lock_irqsave(host lock)
4932  */
4933 void ata_qc_complete(struct ata_queued_cmd *qc)
4934 {
4935 	struct ata_port *ap = qc->ap;
4936 
4937 	/* XXX: New EH and old EH use different mechanisms to
4938 	 * synchronize EH with regular execution path.
4939 	 *
4940 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4941 	 * Normal execution path is responsible for not accessing a
4942 	 * failed qc.  libata core enforces the rule by returning NULL
4943 	 * from ata_qc_from_tag() for failed qcs.
4944 	 *
4945 	 * Old EH depends on ata_qc_complete() nullifying completion
4946 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4947 	 * not synchronize with interrupt handler.  Only PIO task is
4948 	 * taken care of.
4949 	 */
4950 	if (ap->ops->error_handler) {
4951 		struct ata_device *dev = qc->dev;
4952 		struct ata_eh_info *ehi = &dev->link->eh_info;
4953 
4954 		if (unlikely(qc->err_mask))
4955 			qc->flags |= ATA_QCFLAG_FAILED;
4956 
4957 		/*
4958 		 * Finish internal commands without any further processing
4959 		 * and always with the result TF filled.
4960 		 */
4961 		if (unlikely(ata_tag_internal(qc->tag))) {
4962 			fill_result_tf(qc);
4963 			__ata_qc_complete(qc);
4964 			return;
4965 		}
4966 
4967 		/*
4968 		 * Non-internal qc has failed.  Fill the result TF and
4969 		 * summon EH.
4970 		 */
4971 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4972 			fill_result_tf(qc);
4973 			ata_qc_schedule_eh(qc);
4974 			return;
4975 		}
4976 
4977 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4978 
4979 		/* read result TF if requested */
4980 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4981 			fill_result_tf(qc);
4982 
4983 		/* Some commands need post-processing after successful
4984 		 * completion.
4985 		 */
4986 		switch (qc->tf.command) {
4987 		case ATA_CMD_SET_FEATURES:
4988 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4989 			    qc->tf.feature != SETFEATURES_WC_OFF)
4990 				break;
4991 			/* fall through */
4992 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4993 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4994 			/* revalidate device */
4995 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4996 			ata_port_schedule_eh(ap);
4997 			break;
4998 
4999 		case ATA_CMD_SLEEP:
5000 			dev->flags |= ATA_DFLAG_SLEEPING;
5001 			break;
5002 		}
5003 
5004 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5005 			ata_verify_xfer(qc);
5006 
5007 		__ata_qc_complete(qc);
5008 	} else {
5009 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5010 			return;
5011 
5012 		/* read result TF if failed or requested */
5013 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5014 			fill_result_tf(qc);
5015 
5016 		__ata_qc_complete(qc);
5017 	}
5018 }
5019 
5020 /**
5021  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5022  *	@ap: port in question
5023  *	@qc_active: new qc_active mask
5024  *
5025  *	Complete in-flight commands.  This function is meant to be
5026  *	called from a low-level driver's interrupt routine to complete
5027  *	requests normally.  ap->qc_active and @qc_active are compared
5028  *	and commands are completed accordingly.
5029  *
5030  *	Always use this function when completing multiple NCQ commands
5031  *	from IRQ handlers instead of calling ata_qc_complete()
5032  *	multiple times to keep IRQ expect status properly in sync.
5033  *
5034  *	LOCKING:
5035  *	spin_lock_irqsave(host lock)
5036  *
5037  *	RETURNS:
5038  *	Number of completed commands on success, -errno otherwise.
5039  */
5040 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5041 {
5042 	int nr_done = 0;
5043 	u32 done_mask;
5044 
5045 	done_mask = ap->qc_active ^ qc_active;
5046 
5047 	if (unlikely(done_mask & qc_active)) {
5048 		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5049 			     ap->qc_active, qc_active);
5050 		return -EINVAL;
5051 	}
5052 
5053 	while (done_mask) {
5054 		struct ata_queued_cmd *qc;
5055 		unsigned int tag = __ffs(done_mask);
5056 
5057 		qc = ata_qc_from_tag(ap, tag);
5058 		if (qc) {
5059 			ata_qc_complete(qc);
5060 			nr_done++;
5061 		}
5062 		done_mask &= ~(1 << tag);
5063 	}
5064 
5065 	return nr_done;
5066 }
5067 
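/*
 * Usage sketch (illustration only; my_read_active_tags() is a
 * placeholder for a driver-specific register read, e.g. a SACT/CI
 * style pair): an NCQ-capable LLD's interrupt handler reads the
 * hardware's "still active" tag mask and hands it over:
 *
 *	u32 qc_active = my_read_active_tags(ap);
 *
 *	spin_lock(ap->lock);
 *	ata_qc_complete_multiple(ap, qc_active);
 *	spin_unlock(ap->lock);
 */
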
5068 /**
5069  *	ata_qc_issue - issue taskfile to device
5070  *	@qc: command to issue to device
5071  *
5072  *	Prepare an ATA command for submission to the device.
5073  *	This includes mapping the data into a DMA-able
5074  *	area, filling in the S/G table, and finally
5075  *	writing the taskfile to hardware, starting the command.
5076  *
5077  *	LOCKING:
5078  *	spin_lock_irqsave(host lock)
5079  */
5080 void ata_qc_issue(struct ata_queued_cmd *qc)
5081 {
5082 	struct ata_port *ap = qc->ap;
5083 	struct ata_link *link = qc->dev->link;
5084 	u8 prot = qc->tf.protocol;
5085 
5086 	/* Make sure only one non-NCQ command is outstanding.  The
5087 	 * check is skipped for old EH because it reuses active qc to
5088 	 * request ATAPI sense.
5089 	 */
5090 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5091 
5092 	if (ata_is_ncq(prot)) {
5093 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5094 
5095 		if (!link->sactive)
5096 			ap->nr_active_links++;
5097 		link->sactive |= 1 << qc->tag;
5098 	} else {
5099 		WARN_ON_ONCE(link->sactive);
5100 
5101 		ap->nr_active_links++;
5102 		link->active_tag = qc->tag;
5103 	}
5104 
5105 	qc->flags |= ATA_QCFLAG_ACTIVE;
5106 	ap->qc_active |= 1 << qc->tag;
5107 
5108 	/*
5109 	 * We guarantee to LLDs that they will have at least one
5110 	 * non-zero sg if the command is a data command.
5111 	 */
5112 	if (WARN_ON_ONCE(ata_is_data(prot) &&
5113 			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5114 		goto sys_err;
5115 
5116 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5117 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5118 		if (ata_sg_setup(qc))
5119 			goto sys_err;
5120 
5121 	/* if device is sleeping, schedule reset and abort the link */
5122 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5123 		link->eh_info.action |= ATA_EH_RESET;
5124 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5125 		ata_link_abort(link);
5126 		return;
5127 	}
5128 
5129 	ap->ops->qc_prep(qc);
5130 
5131 	qc->err_mask |= ap->ops->qc_issue(qc);
5132 	if (unlikely(qc->err_mask))
5133 		goto err;
5134 	return;
5135 
5136 sys_err:
5137 	qc->err_mask |= AC_ERR_SYSTEM;
5138 err:
5139 	ata_qc_complete(qc);
5140 }
5141 
5142 /**
5143  *	sata_scr_valid - test whether SCRs are accessible
5144  *	@link: ATA link to test SCR accessibility for
5145  *
5146  *	Test whether SCRs are accessible for @link.
5147  *
5148  *	LOCKING:
5149  *	None.
5150  *
5151  *	RETURNS:
5152  *	1 if SCRs are accessible, 0 otherwise.
5153  */
5154 int sata_scr_valid(struct ata_link *link)
5155 {
5156 	struct ata_port *ap = link->ap;
5157 
5158 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5159 }
5160 
5161 /**
5162  *	sata_scr_read - read SCR register of the specified port
5163  *	@link: ATA link to read SCR for
5164  *	@reg: SCR to read
5165  *	@val: Place to store read value
5166  *
5167  *	Read SCR register @reg of @link into *@val.  This function is
5168  *	guaranteed to succeed if @link is ap->link, the cable type of
5169  *	the port is SATA and the port implements ->scr_read.
5170  *
5171  *	LOCKING:
5172  *	None if @link is ap->link.  Kernel thread context otherwise.
5173  *
5174  *	RETURNS:
5175  *	0 on success, negative errno on failure.
5176  */
5177 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5178 {
5179 	if (ata_is_host_link(link)) {
5180 		if (sata_scr_valid(link))
5181 			return link->ap->ops->scr_read(link, reg, val);
5182 		return -EOPNOTSUPP;
5183 	}
5184 
5185 	return sata_pmp_scr_read(link, reg, val);
5186 }
5187 
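/*
 * Example: SCR_STATUS keeps DET in bits 0-3, so a quick "is the phy
 * up?" test reads
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
 *	    (sstatus & 0xf) == 0x3)
 *		;	-- device present, phy communication established
 *
 * which is what ata_phys_link_online() below does via
 * ata_sstatus_online().
 */
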
5188 /**
5189  *	sata_scr_write - write SCR register of the specified port
5190  *	@link: ATA link to write SCR for
5191  *	@reg: SCR to write
5192  *	@val: value to write
5193  *
5194  *	Write @val to SCR register @reg of @link.  This function is
5195  *	guaranteed to succeed if @link is ap->link, the cable type of
5196  *	the port is SATA and the port implements ->scr_write.
5197  *
5198  *	LOCKING:
5199  *	None if @link is ap->link.  Kernel thread context otherwise.
5200  *
5201  *	RETURNS:
5202  *	0 on success, negative errno on failure.
5203  */
5204 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5205 {
5206 	if (ata_is_host_link(link)) {
5207 		if (sata_scr_valid(link))
5208 			return link->ap->ops->scr_write(link, reg, val);
5209 		return -EOPNOTSUPP;
5210 	}
5211 
5212 	return sata_pmp_scr_write(link, reg, val);
5213 }
5214 
5215 /**
5216  *	sata_scr_write_flush - write SCR register of the specified port and flush
5217  *	@link: ATA link to write SCR for
5218  *	@reg: SCR to write
5219  *	@val: value to write
5220  *
5221  *	This function is identical to sata_scr_write() except that this
5222  *	function performs flush after writing to the register.
5223  *
5224  *	LOCKING:
5225  *	None if @link is ap->link.  Kernel thread context otherwise.
5226  *
5227  *	RETURNS:
5228  *	0 on success, negative errno on failure.
5229  */
5230 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5231 {
5232 	if (ata_is_host_link(link)) {
5233 		int rc;
5234 
5235 		if (sata_scr_valid(link)) {
5236 			rc = link->ap->ops->scr_write(link, reg, val);
5237 			if (rc == 0)
5238 				rc = link->ap->ops->scr_read(link, reg, &val);
5239 			return rc;
5240 		}
5241 		return -EOPNOTSUPP;
5242 	}
5243 
5244 	return sata_pmp_scr_write(link, reg, val);
5245 }
5246 
5247 /**
5248  *	ata_phys_link_online - test whether the given link is online
5249  *	@link: ATA link to test
5250  *
5251  *	Test whether @link is online.  Note that this function returns
5252  *	0 if online status of @link cannot be obtained, so
5253  *	ata_link_online(link) != !ata_link_offline(link).
5254  *
5255  *	LOCKING:
5256  *	None.
5257  *
5258  *	RETURNS:
5259  *	True if the port online status is available and online.
5260  */
5261 bool ata_phys_link_online(struct ata_link *link)
5262 {
5263 	u32 sstatus;
5264 
5265 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5266 	    ata_sstatus_online(sstatus))
5267 		return true;
5268 	return false;
5269 }
5270 
5271 /**
5272  *	ata_phys_link_offline - test whether the given link is offline
5273  *	@link: ATA link to test
5274  *
5275  *	Test whether @link is offline.  Note that this function
5276  *	returns false if the offline status of @link cannot be obtained, so
5277  *	ata_link_online(link) != !ata_link_offline(link).
5278  *
5279  *	LOCKING:
5280  *	None.
5281  *
5282  *	RETURNS:
5283  *	True if the port offline status is available and offline.
5284  */
5285 bool ata_phys_link_offline(struct ata_link *link)
5286 {
5287 	u32 sstatus;
5288 
5289 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5290 	    !ata_sstatus_online(sstatus))
5291 		return true;
5292 	return false;
5293 }
5294 
5295 /**
5296  *	ata_link_online - test whether the given link is online
5297  *	@link: ATA link to test
5298  *
5299  *	Test whether @link is online.  This is identical to
5300  *	ata_phys_link_online() when there's no slave link.  When
5301  *	there's a slave link, this function should only be called on
5302  *	the master link and will return true if any of M/S links is
5303  *	online.
5304  *
5305  *	LOCKING:
5306  *	None.
5307  *
5308  *	RETURNS:
5309  *	True if the port online status is available and online.
5310  */
5311 bool ata_link_online(struct ata_link *link)
5312 {
5313 	struct ata_link *slave = link->ap->slave_link;
5314 
5315 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5316 
5317 	return ata_phys_link_online(link) ||
5318 		(slave && ata_phys_link_online(slave));
5319 }
5320 
5321 /**
5322  *	ata_link_offline - test whether the given link is offline
5323  *	@link: ATA link to test
5324  *
5325  *	Test whether @link is offline.  This is identical to
5326  *	ata_phys_link_offline() when there's no slave link.  When
5327  *	there's a slave link, this function should only be called on
5328  *	the master link and will return true if both M/S links are
5329  *	offline.
5330  *
5331  *	LOCKING:
5332  *	None.
5333  *
5334  *	RETURNS:
5335  *	True if the port offline status is available and offline.
5336  */
5337 bool ata_link_offline(struct ata_link *link)
5338 {
5339 	struct ata_link *slave = link->ap->slave_link;
5340 
5341 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5342 
5343 	return ata_phys_link_offline(link) &&
5344 		(!slave || ata_phys_link_offline(slave));
5345 }
5346 
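/*
 * Example (illustrative sketch): how a caller might interpret the two
 * helpers above.  Because both return false when SStatus cannot be read,
 * "not online" does not imply "offline".  The function name is made up.
 */
#if 0	/* usage sketch, not compiled */
static void example_report_link_state(struct ata_link *link)
{
	if (ata_link_online(link))
		ata_link_info(link, "device presence detected\n");
	else if (ata_link_offline(link))
		ata_link_info(link, "no device detected\n");
	else
		ata_link_info(link, "link state unknown\n");
}
#endif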
5347 #ifdef CONFIG_PM
5348 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5349 				unsigned int action, unsigned int ehi_flags,
5350 				bool async)
5351 {
5352 	struct ata_link *link;
5353 	unsigned long flags;
5354 
5355 	/* Previous resume operation might still be in
5356 	 * progress.  Wait for PM_PENDING to clear.
5357 	 */
5358 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5359 		ata_port_wait_eh(ap);
5360 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5361 	}
5362 
5363 	/* request PM ops to EH */
5364 	spin_lock_irqsave(ap->lock, flags);
5365 
5366 	ap->pm_mesg = mesg;
5367 	ap->pflags |= ATA_PFLAG_PM_PENDING;
5368 	ata_for_each_link(link, ap, HOST_FIRST) {
5369 		link->eh_info.action |= action;
5370 		link->eh_info.flags |= ehi_flags;
5371 	}
5372 
5373 	ata_port_schedule_eh(ap);
5374 
5375 	spin_unlock_irqrestore(ap->lock, flags);
5376 
5377 	if (!async) {
5378 		ata_port_wait_eh(ap);
5379 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5380 	}
5381 }
5382 
5383 /*
5384  * On some hardware, a device fails to respond after being spun down for suspend.  As
5385  * the device won't be used before being resumed, we don't need to touch the
5386  * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
5387  *
5388  * http://thread.gmane.org/gmane.linux.ide/46764
5389  */
5390 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5391 						 | ATA_EHI_NO_AUTOPSY
5392 						 | ATA_EHI_NO_RECOVERY;
5393 
5394 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5395 {
5396 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5397 }
5398 
5399 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5400 {
5401 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5402 }
5403 
5404 static int ata_port_pm_suspend(struct device *dev)
5405 {
5406 	struct ata_port *ap = to_ata_port(dev);
5407 
5408 	if (pm_runtime_suspended(dev))
5409 		return 0;
5410 
5411 	ata_port_suspend(ap, PMSG_SUSPEND);
5412 	return 0;
5413 }
5414 
5415 static int ata_port_pm_freeze(struct device *dev)
5416 {
5417 	struct ata_port *ap = to_ata_port(dev);
5418 
5419 	if (pm_runtime_suspended(dev))
5420 		return 0;
5421 
5422 	ata_port_suspend(ap, PMSG_FREEZE);
5423 	return 0;
5424 }
5425 
5426 static int ata_port_pm_poweroff(struct device *dev)
5427 {
5428 	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5429 	return 0;
5430 }
5431 
5432 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5433 						| ATA_EHI_QUIET;
5434 
5435 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5436 {
5437 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5438 }
5439 
5440 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5441 {
5442 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5443 }
5444 
5445 static int ata_port_pm_resume(struct device *dev)
5446 {
5447 	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5448 	pm_runtime_disable(dev);
5449 	pm_runtime_set_active(dev);
5450 	pm_runtime_enable(dev);
5451 	return 0;
5452 }
5453 
5454 /*
5455  * For ODDs, the upper layer will poll for media change every few seconds,
5456  * which would make the device enter and leave suspend state every few
5457  * seconds.  As each suspend/resume cycle causes a hard/soft reset, the gain
5458  * from runtime suspend is very small and the ODD may malfunction after being
5459  * reset constantly.  So the idle callback here will not proceed to suspend
5460  * if a non-ZPODD capable ODD is attached to the port.
5461  */
5462 static int ata_port_runtime_idle(struct device *dev)
5463 {
5464 	struct ata_port *ap = to_ata_port(dev);
5465 	struct ata_link *link;
5466 	struct ata_device *adev;
5467 
5468 	ata_for_each_link(link, ap, HOST_FIRST) {
5469 		ata_for_each_dev(adev, link, ENABLED)
5470 			if (adev->class == ATA_DEV_ATAPI &&
5471 			    !zpodd_dev_enabled(adev))
5472 				return -EBUSY;
5473 	}
5474 
5475 	return 0;
5476 }
5477 
5478 static int ata_port_runtime_suspend(struct device *dev)
5479 {
5480 	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5481 	return 0;
5482 }
5483 
5484 static int ata_port_runtime_resume(struct device *dev)
5485 {
5486 	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5487 	return 0;
5488 }
5489 
5490 static const struct dev_pm_ops ata_port_pm_ops = {
5491 	.suspend = ata_port_pm_suspend,
5492 	.resume = ata_port_pm_resume,
5493 	.freeze = ata_port_pm_freeze,
5494 	.thaw = ata_port_pm_resume,
5495 	.poweroff = ata_port_pm_poweroff,
5496 	.restore = ata_port_pm_resume,
5497 
5498 	.runtime_suspend = ata_port_runtime_suspend,
5499 	.runtime_resume = ata_port_runtime_resume,
5500 	.runtime_idle = ata_port_runtime_idle,
5501 };
5502 
5503 /* sas ports don't participate in pm runtime management of ata_ports,
5504  * and need to resume ata devices at the domain level, not the per-port
5505  * level. sas suspend/resume is async to allow parallel port recovery
5506  * since sas has multiple ata_port instances per Scsi_Host.
5507  */
5508 void ata_sas_port_suspend(struct ata_port *ap)
5509 {
5510 	ata_port_suspend_async(ap, PMSG_SUSPEND);
5511 }
5512 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5513 
5514 void ata_sas_port_resume(struct ata_port *ap)
5515 {
5516 	ata_port_resume_async(ap, PMSG_RESUME);
5517 }
5518 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5519 
5520 /**
5521  *	ata_host_suspend - suspend host
5522  *	@host: host to suspend
5523  *	@mesg: PM message
5524  *
5525  *	Suspend @host.  Actual operation is performed by port suspend.
5526  */
5527 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5528 {
5529 	host->dev->power.power_state = mesg;
5530 	return 0;
5531 }
5532 
5533 /**
5534  *	ata_host_resume - resume host
5535  *	@host: host to resume
5536  *
5537  *	Resume @host.  Actual operation is performed by port resume.
5538  */
5539 void ata_host_resume(struct ata_host *host)
5540 {
5541 	host->dev->power.power_state = PMSG_ON;
5542 }
5543 #endif
5544 
5545 struct device_type ata_port_type = {
5546 	.name = "ata_port",
5547 #ifdef CONFIG_PM
5548 	.pm = &ata_port_pm_ops,
5549 #endif
5550 };
5551 
5552 /**
5553  *	ata_dev_init - Initialize an ata_device structure
5554  *	@dev: Device structure to initialize
5555  *
5556  *	Initialize @dev in preparation for probing.
5557  *
5558  *	LOCKING:
5559  *	Inherited from caller.
5560  */
5561 void ata_dev_init(struct ata_device *dev)
5562 {
5563 	struct ata_link *link = ata_dev_phys_link(dev);
5564 	struct ata_port *ap = link->ap;
5565 	unsigned long flags;
5566 
5567 	/* SATA spd limit is bound to the attached device, reset together */
5568 	link->sata_spd_limit = link->hw_sata_spd_limit;
5569 	link->sata_spd = 0;
5570 
5571 	/* High bits of dev->flags are used to record warm plug
5572 	 * requests which occur asynchronously.  Synchronize using
5573 	 * host lock.
5574 	 */
5575 	spin_lock_irqsave(ap->lock, flags);
5576 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5577 	dev->horkage = 0;
5578 	spin_unlock_irqrestore(ap->lock, flags);
5579 
5580 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5581 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5582 	dev->pio_mask = UINT_MAX;
5583 	dev->mwdma_mask = UINT_MAX;
5584 	dev->udma_mask = UINT_MAX;
5585 }
5586 
5587 /**
5588  *	ata_link_init - Initialize an ata_link structure
5589  *	@ap: ATA port link is attached to
5590  *	@link: Link structure to initialize
5591  *	@pmp: Port multiplier port number
5592  *
5593  *	Initialize @link.
5594  *
5595  *	LOCKING:
5596  *	Kernel thread context (may sleep)
5597  */
5598 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5599 {
5600 	int i;
5601 
5602 	/* clear everything except for devices */
5603 	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5604 	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5605 
5606 	link->ap = ap;
5607 	link->pmp = pmp;
5608 	link->active_tag = ATA_TAG_POISON;
5609 	link->hw_sata_spd_limit = UINT_MAX;
5610 
5611 	/* can't use iterator, ap isn't initialized yet */
5612 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5613 		struct ata_device *dev = &link->device[i];
5614 
5615 		dev->link = link;
5616 		dev->devno = dev - link->device;
5617 #ifdef CONFIG_ATA_ACPI
5618 		dev->gtf_filter = ata_acpi_gtf_filter;
5619 #endif
5620 		ata_dev_init(dev);
5621 	}
5622 }
5623 
5624 /**
5625  *	sata_link_init_spd - Initialize link->sata_spd_limit
5626  *	@link: Link to configure sata_spd_limit for
5627  *
5628  *	Initialize @link->[hw_]sata_spd_limit to the currently
5629  *	configured value.
5630  *
5631  *	LOCKING:
5632  *	Kernel thread context (may sleep).
5633  *
5634  *	RETURNS:
5635  *	0 on success, -errno on failure.
5636  */
5637 int sata_link_init_spd(struct ata_link *link)
5638 {
5639 	u8 spd;
5640 	int rc;
5641 
5642 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5643 	if (rc)
5644 		return rc;
5645 
5646 	spd = (link->saved_scontrol >> 4) & 0xf;
5647 	if (spd)
5648 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5649 
5650 	ata_force_link_limits(link);
5651 
5652 	link->sata_spd_limit = link->hw_sata_spd_limit;
5653 
5654 	return 0;
5655 }
5656 
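/*
 * Worked example with assumed values: if SControl reads 0x320, the SPD field
 * (bits 7:4) is 0x2, so hw_sata_spd_limit &= (1 << 2) - 1 = 0x3.  Only bits
 * 0 and 1 remain set, i.e. the link is limited to 1.5 and 3.0 Gbps no matter
 * what the PHY could otherwise negotiate.
 */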
5657 /**
5658  *	ata_port_alloc - allocate and initialize basic ATA port resources
5659  *	@host: ATA host this allocated port belongs to
5660  *
5661  *	Allocate and initialize basic ATA port resources.
5662  *
5663  *	RETURNS:
5664  *	Allocated ATA port on success, NULL on failure.
5665  *
5666  *	LOCKING:
5667  *	Inherited from calling layer (may sleep).
5668  */
5669 struct ata_port *ata_port_alloc(struct ata_host *host)
5670 {
5671 	struct ata_port *ap;
5672 
5673 	DPRINTK("ENTER\n");
5674 
5675 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5676 	if (!ap)
5677 		return NULL;
5678 
5679 	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5680 	ap->lock = &host->lock;
5681 	ap->print_id = -1;
5682 	ap->local_port_no = -1;
5683 	ap->host = host;
5684 	ap->dev = host->dev;
5685 
5686 #if defined(ATA_VERBOSE_DEBUG)
5687 	/* turn on all debugging levels */
5688 	ap->msg_enable = 0x00FF;
5689 #elif defined(ATA_DEBUG)
5690 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5691 #else
5692 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5693 #endif
5694 
5695 	mutex_init(&ap->scsi_scan_mutex);
5696 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5697 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5698 	INIT_LIST_HEAD(&ap->eh_done_q);
5699 	init_waitqueue_head(&ap->eh_wait_q);
5700 	init_completion(&ap->park_req_pending);
5701 	init_timer_deferrable(&ap->fastdrain_timer);
5702 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5703 	ap->fastdrain_timer.data = (unsigned long)ap;
5704 
5705 	ap->cbl = ATA_CBL_NONE;
5706 
5707 	ata_link_init(ap, &ap->link, 0);
5708 
5709 #ifdef ATA_IRQ_TRAP
5710 	ap->stats.unhandled_irq = 1;
5711 	ap->stats.idle_irq = 1;
5712 #endif
5713 	ata_sff_port_init(ap);
5714 
5715 	return ap;
5716 }
5717 
5718 static void ata_host_release(struct device *gendev, void *res)
5719 {
5720 	struct ata_host *host = dev_get_drvdata(gendev);
5721 	int i;
5722 
5723 	for (i = 0; i < host->n_ports; i++) {
5724 		struct ata_port *ap = host->ports[i];
5725 
5726 		if (!ap)
5727 			continue;
5728 
5729 		if (ap->scsi_host)
5730 			scsi_host_put(ap->scsi_host);
5731 
5732 		kfree(ap->pmp_link);
5733 		kfree(ap->slave_link);
5734 		kfree(ap);
5735 		host->ports[i] = NULL;
5736 	}
5737 
5738 	dev_set_drvdata(gendev, NULL);
5739 }
5740 
5741 /**
5742  *	ata_host_alloc - allocate and init basic ATA host resources
5743  *	@dev: generic device this host is associated with
5744  *	@max_ports: maximum number of ATA ports associated with this host
5745  *
5746  *	Allocate and initialize basic ATA host resources.  An LLD calls
5747  *	this function to allocate a host, then fully initializes it and
5748  *	attaches it using ata_host_register().
5749  *
5750  *	@max_ports ports are allocated and host->n_ports is
5751  *	initialized to @max_ports.  The caller is allowed to decrease
5752  *	host->n_ports before calling ata_host_register().  The unused
5753  *	ports will be automatically freed on registration.
5754  *
5755  *	RETURNS:
5756  *	Allocated ATA host on success, NULL on failure.
5757  *
5758  *	LOCKING:
5759  *	Inherited from calling layer (may sleep).
5760  */
5761 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5762 {
5763 	struct ata_host *host;
5764 	size_t sz;
5765 	int i;
5766 
5767 	DPRINTK("ENTER\n");
5768 
5769 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5770 		return NULL;
5771 
5772 	/* alloc a container for our list of ATA ports (buses) */
5773 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5774 	/* allocate the host as a device-managed resource (see ata_host_release) */
5775 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5776 	if (!host)
5777 		goto err_out;
5778 
5779 	devres_add(dev, host);
5780 	dev_set_drvdata(dev, host);
5781 
5782 	spin_lock_init(&host->lock);
5783 	mutex_init(&host->eh_mutex);
5784 	host->dev = dev;
5785 	host->n_ports = max_ports;
5786 
5787 	/* allocate ports bound to this host */
5788 	for (i = 0; i < max_ports; i++) {
5789 		struct ata_port *ap;
5790 
5791 		ap = ata_port_alloc(host);
5792 		if (!ap)
5793 			goto err_out;
5794 
5795 		ap->port_no = i;
5796 		host->ports[i] = ap;
5797 	}
5798 
5799 	devres_remove_group(dev, NULL);
5800 	return host;
5801 
5802  err_out:
5803 	devres_release_group(dev, NULL);
5804 	return NULL;
5805 }
5806 
5807 /**
5808  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5809  *	@dev: generic device this host is associated with
5810  *	@ppi: array of ATA port_info to initialize host with
5811  *	@n_ports: number of ATA ports attached to this host
5812  *
5813  *	Allocate an ATA host and initialize it with info from @ppi.  If
5814  *	@ppi is NULL terminated, it may contain fewer entries than
5815  *	@n_ports; the last entry is then used for all remaining ports.
5816  *
5817  *	RETURNS:
5818  *	Allocated ATA host on success, NULL on failure.
5819  *
5820  *	LOCKING:
5821  *	Inherited from calling layer (may sleep).
5822  */
5823 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5824 				      const struct ata_port_info * const * ppi,
5825 				      int n_ports)
5826 {
5827 	const struct ata_port_info *pi;
5828 	struct ata_host *host;
5829 	int i, j;
5830 
5831 	host = ata_host_alloc(dev, n_ports);
5832 	if (!host)
5833 		return NULL;
5834 
5835 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5836 		struct ata_port *ap = host->ports[i];
5837 
5838 		if (ppi[j])
5839 			pi = ppi[j++];
5840 
5841 		ap->pio_mask = pi->pio_mask;
5842 		ap->mwdma_mask = pi->mwdma_mask;
5843 		ap->udma_mask = pi->udma_mask;
5844 		ap->flags |= pi->flags;
5845 		ap->link.flags |= pi->link_flags;
5846 		ap->ops = pi->port_ops;
5847 
5848 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5849 			host->ops = pi->port_ops;
5850 	}
5851 
5852 	return host;
5853 }
5854 
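/*
 * Example (illustrative sketch): a two-port LLD can pass a NULL-terminated
 * @ppi with a single entry; that entry is then reused for the second port as
 * described above.  All example_* names and the transfer masks are made up.
 */
#if 0	/* usage sketch, not compiled */
static const struct ata_port_info example_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &sata_port_ops,
};

static struct ata_host *example_alloc_host(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &example_port_info, NULL };

	/* both ports inherit example_port_info */
	return ata_host_alloc_pinfo(dev, ppi, 2);
}
#endif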
5855 /**
5856  *	ata_slave_link_init - initialize slave link
5857  *	@ap: port to initialize slave link for
5858  *
5859  *	Create and initialize slave link for @ap.  This enables slave
5860  *	link handling on the port.
5861  *
5862  *	In libata, a port contains links and a link contains devices.
5863  *	There is a single host link, but if a PMP is attached to it,
5864  *	there can be multiple fan-out links.  On SATA, there's usually
5865  *	a single device connected to a link but PATA and SATA
5866  *	controllers emulating TF based interface can have two - master
5867  *	and slave.
5868  *
5869  *	However, there are a few controllers which don't fit into this
5870  *	abstraction too well - SATA controllers which emulate TF
5871  *	interface with both master and slave devices but also have
5872  *	separate SCR register sets for each device.  These controllers
5873  *	need separate links for physical link handling
5874  *	(e.g. onlineness, link speed) but should be treated like a
5875  *	traditional M/S controller for everything else (e.g. command
5876  *	issue, softreset).
5877  *
5878  *	slave_link is libata's way of handling this class of
5879  *	controllers without impacting core layer too much.  For
5880  *	anything other than physical link handling, the default host
5881  *	link is used for both master and slave.  For physical link
5882  *	handling, separate @ap->slave_link is used.  All dirty details
5883  *	are implemented inside libata core layer.  From LLD's POV, the
5884  *	only difference is that prereset, hardreset and postreset are
5885  *	called once more for the slave link, so the reset sequence
5886  *	looks like the following.
5887  *
5888  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5889  *	softreset(M) -> postreset(M) -> postreset(S)
5890  *
5891  *	Note that softreset is called only for the master.  Softreset
5892  *	resets both M/S by definition, so SRST on master should handle
5893  *	both (the standard method will work just fine).
5894  *
5895  *	LOCKING:
5896  *	Should be called before host is registered.
5897  *
5898  *	RETURNS:
5899  *	0 on success, -errno on failure.
5900  */
5901 int ata_slave_link_init(struct ata_port *ap)
5902 {
5903 	struct ata_link *link;
5904 
5905 	WARN_ON(ap->slave_link);
5906 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5907 
5908 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5909 	if (!link)
5910 		return -ENOMEM;
5911 
5912 	ata_link_init(ap, link, 1);
5913 	ap->slave_link = link;
5914 	return 0;
5915 }
5916 
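/*
 * Example (illustrative sketch): a controller with per-device SCR sets, as
 * described above, would set up the slave links once per port before the
 * host is registered.  The function name is made up.
 */
#if 0	/* usage sketch, not compiled */
static int example_init_slave_links(struct ata_host *host)
{
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		rc = ata_slave_link_init(host->ports[i]);
		if (rc)
			return rc;
	}
	return 0;
}
#endif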
5917 static void ata_host_stop(struct device *gendev, void *res)
5918 {
5919 	struct ata_host *host = dev_get_drvdata(gendev);
5920 	int i;
5921 
5922 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5923 
5924 	for (i = 0; i < host->n_ports; i++) {
5925 		struct ata_port *ap = host->ports[i];
5926 
5927 		if (ap->ops->port_stop)
5928 			ap->ops->port_stop(ap);
5929 	}
5930 
5931 	if (host->ops->host_stop)
5932 		host->ops->host_stop(host);
5933 }
5934 
5935 /**
5936  *	ata_finalize_port_ops - finalize ata_port_operations
5937  *	@ops: ata_port_operations to finalize
5938  *
5939  *	An ata_port_operations can inherit from another ops and that
5940  *	ops can again inherit from another.  This can go on as many
5941  *	times as necessary as long as there is no loop in the
5942  *	inheritance chain.
5943  *
5944  *	Ops tables are finalized when the host is started.  NULL or
5945  *	unspecified entries are inherited from the closest ancestor
5946  *	which has the method and the entry is populated with it.
5947  *	After finalization, the ops table directly points to all the
5948  *	methods and ->inherits is no longer necessary and cleared.
5949  *
5950  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5951  *
5952  *	LOCKING:
5953  *	None.
5954  */
5955 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5956 {
5957 	static DEFINE_SPINLOCK(lock);
5958 	const struct ata_port_operations *cur;
5959 	void **begin = (void **)ops;
5960 	void **end = (void **)&ops->inherits;
5961 	void **pp;
5962 
5963 	if (!ops || !ops->inherits)
5964 		return;
5965 
5966 	spin_lock(&lock);
5967 
5968 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5969 		void **inherit = (void **)cur;
5970 
5971 		for (pp = begin; pp < end; pp++, inherit++)
5972 			if (!*pp)
5973 				*pp = *inherit;
5974 	}
5975 
5976 	for (pp = begin; pp < end; pp++)
5977 		if (IS_ERR(*pp))
5978 			*pp = NULL;
5979 
5980 	ops->inherits = NULL;
5981 
5982 	spin_unlock(&lock);
5983 }
5984 
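/*
 * Example (illustrative sketch): an ops table as an LLD might declare it.
 * Unset methods are filled in from sata_port_ops at finalization time,
 * ATA_OP_NULL forces a method to NULL, and example_hardreset stands in for
 * a driver-private override.
 */
#if 0	/* usage sketch, not compiled */
static struct ata_port_operations example_port_ops = {
	.inherits	= &sata_port_ops,
	.softreset	= ATA_OP_NULL,		/* never use SRST on this HW */
	.hardreset	= example_hardreset,	/* driver-private method */
};
#endif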
5985 /**
5986  *	ata_host_start - start and freeze ports of an ATA host
5987  *	@host: ATA host to start ports for
5988  *
5989  *	Start and then freeze ports of @host.  Started status is
5990  *	recorded in host->flags, so this function can be called
5991  *	multiple times.  Ports are guaranteed to get started only
5992  *	once.  If host->ops isn't initialized yet, it's set to the
5993  *	first non-dummy port ops.
5994  *
5995  *	LOCKING:
5996  *	Inherited from calling layer (may sleep).
5997  *
5998  *	RETURNS:
5999  *	0 if all ports are started successfully, -errno otherwise.
6000  */
6001 int ata_host_start(struct ata_host *host)
6002 {
6003 	int have_stop = 0;
6004 	void *start_dr = NULL;
6005 	int i, rc;
6006 
6007 	if (host->flags & ATA_HOST_STARTED)
6008 		return 0;
6009 
6010 	ata_finalize_port_ops(host->ops);
6011 
6012 	for (i = 0; i < host->n_ports; i++) {
6013 		struct ata_port *ap = host->ports[i];
6014 
6015 		ata_finalize_port_ops(ap->ops);
6016 
6017 		if (!host->ops && !ata_port_is_dummy(ap))
6018 			host->ops = ap->ops;
6019 
6020 		if (ap->ops->port_stop)
6021 			have_stop = 1;
6022 	}
6023 
6024 	if (host->ops->host_stop)
6025 		have_stop = 1;
6026 
6027 	if (have_stop) {
6028 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6029 		if (!start_dr)
6030 			return -ENOMEM;
6031 	}
6032 
6033 	for (i = 0; i < host->n_ports; i++) {
6034 		struct ata_port *ap = host->ports[i];
6035 
6036 		if (ap->ops->port_start) {
6037 			rc = ap->ops->port_start(ap);
6038 			if (rc) {
6039 				if (rc != -ENODEV)
6040 					dev_err(host->dev,
6041 						"failed to start port %d (errno=%d)\n",
6042 						i, rc);
6043 				goto err_out;
6044 			}
6045 		}
6046 		ata_eh_freeze_port(ap);
6047 	}
6048 
6049 	if (start_dr)
6050 		devres_add(host->dev, start_dr);
6051 	host->flags |= ATA_HOST_STARTED;
6052 	return 0;
6053 
6054  err_out:
6055 	while (--i >= 0) {
6056 		struct ata_port *ap = host->ports[i];
6057 
6058 		if (ap->ops->port_stop)
6059 			ap->ops->port_stop(ap);
6060 	}
6061 	devres_free(start_dr);
6062 	return rc;
6063 }
6064 
6065 /**
6066  *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
6067  *	@host:	host to initialize
6068  *	@dev:	device host is attached to
6069  *	@ops:	port_ops
6070  *
6071  */
6072 void ata_host_init(struct ata_host *host, struct device *dev,
6073 		   struct ata_port_operations *ops)
6074 {
6075 	spin_lock_init(&host->lock);
6076 	mutex_init(&host->eh_mutex);
6077 	host->n_tags = ATA_MAX_QUEUE - 1;
6078 	host->dev = dev;
6079 	host->ops = ops;
6080 }
6081 
6082 void __ata_port_probe(struct ata_port *ap)
6083 {
6084 	struct ata_eh_info *ehi = &ap->link.eh_info;
6085 	unsigned long flags;
6086 
6087 	/* kick EH for boot probing */
6088 	spin_lock_irqsave(ap->lock, flags);
6089 
6090 	ehi->probe_mask |= ATA_ALL_DEVICES;
6091 	ehi->action |= ATA_EH_RESET;
6092 	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6093 
6094 	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6095 	ap->pflags |= ATA_PFLAG_LOADING;
6096 	ata_port_schedule_eh(ap);
6097 
6098 	spin_unlock_irqrestore(ap->lock, flags);
6099 }
6100 
6101 int ata_port_probe(struct ata_port *ap)
6102 {
6103 	int rc = 0;
6104 
6105 	if (ap->ops->error_handler) {
6106 		__ata_port_probe(ap);
6107 		ata_port_wait_eh(ap);
6108 	} else {
6109 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6110 		rc = ata_bus_probe(ap);
6111 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6112 	}
6113 	return rc;
6114 }
6115 
6116 
6117 static void async_port_probe(void *data, async_cookie_t cookie)
6118 {
6119 	struct ata_port *ap = data;
6120 
6121 	/*
6122 	 * If we're not allowed to scan this host in parallel,
6123 	 * we need to wait until all previous scans have completed
6124 	 * before going further.
6125 	 * Jeff Garzik says this is only within a controller, so we
6126 	 * don't need to wait for port 0, only for later ports.
6127 	 */
6128 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6129 		async_synchronize_cookie(cookie);
6130 
6131 	(void)ata_port_probe(ap);
6132 
6133 	/* in order to keep device order, we need to synchronize at this point */
6134 	async_synchronize_cookie(cookie);
6135 
6136 	ata_scsi_scan_host(ap, 1);
6137 }
6138 
6139 /**
6140  *	ata_host_register - register initialized ATA host
6141  *	@host: ATA host to register
6142  *	@sht: template for SCSI host
6143  *
6144  *	Register initialized ATA host.  @host is allocated using
6145  *	ata_host_alloc() and fully initialized by LLD.  This function
6146  *	starts ports, registers @host with ATA and SCSI layers and
6147  *	probes the attached devices.
6148  *
6149  *	LOCKING:
6150  *	Inherited from calling layer (may sleep).
6151  *
6152  *	RETURNS:
6153  *	0 on success, -errno otherwise.
6154  */
6155 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6156 {
6157 	int i, rc;
6158 
6159 	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6160 
6161 	/* host must have been started */
6162 	if (!(host->flags & ATA_HOST_STARTED)) {
6163 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6164 		WARN_ON(1);
6165 		return -EINVAL;
6166 	}
6167 
6168 	/* Blow away unused ports.  This happens when LLD can't
6169 	 * determine the exact number of ports to allocate at
6170 	 * allocation time.
6171 	 */
6172 	for (i = host->n_ports; host->ports[i]; i++)
6173 		kfree(host->ports[i]);
6174 
6175 	/* give ports names and add SCSI hosts */
6176 	for (i = 0; i < host->n_ports; i++) {
6177 		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6178 		host->ports[i]->local_port_no = i + 1;
6179 	}
6180 
6181 	/* Create associated sysfs transport objects  */
6182 	for (i = 0; i < host->n_ports; i++) {
6183 		rc = ata_tport_add(host->dev,host->ports[i]);
6184 		if (rc) {
6185 			goto err_tadd;
6186 		}
6187 	}
6188 
6189 	rc = ata_scsi_add_hosts(host, sht);
6190 	if (rc)
6191 		goto err_tadd;
6192 
6193 	/* set cable, sata_spd_limit and report */
6194 	for (i = 0; i < host->n_ports; i++) {
6195 		struct ata_port *ap = host->ports[i];
6196 		unsigned long xfer_mask;
6197 
6198 		/* set SATA cable type if still unset */
6199 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6200 			ap->cbl = ATA_CBL_SATA;
6201 
6202 		/* init sata_spd_limit to the current value */
6203 		sata_link_init_spd(&ap->link);
6204 		if (ap->slave_link)
6205 			sata_link_init_spd(ap->slave_link);
6206 
6207 		/* print per-port info to dmesg */
6208 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6209 					      ap->udma_mask);
6210 
6211 		if (!ata_port_is_dummy(ap)) {
6212 			ata_port_info(ap, "%cATA max %s %s\n",
6213 				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6214 				      ata_mode_string(xfer_mask),
6215 				      ap->link.eh_info.desc);
6216 			ata_ehi_clear_desc(&ap->link.eh_info);
6217 		} else
6218 			ata_port_info(ap, "DUMMY\n");
6219 	}
6220 
6221 	/* perform each probe asynchronously */
6222 	for (i = 0; i < host->n_ports; i++) {
6223 		struct ata_port *ap = host->ports[i];
6224 		async_schedule(async_port_probe, ap);
6225 	}
6226 
6227 	return 0;
6228 
6229  err_tadd:
6230 	while (--i >= 0) {
6231 		ata_tport_delete(host->ports[i]);
6232 	}
6233 	return rc;
6234 
6235 }
6236 
6237 /**
6238  *	ata_host_activate - start host, request IRQ and register it
6239  *	@host: target ATA host
6240  *	@irq: IRQ to request
6241  *	@irq_handler: irq_handler used when requesting IRQ
6242  *	@irq_flags: irq_flags used when requesting IRQ
6243  *	@sht: scsi_host_template to use when registering the host
6244  *
6245  *	After allocating an ATA host and initializing it, most libata
6246  *	LLDs perform three steps to activate the host - start host,
6247  *	request IRQ and register it.  This helper takes the necessary
6248  *	arguments and performs the three steps in one go.
6249  *
6250  *	An invalid IRQ skips the IRQ registration and expects the host to
6251  *	have set polling mode on the port. In this case, @irq_handler
6252  *	should be NULL.
6253  *
6254  *	LOCKING:
6255  *	Inherited from calling layer (may sleep).
6256  *
6257  *	RETURNS:
6258  *	0 on success, -errno otherwise.
6259  */
6260 int ata_host_activate(struct ata_host *host, int irq,
6261 		      irq_handler_t irq_handler, unsigned long irq_flags,
6262 		      struct scsi_host_template *sht)
6263 {
6264 	int i, rc;
6265 
6266 	rc = ata_host_start(host);
6267 	if (rc)
6268 		return rc;
6269 
6270 	/* Special case for polling mode */
6271 	if (!irq) {
6272 		WARN_ON(irq_handler);
6273 		return ata_host_register(host, sht);
6274 	}
6275 
6276 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6277 			      dev_name(host->dev), host);
6278 	if (rc)
6279 		return rc;
6280 
6281 	for (i = 0; i < host->n_ports; i++)
6282 		ata_port_desc(host->ports[i], "irq %d", irq);
6283 
6284 	rc = ata_host_register(host, sht);
6285 	/* if failed, just free the IRQ and leave ports alone */
6286 	if (rc)
6287 		devm_free_irq(host->dev, irq, host);
6288 
6289 	return rc;
6290 }
6291 
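/*
 * Example (illustrative sketch): the typical probe flow described above,
 * condensed for a single-port platform device.  All example_* names are
 * made up and error handling is reduced to the bare minimum.
 */
#if 0	/* usage sketch, not compiled */
static int example_platform_probe(struct platform_device *pdev)
{
	const struct ata_port_info *ppi[] = { &example_port_info, NULL };
	struct ata_host *host;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	/* start the port, request the IRQ and register, all in one call */
	return ata_host_activate(host, irq, example_interrupt, 0,
				 &example_sht);
}
#endif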
6292 /**
6293  *	ata_port_detach - Detach ATA port in preparation for device removal
6294  *	@ap: ATA port to be detached
6295  *
6296  *	Detach all ATA devices and the associated SCSI devices of @ap;
6297  *	then, remove the associated SCSI host.  @ap is guaranteed to
6298  *	be quiescent on return from this function.
6299  *
6300  *	LOCKING:
6301  *	Kernel thread context (may sleep).
6302  */
6303 static void ata_port_detach(struct ata_port *ap)
6304 {
6305 	unsigned long flags;
6306 	struct ata_link *link;
6307 	struct ata_device *dev;
6308 
6309 	if (!ap->ops->error_handler)
6310 		goto skip_eh;
6311 
6312 	/* tell EH we're leaving & flush EH */
6313 	spin_lock_irqsave(ap->lock, flags);
6314 	ap->pflags |= ATA_PFLAG_UNLOADING;
6315 	ata_port_schedule_eh(ap);
6316 	spin_unlock_irqrestore(ap->lock, flags);
6317 
6318 	/* wait till EH commits suicide */
6319 	ata_port_wait_eh(ap);
6320 
6321 	/* it better be dead now */
6322 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6323 
6324 	cancel_delayed_work_sync(&ap->hotplug_task);
6325 
6326  skip_eh:
6327 	/* clean up zpodd on port removal */
6328 	ata_for_each_link(link, ap, HOST_FIRST) {
6329 		ata_for_each_dev(dev, link, ALL) {
6330 			if (zpodd_dev_enabled(dev))
6331 				zpodd_exit(dev);
6332 		}
6333 	}
6334 	if (ap->pmp_link) {
6335 		int i;
6336 		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6337 			ata_tlink_delete(&ap->pmp_link[i]);
6338 	}
6339 	/* remove the associated SCSI host */
6340 	scsi_remove_host(ap->scsi_host);
6341 	ata_tport_delete(ap);
6342 }
6343 
6344 /**
6345  *	ata_host_detach - Detach all ports of an ATA host
6346  *	@host: Host to detach
6347  *
6348  *	Detach all ports of @host.
6349  *
6350  *	LOCKING:
6351  *	Kernel thread context (may sleep).
6352  */
6353 void ata_host_detach(struct ata_host *host)
6354 {
6355 	int i;
6356 
6357 	for (i = 0; i < host->n_ports; i++)
6358 		ata_port_detach(host->ports[i]);
6359 
6360 	/* the host is dead now, dissociate ACPI */
6361 	ata_acpi_dissociate(host);
6362 }
6363 
6364 #ifdef CONFIG_PCI
6365 
6366 /**
6367  *	ata_pci_remove_one - PCI layer callback for device removal
6368  *	@pdev: PCI device that was removed
6369  *
6370  *	PCI layer indicates to libata via this hook that hot-unplug or
6371  *	module unload event has occurred.  Detach all ports.  Resource
6372  *	release is handled via devres.
6373  *
6374  *	LOCKING:
6375  *	Inherited from PCI layer (may sleep).
6376  */
6377 void ata_pci_remove_one(struct pci_dev *pdev)
6378 {
6379 	struct ata_host *host = pci_get_drvdata(pdev);
6380 
6381 	ata_host_detach(host);
6382 }
6383 
6384 /* move to PCI subsystem */
6385 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6386 {
6387 	unsigned long tmp = 0;
6388 
6389 	switch (bits->width) {
6390 	case 1: {
6391 		u8 tmp8 = 0;
6392 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6393 		tmp = tmp8;
6394 		break;
6395 	}
6396 	case 2: {
6397 		u16 tmp16 = 0;
6398 		pci_read_config_word(pdev, bits->reg, &tmp16);
6399 		tmp = tmp16;
6400 		break;
6401 	}
6402 	case 4: {
6403 		u32 tmp32 = 0;
6404 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6405 		tmp = tmp32;
6406 		break;
6407 	}
6408 
6409 	default:
6410 		return -EINVAL;
6411 	}
6412 
6413 	tmp &= bits->mask;
6414 
6415 	return (tmp == bits->val) ? 1 : 0;
6416 }
6417 
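/*
 * Example (illustrative sketch): typical use of pci_test_config_bits() to
 * check per-channel "enable" bits in PCI config space.  The register offsets
 * and bit positions here are only an illustration.
 */
#if 0	/* usage sketch, not compiled */
static const struct pci_bits example_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* channel 0: byte at 0x41, bit 7 */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* channel 1: byte at 0x43, bit 7 */
};

static int example_channel_enabled(struct pci_dev *pdev, unsigned int ch)
{
	/* 1 if (reg & mask) == val, 0 if not, -EINVAL on unsupported width */
	return pci_test_config_bits(pdev, &example_enable_bits[ch]);
}
#endif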
6418 #ifdef CONFIG_PM
6419 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6420 {
6421 	pci_save_state(pdev);
6422 	pci_disable_device(pdev);
6423 
6424 	if (mesg.event & PM_EVENT_SLEEP)
6425 		pci_set_power_state(pdev, PCI_D3hot);
6426 }
6427 
6428 int ata_pci_device_do_resume(struct pci_dev *pdev)
6429 {
6430 	int rc;
6431 
6432 	pci_set_power_state(pdev, PCI_D0);
6433 	pci_restore_state(pdev);
6434 
6435 	rc = pcim_enable_device(pdev);
6436 	if (rc) {
6437 		dev_err(&pdev->dev,
6438 			"failed to enable device after resume (%d)\n", rc);
6439 		return rc;
6440 	}
6441 
6442 	pci_set_master(pdev);
6443 	return 0;
6444 }
6445 
6446 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6447 {
6448 	struct ata_host *host = pci_get_drvdata(pdev);
6449 	int rc = 0;
6450 
6451 	rc = ata_host_suspend(host, mesg);
6452 	if (rc)
6453 		return rc;
6454 
6455 	ata_pci_device_do_suspend(pdev, mesg);
6456 
6457 	return 0;
6458 }
6459 
6460 int ata_pci_device_resume(struct pci_dev *pdev)
6461 {
6462 	struct ata_host *host = pci_get_drvdata(pdev);
6463 	int rc;
6464 
6465 	rc = ata_pci_device_do_resume(pdev);
6466 	if (rc == 0)
6467 		ata_host_resume(host);
6468 	return rc;
6469 }
6470 #endif /* CONFIG_PM */
6471 
6472 #endif /* CONFIG_PCI */
6473 
6474 /**
6475  *	ata_platform_remove_one - Platform layer callback for device removal
6476  *	@pdev: Platform device that was removed
6477  *
6478  *	Platform layer indicates to libata via this hook that hot-unplug or
6479  *	module unload event has occurred.  Detach all ports.  Resource
6480  *	release is handled via devres.
6481  *
6482  *	LOCKING:
6483  *	Inherited from platform layer (may sleep).
6484  */
6485 int ata_platform_remove_one(struct platform_device *pdev)
6486 {
6487 	struct ata_host *host = platform_get_drvdata(pdev);
6488 
6489 	ata_host_detach(host);
6490 
6491 	return 0;
6492 }
6493 
6494 static int __init ata_parse_force_one(char **cur,
6495 				      struct ata_force_ent *force_ent,
6496 				      const char **reason)
6497 {
6498 	/* FIXME: Currently, there's no way to tag init const data and
6499 	 * using __initdata causes build failure on some versions of
6500 	 * gcc.  Once __initdataconst is implemented, add const to the
6501 	 * following structure.
6502 	 */
6503 	static struct ata_force_param force_tbl[] __initdata = {
6504 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6505 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6506 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6507 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6508 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6509 		{ "sata",	.cbl		= ATA_CBL_SATA },
6510 		{ "1.5Gbps",	.spd_limit	= 1 },
6511 		{ "3.0Gbps",	.spd_limit	= 2 },
6512 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6513 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6514 		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6515 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6516 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6517 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6518 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6519 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6520 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6521 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6522 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6523 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6524 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6525 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6526 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6527 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6528 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6529 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6530 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6531 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6532 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6533 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6534 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6535 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6536 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6537 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6538 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6539 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6540 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6541 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6542 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6543 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6544 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6545 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6546 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6547 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6548 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6549 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6550 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6551 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6552 		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6553 		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6554 		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6555 	};
6556 	char *start = *cur, *p = *cur;
6557 	char *id, *val, *endp;
6558 	const struct ata_force_param *match_fp = NULL;
6559 	int nr_matches = 0, i;
6560 
6561 	/* find where this param ends and update *cur */
6562 	while (*p != '\0' && *p != ',')
6563 		p++;
6564 
6565 	if (*p == '\0')
6566 		*cur = p;
6567 	else
6568 		*cur = p + 1;
6569 
6570 	*p = '\0';
6571 
6572 	/* parse */
6573 	p = strchr(start, ':');
6574 	if (!p) {
6575 		val = strstrip(start);
6576 		goto parse_val;
6577 	}
6578 	*p = '\0';
6579 
6580 	id = strstrip(start);
6581 	val = strstrip(p + 1);
6582 
6583 	/* parse id */
6584 	p = strchr(id, '.');
6585 	if (p) {
6586 		*p++ = '\0';
6587 		force_ent->device = simple_strtoul(p, &endp, 10);
6588 		if (p == endp || *endp != '\0') {
6589 			*reason = "invalid device";
6590 			return -EINVAL;
6591 		}
6592 	}
6593 
6594 	force_ent->port = simple_strtoul(id, &endp, 10);
6595 	if (p == endp || *endp != '\0') {
6596 		*reason = "invalid port/link";
6597 		return -EINVAL;
6598 	}
6599 
6600  parse_val:
6601 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6602 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6603 		const struct ata_force_param *fp = &force_tbl[i];
6604 
6605 		if (strncasecmp(val, fp->name, strlen(val)))
6606 			continue;
6607 
6608 		nr_matches++;
6609 		match_fp = fp;
6610 
6611 		if (strcasecmp(val, fp->name) == 0) {
6612 			nr_matches = 1;
6613 			break;
6614 		}
6615 	}
6616 
6617 	if (!nr_matches) {
6618 		*reason = "unknown value";
6619 		return -EINVAL;
6620 	}
6621 	if (nr_matches > 1) {
6622 		*reason = "ambiguous value";
6623 		return -EINVAL;
6624 	}
6625 
6626 	force_ent->param = *match_fp;
6627 
6628 	return 0;
6629 }
6630 
6631 static void __init ata_parse_force_param(void)
6632 {
6633 	int idx = 0, size = 1;
6634 	int last_port = -1, last_device = -1;
6635 	char *p, *cur, *next;
6636 
6637 	/* calculate maximum number of params and allocate force_tbl */
6638 	for (p = ata_force_param_buf; *p; p++)
6639 		if (*p == ',')
6640 			size++;
6641 
6642 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6643 	if (!ata_force_tbl) {
6644 		printk(KERN_WARNING "ata: failed to extend force table, "
6645 		       "libata.force ignored\n");
6646 		return;
6647 	}
6648 
6649 	/* parse and populate the table */
6650 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6651 		const char *reason = "";
6652 		struct ata_force_ent te = { .port = -1, .device = -1 };
6653 
6654 		next = cur;
6655 		if (ata_parse_force_one(&next, &te, &reason)) {
6656 			printk(KERN_WARNING "ata: failed to parse force "
6657 			       "parameter \"%s\" (%s)\n",
6658 			       cur, reason);
6659 			continue;
6660 		}
6661 
6662 		if (te.port == -1) {
6663 			te.port = last_port;
6664 			te.device = last_device;
6665 		}
6666 
6667 		ata_force_tbl[idx++] = te;
6668 
6669 		last_port = te.port;
6670 		last_device = te.device;
6671 	}
6672 
6673 	ata_force_tbl_size = idx;
6674 }
6675 
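/*
 * Example command lines accepted by the parser above (the port/device
 * numbers are illustrative; values must match entries in force_tbl):
 *
 *	libata.force=noncq		apply to all ports and devices
 *	libata.force=2:1.5Gbps		limit port ata2 to 1.5 Gbps
 *	libata.force=1.00:disable	disable device 0 on port ata1
 *	libata.force=3:norst,3.00:udma4	multiple entries, comma separated
 */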
6676 static int __init ata_init(void)
6677 {
6678 	int rc;
6679 
6680 	ata_parse_force_param();
6681 
6682 	rc = ata_sff_init();
6683 	if (rc) {
6684 		kfree(ata_force_tbl);
6685 		return rc;
6686 	}
6687 
6688 	libata_transport_init();
6689 	ata_scsi_transport_template = ata_attach_transport();
6690 	if (!ata_scsi_transport_template) {
6691 		ata_sff_exit();
6692 		rc = -ENOMEM;
6693 		goto err_out;
6694 	}
6695 
6696 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6697 	return 0;
6698 
6699 err_out:
6700 	return rc;
6701 }
6702 
6703 static void __exit ata_exit(void)
6704 {
6705 	ata_release_transport(ata_scsi_transport_template);
6706 	libata_transport_exit();
6707 	ata_sff_exit();
6708 	kfree(ata_force_tbl);
6709 }
6710 
6711 subsys_initcall(ata_init);
6712 module_exit(ata_exit);
6713 
6714 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6715 
6716 int ata_ratelimit(void)
6717 {
6718 	return __ratelimit(&ratelimit);
6719 }
6720 
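/*
 * Example (illustrative sketch): ata_ratelimit() is meant to throttle
 * messages emitted from hot paths such as interrupt handlers.  The function
 * name below is made up.
 */
#if 0	/* usage sketch, not compiled */
static void example_report_serror(struct ata_port *ap, u32 serror)
{
	if (ata_ratelimit())
		ata_port_warn(ap, "SError 0x%x\n", serror);
}
#endif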
6721 /**
6722  *	ata_msleep - ATA EH owner aware msleep
6723  *	@ap: ATA port to attribute the sleep to
6724  *	@msecs: duration to sleep in milliseconds
6725  *
6726  *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6727  *	ownership is released before going to sleep and reacquired
6728  *	after the sleep is complete.  IOW, other ports sharing the
6729  *	@ap->host will be allowed to own the EH while this task is
6730  *	sleeping.
6731  *
6732  *	LOCKING:
6733  *	Might sleep.
6734  */
6735 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6736 {
6737 	bool owns_eh = ap && ap->host->eh_owner == current;
6738 
6739 	if (owns_eh)
6740 		ata_eh_release(ap);
6741 
6742 	msleep(msecs);
6743 
6744 	if (owns_eh)
6745 		ata_eh_acquire(ap);
6746 }
6747 
6748 /**
6749  *	ata_wait_register - wait until register value changes
6750  *	@ap: ATA port to wait register for, can be NULL
6751  *	@reg: IO-mapped register
6752  *	@mask: Mask to apply to read register value
6753  *	@val: Wait condition
6754  *	@interval: polling interval in milliseconds
6755  *	@timeout: timeout in milliseconds
6756  *
6757  *	Waiting for some bits of register to change is a common
6758  *	operation for ATA controllers.  This function reads 32bit LE
6759  *	IO-mapped register @reg and tests for the following condition.
6760  *
6761  *	(*@reg & mask) != val
6762  *
6763  *	If the condition is met, it returns; otherwise, the process is
6764  *	repeated every @interval milliseconds until @timeout expires.
6765  *
6766  *	LOCKING:
6767  *	Kernel thread context (may sleep)
6768  *
6769  *	RETURNS:
6770  *	The final register value.
6771  */
6772 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6773 		      unsigned long interval, unsigned long timeout)
6774 {
6775 	unsigned long deadline;
6776 	u32 tmp;
6777 
6778 	tmp = ioread32(reg);
6779 
6780 	/* Calculate timeout _after_ the first read to make sure
6781 	 * preceding writes reach the controller before starting to
6782 	 * eat away the timeout.
6783 	 */
6784 	deadline = ata_deadline(jiffies, timeout);
6785 
6786 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6787 		ata_msleep(ap, interval);
6788 		tmp = ioread32(reg);
6789 	}
6790 
6791 	return tmp;
6792 }
6793 
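/*
 * Example (illustrative sketch): waiting for a controller "busy" bit to
 * clear.  EXAMPLE_CMD_REG and EXAMPLE_BUSY are made-up controller-specific
 * definitions; the helper polls while (reg & mask) == val.
 */
#if 0	/* usage sketch, not compiled */
static int example_wait_not_busy(struct ata_port *ap, void __iomem *mmio)
{
	u32 status;

	/* poll every 10 ms, give up after 1000 ms */
	status = ata_wait_register(ap, mmio + EXAMPLE_CMD_REG,
				   EXAMPLE_BUSY, EXAMPLE_BUSY, 10, 1000);

	return (status & EXAMPLE_BUSY) ? -EBUSY : 0;
}
#endif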
6794 /**
6795  *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
6796  *	@link: Link receiving the event
6797  *
6798  *	Test whether the received PHY event has to be ignored or not.
6799  *
6800  *	LOCKING:
6801  *	None.
6802  *
6803  *	RETURNS:
6804  *	True if the event has to be ignored.
6805  */
6806 bool sata_lpm_ignore_phy_events(struct ata_link *link)
6807 {
6808 	unsigned long lpm_timeout = link->last_lpm_change +
6809 				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6810 
6811 	/* if LPM is enabled, PHYRDY doesn't mean anything */
6812 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
6813 		return true;
6814 
6815 	/* ignore the first PHY event after the LPM policy changed
6816 	 * as it might be spurious
6817 	 */
6818 	if ((link->flags & ATA_LFLAG_CHANGED) &&
6819 	    time_before(jiffies, lpm_timeout))
6820 		return true;
6821 
6822 	return false;
6823 }
6824 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
6825 
6826 /*
6827  * Dummy port_ops
6828  */
6829 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6830 {
6831 	return AC_ERR_SYSTEM;
6832 }
6833 
6834 static void ata_dummy_error_handler(struct ata_port *ap)
6835 {
6836 	/* truly dummy */
6837 }
6838 
6839 struct ata_port_operations ata_dummy_port_ops = {
6840 	.qc_prep		= ata_noop_qc_prep,
6841 	.qc_issue		= ata_dummy_qc_issue,
6842 	.error_handler		= ata_dummy_error_handler,
6843 	.sched_eh		= ata_std_sched_eh,
6844 	.end_eh			= ata_std_end_eh,
6845 };
6846 
6847 const struct ata_port_info ata_dummy_port_info = {
6848 	.port_ops		= &ata_dummy_port_ops,
6849 };
6850 
6851 /*
6852  * Utility print functions
6853  */
6854 void ata_port_printk(const struct ata_port *ap, const char *level,
6855 		     const char *fmt, ...)
6856 {
6857 	struct va_format vaf;
6858 	va_list args;
6859 
6860 	va_start(args, fmt);
6861 
6862 	vaf.fmt = fmt;
6863 	vaf.va = &args;
6864 
6865 	printk("%sata%u: %pV", level, ap->print_id, &vaf);
6866 
6867 	va_end(args);
6868 }
6869 EXPORT_SYMBOL(ata_port_printk);
6870 
6871 void ata_link_printk(const struct ata_link *link, const char *level,
6872 		     const char *fmt, ...)
6873 {
6874 	struct va_format vaf;
6875 	va_list args;
6876 
6877 	va_start(args, fmt);
6878 
6879 	vaf.fmt = fmt;
6880 	vaf.va = &args;
6881 
6882 	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6883 		printk("%sata%u.%02u: %pV",
6884 		       level, link->ap->print_id, link->pmp, &vaf);
6885 	else
6886 		printk("%sata%u: %pV",
6887 		       level, link->ap->print_id, &vaf);
6888 
6889 	va_end(args);
6890 }
6891 EXPORT_SYMBOL(ata_link_printk);
6892 
6893 void ata_dev_printk(const struct ata_device *dev, const char *level,
6894 		    const char *fmt, ...)
6895 {
6896 	struct va_format vaf;
6897 	va_list args;
6898 
6899 	va_start(args, fmt);
6900 
6901 	vaf.fmt = fmt;
6902 	vaf.va = &args;
6903 
6904 	printk("%sata%u.%02u: %pV",
6905 	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6906 	       &vaf);
6907 
6908 	va_end(args);
6909 }
6910 EXPORT_SYMBOL(ata_dev_printk);
6911 
6912 void ata_print_version(const struct device *dev, const char *version)
6913 {
6914 	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6915 }
6916 EXPORT_SYMBOL(ata_print_version);
6917 
6918 /*
6919  * libata is essentially a library of internal helper functions for
6920  * low-level ATA host controller drivers.  As such, the API/ABI is
6921  * likely to change as new drivers are added and updated.
6922  * Do not depend on ABI/API stability.
6923  */
6924 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6925 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6926 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6927 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6928 EXPORT_SYMBOL_GPL(sata_port_ops);
6929 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6930 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6931 EXPORT_SYMBOL_GPL(ata_link_next);
6932 EXPORT_SYMBOL_GPL(ata_dev_next);
6933 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6934 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6935 EXPORT_SYMBOL_GPL(ata_host_init);
6936 EXPORT_SYMBOL_GPL(ata_host_alloc);
6937 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6938 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6939 EXPORT_SYMBOL_GPL(ata_host_start);
6940 EXPORT_SYMBOL_GPL(ata_host_register);
6941 EXPORT_SYMBOL_GPL(ata_host_activate);
6942 EXPORT_SYMBOL_GPL(ata_host_detach);
6943 EXPORT_SYMBOL_GPL(ata_sg_init);
6944 EXPORT_SYMBOL_GPL(ata_qc_complete);
6945 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6946 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6947 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6948 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6949 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6950 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6951 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6952 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6953 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6954 EXPORT_SYMBOL_GPL(ata_mode_string);
6955 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6956 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6957 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6958 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6959 EXPORT_SYMBOL_GPL(ata_dev_disable);
6960 EXPORT_SYMBOL_GPL(sata_set_spd);
6961 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6962 EXPORT_SYMBOL_GPL(sata_link_debounce);
6963 EXPORT_SYMBOL_GPL(sata_link_resume);
6964 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6965 EXPORT_SYMBOL_GPL(ata_std_prereset);
6966 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6967 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6968 EXPORT_SYMBOL_GPL(ata_std_postreset);
6969 EXPORT_SYMBOL_GPL(ata_dev_classify);
6970 EXPORT_SYMBOL_GPL(ata_dev_pair);
6971 EXPORT_SYMBOL_GPL(ata_ratelimit);
6972 EXPORT_SYMBOL_GPL(ata_msleep);
6973 EXPORT_SYMBOL_GPL(ata_wait_register);
6974 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6975 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6976 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6977 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6978 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6979 EXPORT_SYMBOL_GPL(sata_scr_valid);
6980 EXPORT_SYMBOL_GPL(sata_scr_read);
6981 EXPORT_SYMBOL_GPL(sata_scr_write);
6982 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6983 EXPORT_SYMBOL_GPL(ata_link_online);
6984 EXPORT_SYMBOL_GPL(ata_link_offline);
6985 #ifdef CONFIG_PM
6986 EXPORT_SYMBOL_GPL(ata_host_suspend);
6987 EXPORT_SYMBOL_GPL(ata_host_resume);
6988 #endif /* CONFIG_PM */
6989 EXPORT_SYMBOL_GPL(ata_id_string);
6990 EXPORT_SYMBOL_GPL(ata_id_c_string);
6991 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6992 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6993 
6994 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6995 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6996 EXPORT_SYMBOL_GPL(ata_timing_compute);
6997 EXPORT_SYMBOL_GPL(ata_timing_merge);
6998 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6999 
7000 #ifdef CONFIG_PCI
7001 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7002 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7003 #ifdef CONFIG_PM
7004 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7005 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7006 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7007 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7008 #endif /* CONFIG_PM */
7009 #endif /* CONFIG_PCI */
7010 
7011 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7012 
7013 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7014 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7015 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7016 EXPORT_SYMBOL_GPL(ata_port_desc);
7017 #ifdef CONFIG_PCI
7018 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7019 #endif /* CONFIG_PCI */
7020 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7021 EXPORT_SYMBOL_GPL(ata_link_abort);
7022 EXPORT_SYMBOL_GPL(ata_port_abort);
7023 EXPORT_SYMBOL_GPL(ata_port_freeze);
7024 EXPORT_SYMBOL_GPL(sata_async_notification);
7025 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7026 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7027 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7028 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7029 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7030 EXPORT_SYMBOL_GPL(ata_do_eh);
7031 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7032 
7033 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7034 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7035 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7036 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7037 EXPORT_SYMBOL_GPL(ata_cable_sata);
7038