/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
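/*
 * interval is the polling period while waiting for SStatus to settle,
 * duration is how long the reading must stay stable, and timeout bounds
 * the whole wait; consumed by sata_link_debounce()/sata_link_resume().
 */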
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
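/* e.g. "libata.force=1.00:udma4" limits port 1, device 0 to UDMA/66 */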

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
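	/* SStatus DET field (bits 3:0) == 0x3: device present and PHY online */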
	return (sstatus & 0xf) == 0x3;
}

/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
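/* normally used via the ata_for_each_link() iterator in <linux/libata.h> */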

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link, not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through */
	default:
		return ATAPI_MISC;
	}
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
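	/* a Register - Host to Device FIS is 5 DWORDs (20 bytes), fis[0..19] */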
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
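/*
 * Indexed as base + fua(4) + lba48(2) + write(1), where base is 0 for PIO
 * multi, 8 for PIO and 16 for DMA; zero entries are invalid combinations.
 * See ata_rwcmd_protocol() below.
 */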

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *	@class: IO priority class
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
			if (class == IOPRIO_CLASS_RT)
				tf->hob_nsect |= ATA_PRIO_HIGH <<
						 ATA_SHIFT_PRIO;
		}
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;
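		/* e.g. with 16 heads and 63 sectors/track, block 5000 gives
		 * track 79, cyl 4, head 15, sect 24
		 */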

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
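/*
 * Each entry maps the xfer_mask bit range [shift, shift + bits) onto
 * consecutive XFER_* values starting at base; { -1, } terminates the table.
 */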

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers, so we
	 * check only those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec never mentioned using different signatures for ATA/ATAPI
	 * devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 shortly thereafter dropped its descriptions of
	 * 0x3c/0xc3 and 0x69/0x96 and declared them reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports a
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
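	/* LBA48 capacity is IDENTIFY words 100-103, LBA28 capacity words 60-61 */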
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
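		/* e.g. word 64 == 0x03 (PIO3 and PIO4) yields pio_mask 0x1f, PIO0-4 */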
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However, it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}
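/*
 * For a typical caller, see ata_read_native_max_address() above: build the
 * taskfile with ata_tf_init(), set command/protocol/flags, then check the
 * returned AC_ERR_* mask.
 */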
1766  
1767  /**
1768   *	ata_pio_need_iordy	-	check if iordy needed
1769   *	@adev: ATA device
1770   *
1771   *	Check if the current speed of the device requires IORDY. Used
1772   *	by various controllers for chip configuration.
1773   */
ata_pio_need_iordy(const struct ata_device * adev)1774  unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1775  {
1776  	/* Don't set IORDY if we're preparing for reset.  IORDY may
1777  	 * lead to controller lock up on certain controllers if the
1778  	 * port is not occupied.  See bko#11703 for details.
1779  	 */
1780  	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1781  		return 0;
1782  	/* Controller doesn't support IORDY.  Probably a pointless
1783  	 * check as the caller should know this.
1784  	 */
1785  	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1786  		return 0;
1787  	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1788  	if (ata_id_is_cfa(adev->id)
1789  	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1790  		return 0;
1791  	/* PIO3 and higher it is mandatory */
1792  	if (adev->pio_mode > XFER_PIO_2)
1793  		return 1;
1794  	/* We turn it on when possible */
1795  	if (ata_id_has_iordy(adev->id))
1796  		return 1;
1797  	return 0;
1798  }
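/*
 * Illustrative sketch (editorial, not part of libata-core.c): how a
 * PATA controller driver's ->set_piomode() might consult
 * ata_pio_need_iordy() while programming chip timings.  The register
 * layout, EXAMPLE_IORDY_EN and example_write_timing() are hypothetical.
 */
#define EXAMPLE_IORDY_EN	0x80	/* hypothetical timing-register bit */

static void example_write_timing(struct ata_port *ap, int devno, u8 val)
{
	/* hypothetical: poke a chip-specific timing register */
}

static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	u8 timing = adev->pio_mode - XFER_PIO_0;	/* mode number 0-6 */

	if (ata_pio_need_iordy(adev))
		timing |= EXAMPLE_IORDY_EN;
	example_write_timing(ap, adev->devno, timing);
}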
1799  
1800  /**
1801   *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1802   *	@adev: ATA device
1803   *
1804   *	Compute the highest mode possible if we are not using iordy. Return
1805   *	-1 if no iordy mode is available.
1806   */
1807  static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1808  {
1809  	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1810  	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1811  		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1812  		/* Is the speed faster than the drive allows non IORDY ? */
1813  		if (pio) {
1814  			/* This is cycle times not frequency - watch the logic! */
1815  			if (pio > 240)	/* PIO2 is 240nS per cycle */
1816  				return 3 << ATA_SHIFT_PIO;
1817  			return 7 << ATA_SHIFT_PIO;
1818  		}
1819  	}
1820  	return 3 << ATA_SHIFT_PIO;
1821  }
1822  
1823  /**
1824   *	ata_do_dev_read_id		-	default ID read method
1825   *	@dev: device
1826   *	@tf: proposed taskfile
1827   *	@id: data buffer
1828   *
1829   *	Issue the identify taskfile and hand back the buffer containing
1830   *	identify data. For some RAID controllers and for pre ATA devices
1831   *	this function is wrapped or replaced by the driver
1832   */
1833  unsigned int ata_do_dev_read_id(struct ata_device *dev,
1834  					struct ata_taskfile *tf, u16 *id)
1835  {
1836  	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1837  				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1838  }
1839  
1840  /**
1841   *	ata_dev_read_id - Read ID data from the specified device
1842   *	@dev: target device
1843   *	@p_class: pointer to class of the target device (may be changed)
1844   *	@flags: ATA_READID_* flags
1845   *	@id: buffer to read IDENTIFY data into
1846   *
1847   *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1848   *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1849   *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1850   *	for pre-ATA4 drives.
1851   *
1852   *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1853   *	now we abort if we hit that case.
1854   *
1855   *	LOCKING:
1856   *	Kernel thread context (may sleep)
1857   *
1858   *	RETURNS:
1859   *	0 on success, -errno otherwise.
1860   */
1861  int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1862  		    unsigned int flags, u16 *id)
1863  {
1864  	struct ata_port *ap = dev->link->ap;
1865  	unsigned int class = *p_class;
1866  	struct ata_taskfile tf;
1867  	unsigned int err_mask = 0;
1868  	const char *reason;
1869  	bool is_semb = class == ATA_DEV_SEMB;
1870  	int may_fallback = 1, tried_spinup = 0;
1871  	int rc;
1872  
1873  	if (ata_msg_ctl(ap))
1874  		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1875  
1876  retry:
1877  	ata_tf_init(dev, &tf);
1878  
1879  	switch (class) {
1880  	case ATA_DEV_SEMB:
1881  		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
1882  	case ATA_DEV_ATA:
1883  	case ATA_DEV_ZAC:
1884  		tf.command = ATA_CMD_ID_ATA;
1885  		break;
1886  	case ATA_DEV_ATAPI:
1887  		tf.command = ATA_CMD_ID_ATAPI;
1888  		break;
1889  	default:
1890  		rc = -ENODEV;
1891  		reason = "unsupported class";
1892  		goto err_out;
1893  	}
1894  
1895  	tf.protocol = ATA_PROT_PIO;
1896  
1897  	/* Some devices choke if TF registers contain garbage.  Make
1898  	 * sure those are properly initialized.
1899  	 */
1900  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1901  
1902  	/* Device presence detection is unreliable on some
1903  	 * controllers.  Always poll IDENTIFY if available.
1904  	 */
1905  	tf.flags |= ATA_TFLAG_POLLING;
1906  
1907  	if (ap->ops->read_id)
1908  		err_mask = ap->ops->read_id(dev, &tf, id);
1909  	else
1910  		err_mask = ata_do_dev_read_id(dev, &tf, id);
1911  
1912  	if (err_mask) {
1913  		if (err_mask & AC_ERR_NODEV_HINT) {
1914  			ata_dev_dbg(dev, "NODEV after polling detection\n");
1915  			return -ENOENT;
1916  		}
1917  
1918  		if (is_semb) {
1919  			ata_dev_info(dev,
1920  		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1921  			/* SEMB is not supported yet */
1922  			*p_class = ATA_DEV_SEMB_UNSUP;
1923  			return 0;
1924  		}
1925  
1926  		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1927  			/* Device or controller might have reported
1928  			 * the wrong device class.  Give a shot at the
1929  			 * other IDENTIFY if the current one is
1930  			 * aborted by the device.
1931  			 */
1932  			if (may_fallback) {
1933  				may_fallback = 0;
1934  
1935  				if (class == ATA_DEV_ATA)
1936  					class = ATA_DEV_ATAPI;
1937  				else
1938  					class = ATA_DEV_ATA;
1939  				goto retry;
1940  			}
1941  
1942  			/* Control reaches here iff the device aborted
1943  			 * both flavors of IDENTIFYs which happens
1944  			 * sometimes with phantom devices.
1945  			 */
1946  			ata_dev_dbg(dev,
1947  				    "both IDENTIFYs aborted, assuming NODEV\n");
1948  			return -ENOENT;
1949  		}
1950  
1951  		rc = -EIO;
1952  		reason = "I/O error";
1953  		goto err_out;
1954  	}
1955  
1956  	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1957  		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1958  			    "class=%d may_fallback=%d tried_spinup=%d\n",
1959  			    class, may_fallback, tried_spinup);
1960  		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1961  			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1962  	}
1963  
1964  	/* Falling back doesn't make sense if ID data was read
1965  	 * successfully at least once.
1966  	 */
1967  	may_fallback = 0;
1968  
1969  	swap_buf_le16(id, ATA_ID_WORDS);
1970  
1971  	/* sanity check */
1972  	rc = -EINVAL;
1973  	reason = "device reports invalid type";
1974  
1975  	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1976  		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1977  			goto err_out;
1978  		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1979  							ata_id_is_ata(id)) {
1980  			ata_dev_dbg(dev,
1981  				"host indicates ignore ATA devices, ignored\n");
1982  			return -ENOENT;
1983  		}
1984  	} else {
1985  		if (ata_id_is_ata(id))
1986  			goto err_out;
1987  	}
1988  
1989  	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1990  		tried_spinup = 1;
1991  		/*
1992  		 * Drive powered-up in standby mode, and requires a specific
1993  		 * SET_FEATURES spin-up subcommand before it will accept
1994  		 * anything other than the original IDENTIFY command.
1995  		 */
1996  		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1997  		if (err_mask && id[2] != 0x738c) {
1998  			rc = -EIO;
1999  			reason = "SPINUP failed";
2000  			goto err_out;
2001  		}
2002  		/*
2003  		 * If the drive initially returned incomplete IDENTIFY info,
2004  		 * we now must reissue the IDENTIFY command.
2005  		 */
2006  		if (id[2] == 0x37c8)
2007  			goto retry;
2008  	}
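	/*
	 * Editorial note: word 2 of the IDENTIFY data is the "Specific
	 * configuration" word (ATA/ATAPI-6): 0x37c8 means spin-up is
	 * required and the IDENTIFY data is incomplete, while 0x738c
	 * means spin-up is required but the data is complete.  That is
	 * why only the 0x37c8 case forces a retry above.
	 */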
2009  
2010  	if ((flags & ATA_READID_POSTRESET) &&
2011  	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2012  		/*
2013  		 * The exact sequence expected by certain pre-ATA4 drives is:
2014  		 * SRST RESET
2015  		 * IDENTIFY (optional in early ATA)
2016  		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2017  		 * anything else..
2018  		 * Some drives were very specific about that exact sequence.
2019  		 *
2020  		 * Note that ATA4 says lba is mandatory so the second check
2021  		 * should never trigger.
2022  		 */
2023  		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2024  			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2025  			if (err_mask) {
2026  				rc = -EIO;
2027  				reason = "INIT_DEV_PARAMS failed";
2028  				goto err_out;
2029  			}
2030  
2031  			/* current CHS translation info (id[53-58]) might be
2032  			 * changed. reread the identify device info.
2033  			 */
2034  			flags &= ~ATA_READID_POSTRESET;
2035  			goto retry;
2036  		}
2037  	}
2038  
2039  	*p_class = class;
2040  
2041  	return 0;
2042  
2043   err_out:
2044  	if (ata_msg_warn(ap))
2045  		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2046  			     reason, err_mask);
2047  	return rc;
2048  }
2049  
2050  /**
2051   *	ata_read_log_page - read a specific log page
2052   *	@dev: target device
2053   *	@log: log to read
2054   *	@page: page to read
2055   *	@buf: buffer to store read page
2056   *	@sectors: number of sectors to read
2057   *
2058   *	Read log page using READ_LOG_EXT command.
2059   *
2060   *	LOCKING:
2061   *	Kernel thread context (may sleep).
2062   *
2063   *	RETURNS:
2064   *	0 on success, AC_ERR_* mask otherwise.
2065   */
2066  unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2067  			       u8 page, void *buf, unsigned int sectors)
2068  {
2069  	unsigned long ap_flags = dev->link->ap->flags;
2070  	struct ata_taskfile tf;
2071  	unsigned int err_mask;
2072  	bool dma = false;
2073  
2074  	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
2075  
2076  	/*
2077  	 * Return error without actually issuing the command on controllers
2078  	 * which e.g. lockup on a read log page.
2079  	 */
2080  	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2081  		return AC_ERR_DEV;
2082  
2083  retry:
2084  	ata_tf_init(dev, &tf);
2085  	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
2086  	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2087  		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2088  		tf.protocol = ATA_PROT_DMA;
2089  		dma = true;
2090  	} else {
2091  		tf.command = ATA_CMD_READ_LOG_EXT;
2092  		tf.protocol = ATA_PROT_PIO;
2093  		dma = false;
2094  	}
2095  	tf.lbal = log;
2096  	tf.lbam = page;
2097  	tf.nsect = sectors;
2098  	tf.hob_nsect = sectors >> 8;
2099  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2100  
2101  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2102  				     buf, sectors * ATA_SECT_SIZE, 0);
2103  
2104  	if (err_mask && dma) {
2105  		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2106  		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
2107  		goto retry;
2108  	}
2109  
2110  	DPRINTK("EXIT, err_mask=%x\n", err_mask);
2111  	return err_mask;
2112  }
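/*
 * Illustrative sketch (editorial, not part of libata-core.c): fetching
 * one page of the IDENTIFY DEVICE data log into the port's scratch
 * buffer, the same pattern the ata_dev_config_*() helpers below use.
 */
static unsigned int example_read_sata_settings(struct ata_device *dev)
{
	return ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				 ATA_LOG_SATA_SETTINGS,
				 dev->link->ap->sector_buf, 1);
}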
2113  
2114  static bool ata_log_supported(struct ata_device *dev, u8 log)
2115  {
2116  	struct ata_port *ap = dev->link->ap;
2117  
2118  	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2119  		return false;
2120  	return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2121  }
2122  
2123  static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2124  {
2125  	struct ata_port *ap = dev->link->ap;
2126  	unsigned int err, i;
2127  
2128  	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2129  		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2130  		return false;
2131  	}
2132  
2133  	/*
2134  	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2135  	 * supported.
2136  	 */
2137  	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2138  				1);
2139  	if (err) {
2140  		ata_dev_info(dev,
2141  			     "failed to get Device Identify Log, Emask 0x%x\n",
2142  			     err);
2143  		return false;
2144  	}
2145  
2146  	for (i = 0; i < ap->sector_buf[8]; i++) {
2147  		if (ap->sector_buf[9 + i] == page)
2148  			return true;
2149  	}
2150  
2151  	return false;
2152  }
2153  
2154  static int ata_do_link_spd_horkage(struct ata_device *dev)
2155  {
2156  	struct ata_link *plink = ata_dev_phys_link(dev);
2157  	u32 target, target_limit;
2158  
2159  	if (!sata_scr_valid(plink))
2160  		return 0;
2161  
2162  	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2163  		target = 1;
2164  	else
2165  		return 0;
2166  
2167  	target_limit = (1 << target) - 1;
2168  
2169  	/* if already on stricter limit, no need to push further */
2170  	if (plink->sata_spd_limit <= target_limit)
2171  		return 0;
2172  
2173  	plink->sata_spd_limit = target_limit;
2174  
2175  	/* Request another EH round by returning -EAGAIN if link is
2176  	 * going faster than the target speed.  Forward progress is
2177  	 * guaranteed by setting sata_spd_limit to target_limit above.
2178  	 */
2179  	if (plink->sata_spd > target) {
2180  		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2181  			     sata_spd_string(target));
2182  		return -EAGAIN;
2183  	}
2184  	return 0;
2185  }
2186  
2187  static inline u8 ata_dev_knobble(struct ata_device *dev)
2188  {
2189  	struct ata_port *ap = dev->link->ap;
2190  
2191  	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2192  		return 0;
2193  
2194  	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2195  }
2196  
2197  static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2198  {
2199  	struct ata_port *ap = dev->link->ap;
2200  	unsigned int err_mask;
2201  
2202  	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2203  		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2204  		return;
2205  	}
2206  	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2207  				     0, ap->sector_buf, 1);
2208  	if (err_mask) {
2209  		ata_dev_dbg(dev,
2210  			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2211  			    err_mask);
2212  	} else {
2213  		u8 *cmds = dev->ncq_send_recv_cmds;
2214  
2215  		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2216  		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2217  
2218  		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2219  			ata_dev_dbg(dev, "disabling queued TRIM support\n");
2220  			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2221  				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2222  		}
2223  	}
2224  }
2225  
2226  static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2227  {
2228  	struct ata_port *ap = dev->link->ap;
2229  	unsigned int err_mask;
2230  
2231  	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2232  		ata_dev_warn(dev,
2233  			     "NCQ Non-Data Log not supported\n");
2234  		return;
2235  	}
2236  	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2237  				     0, ap->sector_buf, 1);
2238  	if (err_mask) {
2239  		ata_dev_dbg(dev,
2240  			    "failed to get NCQ Non-Data Log Emask 0x%x\n",
2241  			    err_mask);
2242  	} else {
2243  		u8 *cmds = dev->ncq_non_data_cmds;
2244  
2245  		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2246  	}
2247  }
2248  
2249  static void ata_dev_config_ncq_prio(struct ata_device *dev)
2250  {
2251  	struct ata_port *ap = dev->link->ap;
2252  	unsigned int err_mask;
2253  
2254  	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2255  		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2256  		return;
2257  	}
2258  
2259  	err_mask = ata_read_log_page(dev,
2260  				     ATA_LOG_IDENTIFY_DEVICE,
2261  				     ATA_LOG_SATA_SETTINGS,
2262  				     ap->sector_buf,
2263  				     1);
2264  	if (err_mask) {
2265  		ata_dev_dbg(dev,
2266  			    "failed to get Identify Device data, Emask 0x%x\n",
2267  			    err_mask);
2268  		return;
2269  	}
2270  
2271  	if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2272  		dev->flags |= ATA_DFLAG_NCQ_PRIO;
2273  	} else {
2274  		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2275  		ata_dev_dbg(dev, "SATA page does not support priority\n");
2276  	}
2277  
2278  }
2279  
2280  static int ata_dev_config_ncq(struct ata_device *dev,
2281  			       char *desc, size_t desc_sz)
2282  {
2283  	struct ata_port *ap = dev->link->ap;
2284  	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2285  	unsigned int err_mask;
2286  	char *aa_desc = "";
2287  
2288  	if (!ata_id_has_ncq(dev->id)) {
2289  		desc[0] = '\0';
2290  		return 0;
2291  	}
2292  	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2293  		snprintf(desc, desc_sz, "NCQ (not used)");
2294  		return 0;
2295  	}
2296  	if (ap->flags & ATA_FLAG_NCQ) {
2297  		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2298  		dev->flags |= ATA_DFLAG_NCQ;
2299  	}
2300  
2301  	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2302  		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2303  		ata_id_has_fpdma_aa(dev->id)) {
2304  		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2305  			SATA_FPDMA_AA);
2306  		if (err_mask) {
2307  			ata_dev_err(dev,
2308  				    "failed to enable AA (error_mask=0x%x)\n",
2309  				    err_mask);
2310  			if (err_mask != AC_ERR_DEV) {
2311  				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2312  				return -EIO;
2313  			}
2314  		} else
2315  			aa_desc = ", AA";
2316  	}
2317  
2318  	if (hdepth >= ddepth)
2319  		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2320  	else
2321  		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2322  			ddepth, aa_desc);
2323  
2324  	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2325  		if (ata_id_has_ncq_send_and_recv(dev->id))
2326  			ata_dev_config_ncq_send_recv(dev);
2327  		if (ata_id_has_ncq_non_data(dev->id))
2328  			ata_dev_config_ncq_non_data(dev);
2329  		if (ata_id_has_ncq_prio(dev->id))
2330  			ata_dev_config_ncq_prio(dev);
2331  	}
2332  
2333  	return 0;
2334  }
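/*
 * Worked example (editorial): with ATA_MAX_QUEUE == 32 the host-side
 * depth computed above is at most 31, while NCQ drives usually report
 * a device queue depth of 32, so the familiar dmesg line reads
 * "NCQ (depth 31/32), AA" when FPDMA auto-activation was enabled.
 */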
2335  
2336  static void ata_dev_config_sense_reporting(struct ata_device *dev)
2337  {
2338  	unsigned int err_mask;
2339  
2340  	if (!ata_id_has_sense_reporting(dev->id))
2341  		return;
2342  
2343  	if (ata_id_sense_reporting_enabled(dev->id))
2344  		return;
2345  
2346  	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2347  	if (err_mask) {
2348  		ata_dev_dbg(dev,
2349  			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
2350  			    err_mask);
2351  	}
2352  }
2353  
2354  static void ata_dev_config_zac(struct ata_device *dev)
2355  {
2356  	struct ata_port *ap = dev->link->ap;
2357  	unsigned int err_mask;
2358  	u8 *identify_buf = ap->sector_buf;
2359  
2360  	dev->zac_zones_optimal_open = U32_MAX;
2361  	dev->zac_zones_optimal_nonseq = U32_MAX;
2362  	dev->zac_zones_max_open = U32_MAX;
2363  
2364  	/*
2365  	 * Always set the 'ZAC' flag for Host-managed devices.
2366  	 */
2367  	if (dev->class == ATA_DEV_ZAC)
2368  		dev->flags |= ATA_DFLAG_ZAC;
2369  	else if (ata_id_zoned_cap(dev->id) == 0x01)
2370  		/*
2371  		 * Check for host-aware devices.
2372  		 */
2373  		dev->flags |= ATA_DFLAG_ZAC;
2374  
2375  	if (!(dev->flags & ATA_DFLAG_ZAC))
2376  		return;
2377  
2378  	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2379  		ata_dev_warn(dev,
2380  			     "ATA Zoned Information Log not supported\n");
2381  		return;
2382  	}
2383  
2384  	/*
2385  	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2386  	 */
2387  	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2388  				     ATA_LOG_ZONED_INFORMATION,
2389  				     identify_buf, 1);
2390  	if (!err_mask) {
2391  		u64 zoned_cap, opt_open, opt_nonseq, max_open;
2392  
2393  		zoned_cap = get_unaligned_le64(&identify_buf[8]);
2394  		if ((zoned_cap >> 63))
2395  			dev->zac_zoned_cap = (zoned_cap & 1);
2396  		opt_open = get_unaligned_le64(&identify_buf[24]);
2397  		if ((opt_open >> 63))
2398  			dev->zac_zones_optimal_open = (u32)opt_open;
2399  		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2400  		if ((opt_nonseq >> 63))
2401  			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2402  		max_open = get_unaligned_le64(&identify_buf[40]);
2403  		if ((max_open >> 63))
2404  			dev->zac_zones_max_open = (u32)max_open;
2405  	}
2406  }
2407  
2408  static void ata_dev_config_trusted(struct ata_device *dev)
2409  {
2410  	struct ata_port *ap = dev->link->ap;
2411  	u64 trusted_cap;
2412  	unsigned int err;
2413  
2414  	if (!ata_id_has_trusted(dev->id))
2415  		return;
2416  
2417  	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2418  		ata_dev_warn(dev,
2419  			     "Security Log not supported\n");
2420  		return;
2421  	}
2422  
2423  	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2424  			ap->sector_buf, 1);
2425  	if (err) {
2426  		ata_dev_dbg(dev,
2427  			    "failed to read Security Log, Emask 0x%x\n", err);
2428  		return;
2429  	}
2430  
2431  	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2432  	if (!(trusted_cap & (1ULL << 63))) {
2433  		ata_dev_dbg(dev,
2434  			    "Trusted Computing capability qword not valid!\n");
2435  		return;
2436  	}
2437  
2438  	if (trusted_cap & (1 << 0))
2439  		dev->flags |= ATA_DFLAG_TRUSTED;
2440  }
2441  
2442  /**
2443   *	ata_dev_configure - Configure the specified ATA/ATAPI device
2444   *	@dev: Target device to configure
2445   *
2446   *	Configure @dev according to @dev->id.  Generic and low-level
2447   *	driver specific fixups are also applied.
2448   *
2449   *	LOCKING:
2450   *	Kernel thread context (may sleep)
2451   *
2452   *	RETURNS:
2453   *	0 on success, -errno otherwise
2454   */
2455  int ata_dev_configure(struct ata_device *dev)
2456  {
2457  	struct ata_port *ap = dev->link->ap;
2458  	struct ata_eh_context *ehc = &dev->link->eh_context;
2459  	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2460  	const u16 *id = dev->id;
2461  	unsigned long xfer_mask;
2462  	unsigned int err_mask;
2463  	char revbuf[7];		/* XYZ-99\0 */
2464  	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2465  	char modelbuf[ATA_ID_PROD_LEN+1];
2466  	int rc;
2467  
2468  	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2469  		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2470  		return 0;
2471  	}
2472  
2473  	if (ata_msg_probe(ap))
2474  		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2475  
2476  	/* set horkage */
2477  	dev->horkage |= ata_dev_blacklisted(dev);
2478  	ata_force_horkage(dev);
2479  
2480  	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2481  		ata_dev_info(dev, "unsupported device, disabling\n");
2482  		ata_dev_disable(dev);
2483  		return 0;
2484  	}
2485  
2486  	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2487  	    dev->class == ATA_DEV_ATAPI) {
2488  		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2489  			     atapi_enabled ? "not supported with this driver"
2490  			     : "disabled");
2491  		ata_dev_disable(dev);
2492  		return 0;
2493  	}
2494  
2495  	rc = ata_do_link_spd_horkage(dev);
2496  	if (rc)
2497  		return rc;
2498  
2499  	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2500  	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2501  	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2502  		dev->horkage |= ATA_HORKAGE_NOLPM;
2503  
2504  	if (ap->flags & ATA_FLAG_NO_LPM)
2505  		dev->horkage |= ATA_HORKAGE_NOLPM;
2506  
2507  	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2508  		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2509  		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2510  	}
2511  
2512  	/* let ACPI work its magic */
2513  	rc = ata_acpi_on_devcfg(dev);
2514  	if (rc)
2515  		return rc;
2516  
2517  	/* massage HPA, do it early as it might change IDENTIFY data */
2518  	rc = ata_hpa_resize(dev);
2519  	if (rc)
2520  		return rc;
2521  
2522  	/* print device capabilities */
2523  	if (ata_msg_probe(ap))
2524  		ata_dev_dbg(dev,
2525  			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2526  			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2527  			    __func__,
2528  			    id[49], id[82], id[83], id[84],
2529  			    id[85], id[86], id[87], id[88]);
2530  
2531  	/* initialize to-be-configured parameters */
2532  	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2533  	dev->max_sectors = 0;
2534  	dev->cdb_len = 0;
2535  	dev->n_sectors = 0;
2536  	dev->cylinders = 0;
2537  	dev->heads = 0;
2538  	dev->sectors = 0;
2539  	dev->multi_count = 0;
2540  
2541  	/*
2542  	 * common ATA, ATAPI feature tests
2543  	 */
2544  
2545  	/* find max transfer mode; for printk only */
2546  	xfer_mask = ata_id_xfermask(id);
2547  
2548  	if (ata_msg_probe(ap))
2549  		ata_dump_id(id);
2550  
2551  	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2552  	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2553  			sizeof(fwrevbuf));
2554  
2555  	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2556  			sizeof(modelbuf));
2557  
2558  	/* ATA-specific feature tests */
2559  	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2560  		if (ata_id_is_cfa(id)) {
2561  			/* CPRM may make this media unusable */
2562  			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2563  				ata_dev_warn(dev,
2564  	"supports DRM functions and may not be fully accessible\n");
2565  			snprintf(revbuf, 7, "CFA");
2566  		} else {
2567  			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2568  			/* Warn the user if the device has TPM extensions */
2569  			if (ata_id_has_tpm(id))
2570  				ata_dev_warn(dev,
2571  	"supports DRM functions and may not be fully accessible\n");
2572  		}
2573  
2574  		dev->n_sectors = ata_id_n_sectors(id);
2575  
2576  		/* get current R/W Multiple count setting */
2577  		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2578  			unsigned int max = dev->id[47] & 0xff;
2579  			unsigned int cnt = dev->id[59] & 0xff;
2580  			/* only recognize/allow powers of two here */
2581  			if (is_power_of_2(max) && is_power_of_2(cnt))
2582  				if (cnt <= max)
2583  					dev->multi_count = cnt;
2584  		}
2585  
2586  		if (ata_id_has_lba(id)) {
2587  			const char *lba_desc;
2588  			char ncq_desc[24];
2589  
2590  			lba_desc = "LBA";
2591  			dev->flags |= ATA_DFLAG_LBA;
2592  			if (ata_id_has_lba48(id)) {
2593  				dev->flags |= ATA_DFLAG_LBA48;
2594  				lba_desc = "LBA48";
2595  
2596  				if (dev->n_sectors >= (1UL << 28) &&
2597  				    ata_id_has_flush_ext(id))
2598  					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2599  			}
2600  
2601  			/* config NCQ */
2602  			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2603  			if (rc)
2604  				return rc;
2605  
2606  			/* print device info to dmesg */
2607  			if (ata_msg_drv(ap) && print_info) {
2608  				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2609  					     revbuf, modelbuf, fwrevbuf,
2610  					     ata_mode_string(xfer_mask));
2611  				ata_dev_info(dev,
2612  					     "%llu sectors, multi %u: %s %s\n",
2613  					(unsigned long long)dev->n_sectors,
2614  					dev->multi_count, lba_desc, ncq_desc);
2615  			}
2616  		} else {
2617  			/* CHS */
2618  
2619  			/* Default translation */
2620  			dev->cylinders	= id[1];
2621  			dev->heads	= id[3];
2622  			dev->sectors	= id[6];
2623  
2624  			if (ata_id_current_chs_valid(id)) {
2625  				/* Current CHS translation is valid. */
2626  				dev->cylinders = id[54];
2627  				dev->heads     = id[55];
2628  				dev->sectors   = id[56];
2629  			}
2630  
2631  			/* print device info to dmesg */
2632  			if (ata_msg_drv(ap) && print_info) {
2633  				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2634  					     revbuf,	modelbuf, fwrevbuf,
2635  					     ata_mode_string(xfer_mask));
2636  				ata_dev_info(dev,
2637  					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2638  					     (unsigned long long)dev->n_sectors,
2639  					     dev->multi_count, dev->cylinders,
2640  					     dev->heads, dev->sectors);
2641  			}
2642  		}
2643  
2644  		/* Check and mark DevSlp capability. Get DevSlp timing variables
2645  		 * from SATA Settings page of Identify Device Data Log.
2646  		 */
2647  		if (ata_id_has_devslp(dev->id)) {
2648  			u8 *sata_setting = ap->sector_buf;
2649  			int i, j;
2650  
2651  			dev->flags |= ATA_DFLAG_DEVSLP;
2652  			err_mask = ata_read_log_page(dev,
2653  						     ATA_LOG_IDENTIFY_DEVICE,
2654  						     ATA_LOG_SATA_SETTINGS,
2655  						     sata_setting,
2656  						     1);
2657  			if (err_mask)
2658  				ata_dev_dbg(dev,
2659  					    "failed to get Identify Device Data, Emask 0x%x\n",
2660  					    err_mask);
2661  			else
2662  				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2663  					j = ATA_LOG_DEVSLP_OFFSET + i;
2664  					dev->devslp_timing[i] = sata_setting[j];
2665  				}
2666  		}
2667  		ata_dev_config_sense_reporting(dev);
2668  		ata_dev_config_zac(dev);
2669  		ata_dev_config_trusted(dev);
2670  		dev->cdb_len = 32;
2671  	}
2672  
2673  	/* ATAPI-specific feature tests */
2674  	else if (dev->class == ATA_DEV_ATAPI) {
2675  		const char *cdb_intr_string = "";
2676  		const char *atapi_an_string = "";
2677  		const char *dma_dir_string = "";
2678  		u32 sntf;
2679  
2680  		rc = atapi_cdb_len(id);
2681  		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2682  			if (ata_msg_warn(ap))
2683  				ata_dev_warn(dev, "unsupported CDB len\n");
2684  			rc = -EINVAL;
2685  			goto err_out_nosup;
2686  		}
2687  		dev->cdb_len = (unsigned int) rc;
2688  
2689  		/* Enable ATAPI AN if both the host and device have
2690  		 * the support.  If PMP is attached, SNTF is required
2691  		 * to enable ATAPI AN to discern between PHY status
2692  		 * changed notifications and ATAPI ANs.
2693  		 */
2694  		if (atapi_an &&
2695  		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2696  		    (!sata_pmp_attached(ap) ||
2697  		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2698  			/* issue SET feature command to turn this on */
2699  			err_mask = ata_dev_set_feature(dev,
2700  					SETFEATURES_SATA_ENABLE, SATA_AN);
2701  			if (err_mask)
2702  				ata_dev_err(dev,
2703  					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2704  					    err_mask);
2705  			else {
2706  				dev->flags |= ATA_DFLAG_AN;
2707  				atapi_an_string = ", ATAPI AN";
2708  			}
2709  		}
2710  
2711  		if (ata_id_cdb_intr(dev->id)) {
2712  			dev->flags |= ATA_DFLAG_CDB_INTR;
2713  			cdb_intr_string = ", CDB intr";
2714  		}
2715  
2716  		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2717  			dev->flags |= ATA_DFLAG_DMADIR;
2718  			dma_dir_string = ", DMADIR";
2719  		}
2720  
2721  		if (ata_id_has_da(dev->id)) {
2722  			dev->flags |= ATA_DFLAG_DA;
2723  			zpodd_init(dev);
2724  		}
2725  
2726  		/* print device info to dmesg */
2727  		if (ata_msg_drv(ap) && print_info)
2728  			ata_dev_info(dev,
2729  				     "ATAPI: %s, %s, max %s%s%s%s\n",
2730  				     modelbuf, fwrevbuf,
2731  				     ata_mode_string(xfer_mask),
2732  				     cdb_intr_string, atapi_an_string,
2733  				     dma_dir_string);
2734  	}
2735  
2736  	/* determine max_sectors */
2737  	dev->max_sectors = ATA_MAX_SECTORS;
2738  	if (dev->flags & ATA_DFLAG_LBA48)
2739  		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2740  
2741  	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2742  	   200 sectors */
2743  	if (ata_dev_knobble(dev)) {
2744  		if (ata_msg_drv(ap) && print_info)
2745  			ata_dev_info(dev, "applying bridge limits\n");
2746  		dev->udma_mask &= ATA_UDMA5;
2747  		dev->max_sectors = ATA_MAX_SECTORS;
2748  	}
2749  
2750  	if ((dev->class == ATA_DEV_ATAPI) &&
2751  	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2752  		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2753  		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2754  	}
2755  
2756  	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2757  		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2758  					 dev->max_sectors);
2759  
2760  	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2761  		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2762  					 dev->max_sectors);
2763  
2764  	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2765  		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2766  
2767  	if (ap->ops->dev_config)
2768  		ap->ops->dev_config(dev);
2769  
2770  	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2771  		/* Let the user know. We don't want to disallow opens for
2772  		   rescue purposes, or in case the vendor is just a blithering
2773  		   idiot. Do this after the dev_config call as some controllers
2774  		   with buggy firmware may want to avoid reporting false device
2775  		   bugs */
2776  
2777  		if (print_info) {
2778  			ata_dev_warn(dev,
2779  "Drive reports diagnostics failure. This may indicate a drive\n");
2780  			ata_dev_warn(dev,
2781  "fault or invalid emulation. Contact drive vendor for information.\n");
2782  		}
2783  	}
2784  
2785  	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2786  		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2787  		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2788  	}
2789  
2790  	return 0;
2791  
2792  err_out_nosup:
2793  	if (ata_msg_probe(ap))
2794  		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2795  	return rc;
2796  }
2797  
2798  /**
2799   *	ata_cable_40wire	-	return 40 wire cable type
2800   *	@ap: port
2801   *
2802   *	Helper method for drivers which want to hardwire 40 wire cable
2803   *	detection.
2804   */
2805  
2806  int ata_cable_40wire(struct ata_port *ap)
2807  {
2808  	return ATA_CBL_PATA40;
2809  }
2810  
2811  /**
2812   *	ata_cable_80wire	-	return 80 wire cable type
2813   *	@ap: port
2814   *
2815   *	Helper method for drivers which want to hardwire 80 wire cable
2816   *	detection.
2817   */
2818  
2819  int ata_cable_80wire(struct ata_port *ap)
2820  {
2821  	return ATA_CBL_PATA80;
2822  }
2823  
2824  /**
2825   *	ata_cable_unknown	-	return unknown PATA cable.
2826   *	@ap: port
2827   *
2828   *	Helper method for drivers which have no PATA cable detection.
2829   */
2830  
2831  int ata_cable_unknown(struct ata_port *ap)
2832  {
2833  	return ATA_CBL_PATA_UNK;
2834  }
2835  
2836  /**
2837   *	ata_cable_ignore	-	return ignored PATA cable.
2838   *	@ap: port
2839   *
2840   *	Helper method for drivers which don't use cable type to limit
2841   *	transfer mode.
2842   */
2843  int ata_cable_ignore(struct ata_port *ap)
2844  {
2845  	return ATA_CBL_PATA_IGN;
2846  }
2847  
2848  /**
2849   *	ata_cable_sata	-	return SATA cable type
2850   *	@ap: port
2851   *
2852   *	Helper method for drivers which have SATA cables
2853   */
2854  
2855  int ata_cable_sata(struct ata_port *ap)
2856  {
2857  	return ATA_CBL_SATA;
2858  }
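/*
 * Illustrative sketch (editorial, not part of libata-core.c): a driver
 * with no cable detection hardware wires one of the helpers above into
 * its port operations; example_port_ops is hypothetical.
 */
static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,
	/* chip-specific methods would follow */
};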
2859  
2860  /**
2861   *	ata_bus_probe - Reset and probe ATA bus
2862   *	@ap: Bus to probe
2863   *
2864   *	Master ATA bus probing function.  Initiates a hardware-dependent
2865   *	bus reset, then attempts to identify any devices found on
2866   *	the bus.
2867   *
2868   *	LOCKING:
2869   *	PCI/etc. bus probe sem.
2870   *
2871   *	RETURNS:
2872   *	Zero on success, negative errno otherwise.
2873   */
2874  
2875  int ata_bus_probe(struct ata_port *ap)
2876  {
2877  	unsigned int classes[ATA_MAX_DEVICES];
2878  	int tries[ATA_MAX_DEVICES];
2879  	int rc;
2880  	struct ata_device *dev;
2881  
2882  	ata_for_each_dev(dev, &ap->link, ALL)
2883  		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2884  
2885   retry:
2886  	ata_for_each_dev(dev, &ap->link, ALL) {
2887  		/* If we issue an SRST then an ATA drive (not ATAPI)
2888  		 * may change configuration and be in PIO0 timing. If
2889  		 * we do a hard reset (or are coming from power on)
2890  		 * this is true for ATA or ATAPI. Until we've set a
2891  		 * suitable controller mode we should not touch the
2892  		 * bus as we may be talking too fast.
2893  		 */
2894  		dev->pio_mode = XFER_PIO_0;
2895  		dev->dma_mode = 0xff;
2896  
2897  		/* If the controller has a pio mode setup function
2898  		 * then use it to set the chipset to rights. Don't
2899  		 * touch the DMA setup as that will be dealt with when
2900  		 * configuring devices.
2901  		 */
2902  		if (ap->ops->set_piomode)
2903  			ap->ops->set_piomode(ap, dev);
2904  	}
2905  
2906  	/* reset and determine device classes */
2907  	ap->ops->phy_reset(ap);
2908  
2909  	ata_for_each_dev(dev, &ap->link, ALL) {
2910  		if (dev->class != ATA_DEV_UNKNOWN)
2911  			classes[dev->devno] = dev->class;
2912  		else
2913  			classes[dev->devno] = ATA_DEV_NONE;
2914  
2915  		dev->class = ATA_DEV_UNKNOWN;
2916  	}
2917  
2918  	/* read IDENTIFY page and configure devices. We have to do the identify
2919  	   specific sequence bass-ackwards so that PDIAG- is released by
2920  	   the slave device */
2921  
2922  	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2923  		if (tries[dev->devno])
2924  			dev->class = classes[dev->devno];
2925  
2926  		if (!ata_dev_enabled(dev))
2927  			continue;
2928  
2929  		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2930  				     dev->id);
2931  		if (rc)
2932  			goto fail;
2933  	}
2934  
2935  	/* Now ask for the cable type as PDIAG- should have been released */
2936  	if (ap->ops->cable_detect)
2937  		ap->cbl = ap->ops->cable_detect(ap);
2938  
2939  	/* We may have SATA bridge glue hiding here irrespective of
2940  	 * the reported cable types and sensed types.  When SATA
2941  	 * drives indicate we have a bridge, we don't know which end
2942  	 * of the link the bridge is which is a problem.
2943  	 */
2944  	ata_for_each_dev(dev, &ap->link, ENABLED)
2945  		if (ata_id_is_sata(dev->id))
2946  			ap->cbl = ATA_CBL_SATA;
2947  
2948  	/* After the identify sequence we can now set up the devices. We do
2949  	   this in the normal order so that the user doesn't get confused */
2950  
2951  	ata_for_each_dev(dev, &ap->link, ENABLED) {
2952  		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2953  		rc = ata_dev_configure(dev);
2954  		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2955  		if (rc)
2956  			goto fail;
2957  	}
2958  
2959  	/* configure transfer mode */
2960  	rc = ata_set_mode(&ap->link, &dev);
2961  	if (rc)
2962  		goto fail;
2963  
2964  	ata_for_each_dev(dev, &ap->link, ENABLED)
2965  		return 0;
2966  
2967  	return -ENODEV;
2968  
2969   fail:
2970  	tries[dev->devno]--;
2971  
2972  	switch (rc) {
2973  	case -EINVAL:
2974  		/* eeek, something went very wrong, give up */
2975  		tries[dev->devno] = 0;
2976  		break;
2977  
2978  	case -ENODEV:
2979  		/* give it just one more chance */
2980  		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
2981  	case -EIO:
2982  		if (tries[dev->devno] == 1) {
2983  			/* This is the last chance, better to slow
2984  			 * down than lose it.
2985  			 */
2986  			sata_down_spd_limit(&ap->link, 0);
2987  			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2988  		}
2989  	}
2990  
2991  	if (!tries[dev->devno])
2992  		ata_dev_disable(dev);
2993  
2994  	goto retry;
2995  }
2996  
2997  /**
2998   *	sata_print_link_status - Print SATA link status
2999   *	@link: SATA link to printk link status about
3000   *
3001   *	This function prints link speed and status of a SATA link.
3002   *
3003   *	LOCKING:
3004   *	None.
3005   */
3006  static void sata_print_link_status(struct ata_link *link)
3007  {
3008  	u32 sstatus, scontrol, tmp;
3009  
3010  	if (sata_scr_read(link, SCR_STATUS, &sstatus))
3011  		return;
3012  	sata_scr_read(link, SCR_CONTROL, &scontrol);
3013  
3014  	if (ata_phys_link_online(link)) {
3015  		tmp = (sstatus >> 4) & 0xf;
3016  		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3017  			      sata_spd_string(tmp), sstatus, scontrol);
3018  	} else {
3019  		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3020  			      sstatus, scontrol);
3021  	}
3022  }
3023  
3024  /**
3025   *	ata_dev_pair		-	return other device on cable
3026   *	@adev: device
3027   *
3028   *	Obtain the other device on the same cable, or if none is
3029   *	present NULL is returned
3030   */
3031  
3032  struct ata_device *ata_dev_pair(struct ata_device *adev)
3033  {
3034  	struct ata_link *link = adev->link;
3035  	struct ata_device *pair = &link->device[1 - adev->devno];
3036  	if (!ata_dev_enabled(pair))
3037  		return NULL;
3038  	return pair;
3039  }
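/*
 * Illustrative sketch (editorial, not part of libata-core.c): PATA
 * timing code often has to honour the sibling device on the same
 * cable.  The helper below is hypothetical; it clamps a device's PIO
 * mode to the slower of the pair, the kind of policy a shared timing
 * register forces on a driver.
 */
static u8 example_pair_pio_limit(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	if (pair && pair->pio_mode < adev->pio_mode)
		return pair->pio_mode;
	return adev->pio_mode;
}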
3040  
3041  /**
3042   *	sata_down_spd_limit - adjust SATA spd limit downward
3043   *	@link: Link to adjust SATA spd limit for
3044   *	@spd_limit: Additional limit
3045   *
3046   *	Adjust SATA spd limit of @link downward.  Note that this
3047   *	function only adjusts the limit.  The change must be applied
3048   *	using sata_set_spd().
3049   *
3050   *	If @spd_limit is non-zero, the speed is limited to equal to or
3051   *	lower than @spd_limit if such speed is supported.  If
3052   *	@spd_limit is slower than any supported speed, only the lowest
3053   *	supported speed is allowed.
3054   *
3055   *	LOCKING:
3056   *	Inherited from caller.
3057   *
3058   *	RETURNS:
3059   *	0 on success, negative errno on failure
3060   */
3061  int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3062  {
3063  	u32 sstatus, spd, mask;
3064  	int rc, bit;
3065  
3066  	if (!sata_scr_valid(link))
3067  		return -EOPNOTSUPP;
3068  
3069  	/* If SCR can be read, use it to determine the current SPD.
3070  	 * If not, use cached value in link->sata_spd.
3071  	 */
3072  	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3073  	if (rc == 0 && ata_sstatus_online(sstatus))
3074  		spd = (sstatus >> 4) & 0xf;
3075  	else
3076  		spd = link->sata_spd;
3077  
3078  	mask = link->sata_spd_limit;
3079  	if (mask <= 1)
3080  		return -EINVAL;
3081  
3082  	/* unconditionally mask off the highest bit */
3083  	bit = fls(mask) - 1;
3084  	mask &= ~(1 << bit);
3085  
3086  	/* Mask off all speeds higher than or equal to the current
3087  	 * one.  Force 1.5Gbps if current SPD is not available.
3088  	 */
3089  	if (spd > 1)
3090  		mask &= (1 << (spd - 1)) - 1;
3091  	else
3092  		mask &= 1;
3093  
3094  	/* were we already at the bottom? */
3095  	if (!mask)
3096  		return -EINVAL;
3097  
3098  	if (spd_limit) {
3099  		if (mask & ((1 << spd_limit) - 1))
3100  			mask &= (1 << spd_limit) - 1;
3101  		else {
3102  			bit = ffs(mask) - 1;
3103  			mask = 1 << bit;
3104  		}
3105  	}
3106  
3107  	link->sata_spd_limit = mask;
3108  
3109  	ata_link_warn(link, "limiting SATA link speed to %s\n",
3110  		      sata_spd_string(fls(mask)));
3111  
3112  	return 0;
3113  }
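/*
 * Worked example (editorial): with sata_spd_limit == 0x7 (1.5, 3.0 and
 * 6.0 Gbps all allowed) and the link currently at 6.0 Gbps (spd == 3),
 * the top bit is cleared (mask = 0x3) and speeds at or above the
 * current one are masked off (mask &= (1 << 2) - 1), leaving 0x3: the
 * next reset may negotiate at most 3.0 Gbps.
 */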
3114  
3115  static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3116  {
3117  	struct ata_link *host_link = &link->ap->link;
3118  	u32 limit, target, spd;
3119  
3120  	limit = link->sata_spd_limit;
3121  
3122  	/* Don't configure downstream link faster than upstream link.
3123  	 * It doesn't speed up anything and some PMPs choke on such
3124  	 * configuration.
3125  	 */
3126  	if (!ata_is_host_link(link) && host_link->sata_spd)
3127  		limit &= (1 << host_link->sata_spd) - 1;
3128  
3129  	if (limit == UINT_MAX)
3130  		target = 0;
3131  	else
3132  		target = fls(limit);
3133  
3134  	spd = (*scontrol >> 4) & 0xf;
3135  	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3136  
3137  	return spd != target;
3138  }
3139  
3140  /**
3141   *	sata_set_spd_needed - is SATA spd configuration needed
3142   *	@link: Link in question
3143   *
3144   *	Test whether the spd limit in SControl matches
3145   *	@link->sata_spd_limit.  This function is used to determine
3146   *	whether hardreset is necessary to apply SATA spd
3147   *	configuration.
3148   *
3149   *	LOCKING:
3150   *	Inherited from caller.
3151   *
3152   *	RETURNS:
3153   *	1 if SATA spd configuration is needed, 0 otherwise.
3154   */
3155  static int sata_set_spd_needed(struct ata_link *link)
3156  {
3157  	u32 scontrol;
3158  
3159  	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3160  		return 1;
3161  
3162  	return __sata_set_spd_needed(link, &scontrol);
3163  }
3164  
3165  /**
3166   *	sata_set_spd - set SATA spd according to spd limit
3167   *	@link: Link to set SATA spd for
3168   *
3169   *	Set SATA spd of @link according to sata_spd_limit.
3170   *
3171   *	LOCKING:
3172   *	Inherited from caller.
3173   *
3174   *	RETURNS:
3175   *	0 if spd doesn't need to be changed, 1 if spd has been
3176   *	changed.  Negative errno if SCR registers are inaccessible.
3177   */
3178  int sata_set_spd(struct ata_link *link)
3179  {
3180  	u32 scontrol;
3181  	int rc;
3182  
3183  	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3184  		return rc;
3185  
3186  	if (!__sata_set_spd_needed(link, &scontrol))
3187  		return 0;
3188  
3189  	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3190  		return rc;
3191  
3192  	return 1;
3193  }
3194  
3195  /*
3196   * This mode timing computation functionality is ported over from
3197   * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3198   */
3199  /*
3200   * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3201   * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3202   * for UDMA6, which is currently supported only by Maxtor drives.
3203   *
3204   * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3205   */
3206  
3207  static const struct ata_timing ata_timing[] = {
3208  /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
3209  	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
3210  	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
3211  	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
3212  	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
3213  	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
3214  	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
3215  	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
3216  
3217  	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
3218  	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
3219  	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
3220  
3221  	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
3222  	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
3223  	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
3224  	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
3225  	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
3226  
3227  /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
3228  	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
3229  	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
3230  	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
3231  	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
3232  	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
3233  	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
3234  	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
3235  
3236  	{ 0xFF }
3237  };
3238  
3239  #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3240  #define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)
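/*
 * Worked example (editorial): ENOUGH() is ceiling division and EZ()
 * converts a nanosecond value to clock counts, with @unit being the
 * clock period in picoseconds (drivers typically pass
 * T = 1000000000 / bus_clock_khz).  For a 33 MHz bus, T == 30000, so a
 * 120ns PIO4 cycle quantizes to EZ(120, 30000) == 4 clocks.
 */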
3241  
3242  static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3243  {
3244  	q->setup	= EZ(t->setup,       T);
3245  	q->act8b	= EZ(t->act8b,       T);
3246  	q->rec8b	= EZ(t->rec8b,       T);
3247  	q->cyc8b	= EZ(t->cyc8b,       T);
3248  	q->active	= EZ(t->active,      T);
3249  	q->recover	= EZ(t->recover,     T);
3250  	q->dmack_hold	= EZ(t->dmack_hold,  T);
3251  	q->cycle	= EZ(t->cycle,       T);
3252  	q->udma		= EZ(t->udma,       UT);
3253  }
3254  
3255  void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3256  		      struct ata_timing *m, unsigned int what)
3257  {
3258  	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3259  	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3260  	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3261  	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3262  	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3263  	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3264  	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3265  	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3266  	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3267  }
3268  
3269  const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3270  {
3271  	const struct ata_timing *t = ata_timing;
3272  
3273  	while (xfer_mode > t->mode)
3274  		t++;
3275  
3276  	if (xfer_mode == t->mode)
3277  		return t;
3278  
3279  	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3280  			__func__, xfer_mode);
3281  
3282  	return NULL;
3283  }
3284  
3285  int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3286  		       struct ata_timing *t, int T, int UT)
3287  {
3288  	const u16 *id = adev->id;
3289  	const struct ata_timing *s;
3290  	struct ata_timing p;
3291  
3292  	/*
3293  	 * Find the mode.
3294  	 */
3295  
3296  	if (!(s = ata_timing_find_mode(speed)))
3297  		return -EINVAL;
3298  
3299  	memcpy(t, s, sizeof(*s));
3300  
3301  	/*
3302  	 * If the drive is an EIDE drive, it can tell us it needs extended
3303  	 * PIO/MW_DMA cycle timing.
3304  	 */
3305  
3306  	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3307  		memset(&p, 0, sizeof(p));
3308  
3309  		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3310  			if (speed <= XFER_PIO_2)
3311  				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3312  			else if ((speed <= XFER_PIO_4) ||
3313  				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3314  				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3315  		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3316  			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3317  
3318  		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3319  	}
3320  
3321  	/*
3322  	 * Convert the timing to bus clock counts.
3323  	 */
3324  
3325  	ata_timing_quantize(t, t, T, UT);
3326  
3327  	/*
3328  	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3329  	 * S.M.A.R.T. and some other commands. We have to ensure that the
3330  	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3331  	 */
3332  
3333  	if (speed > XFER_PIO_6) {
3334  		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3335  		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3336  	}
3337  
3338  	/*
3339  	 * Lengthen active & recovery time so that cycle time is correct.
3340  	 */
3341  
3342  	if (t->act8b + t->rec8b < t->cyc8b) {
3343  		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3344  		t->rec8b = t->cyc8b - t->act8b;
3345  	}
3346  
3347  	if (t->active + t->recover < t->cycle) {
3348  		t->active += (t->cycle - (t->active + t->recover)) / 2;
3349  		t->recover = t->cycle - t->active;
3350  	}
3351  
3352  	/* In a few cases quantisation may produce enough errors to
3353  	   leave t->cycle too low for the sum of active and recovery
3354  	   if so we must correct this */
3355  	if (t->active + t->recover > t->cycle)
3356  		t->cycle = t->active + t->recover;
3357  
3358  	return 0;
3359  }
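/*
 * Illustrative sketch (editorial, not part of libata-core.c): a PATA
 * driver computing quantized PIO timings.  T == 30000 assumes a 33 MHz
 * bus clock expressed in picoseconds; UT only matters for UDMA modes.
 */
static void example_compute_pio_clocks(struct ata_device *adev)
{
	struct ata_timing t;

	if (ata_timing_compute(adev, adev->pio_mode, &t, 30000, 30000))
		return;
	/* t.setup, t.active, t.recover and t.cycle are now in bus
	 * clocks, ready to be encoded into chip timing registers. */
}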
3360  
3361  /**
3362   *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3363   *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3364   *	@cycle: cycle duration in ns
3365   *
3366   *	Return matching xfer mode for @cycle.  The returned mode is of
3367   *	the transfer type specified by @xfer_shift.  If @cycle is too
3368   *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3369   *	than the fastest known mode, the fastest mode is returned.
3370   *
3371   *	LOCKING:
3372   *	None.
3373   *
3374   *	RETURNS:
3375   *	Matching xfer_mode, 0xff if no match found.
3376   */
3377  u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3378  {
3379  	u8 base_mode = 0xff, last_mode = 0xff;
3380  	const struct ata_xfer_ent *ent;
3381  	const struct ata_timing *t;
3382  
3383  	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3384  		if (ent->shift == xfer_shift)
3385  			base_mode = ent->base;
3386  
3387  	for (t = ata_timing_find_mode(base_mode);
3388  	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3389  		unsigned short this_cycle;
3390  
3391  		switch (xfer_shift) {
3392  		case ATA_SHIFT_PIO:
3393  		case ATA_SHIFT_MWDMA:
3394  			this_cycle = t->cycle;
3395  			break;
3396  		case ATA_SHIFT_UDMA:
3397  			this_cycle = t->udma;
3398  			break;
3399  		default:
3400  			return 0xff;
3401  		}
3402  
3403  		if (cycle > this_cycle)
3404  			break;
3405  
3406  		last_mode = t->mode;
3407  	}
3408  
3409  	return last_mode;
3410  }
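/*
 * Worked example (editorial): ata_timing_cycle2mode(ATA_SHIFT_UDMA, 25)
 * scans UDMA0 (120ns) through UDMA4 (30ns), stops at UDMA5 (20ns)
 * because 25 > 20, and returns XFER_UDMA_4, the fastest UDMA mode
 * whose cycle time is still at least the requested 25ns.
 */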
3411  
3412  /**
3413   *	ata_down_xfermask_limit - adjust dev xfer masks downward
3414   *	@dev: Device to adjust xfer masks
3415   *	@sel: ATA_DNXFER_* selector
3416   *
3417   *	Adjust xfer masks of @dev downward.  Note that this function
3418   *	does not apply the change.  Invoking ata_set_mode() afterwards
3419   *	will apply the limit.
3420   *
3421   *	LOCKING:
3422   *	Inherited from caller.
3423   *
3424   *	RETURNS:
3425   *	0 on success, negative errno on failure
3426   */
3427  int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3428  {
3429  	char buf[32];
3430  	unsigned long orig_mask, xfer_mask;
3431  	unsigned long pio_mask, mwdma_mask, udma_mask;
3432  	int quiet, highbit;
3433  
3434  	quiet = !!(sel & ATA_DNXFER_QUIET);
3435  	sel &= ~ATA_DNXFER_QUIET;
3436  
3437  	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3438  						  dev->mwdma_mask,
3439  						  dev->udma_mask);
3440  	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3441  
3442  	switch (sel) {
3443  	case ATA_DNXFER_PIO:
3444  		highbit = fls(pio_mask) - 1;
3445  		pio_mask &= ~(1 << highbit);
3446  		break;
3447  
3448  	case ATA_DNXFER_DMA:
3449  		if (udma_mask) {
3450  			highbit = fls(udma_mask) - 1;
3451  			udma_mask &= ~(1 << highbit);
3452  			if (!udma_mask)
3453  				return -ENOENT;
3454  		} else if (mwdma_mask) {
3455  			highbit = fls(mwdma_mask) - 1;
3456  			mwdma_mask &= ~(1 << highbit);
3457  			if (!mwdma_mask)
3458  				return -ENOENT;
3459  		}
3460  		break;
3461  
3462  	case ATA_DNXFER_40C:
3463  		udma_mask &= ATA_UDMA_MASK_40C;
3464  		break;
3465  
3466  	case ATA_DNXFER_FORCE_PIO0:
3467  		pio_mask &= 1;
		/* fall through */
3468  	case ATA_DNXFER_FORCE_PIO:
3469  		mwdma_mask = 0;
3470  		udma_mask = 0;
3471  		break;
3472  
3473  	default:
3474  		BUG();
3475  	}
3476  
3477  	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3478  
3479  	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3480  		return -ENOENT;
3481  
3482  	if (!quiet) {
3483  		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3484  			snprintf(buf, sizeof(buf), "%s:%s",
3485  				 ata_mode_string(xfer_mask),
3486  				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3487  		else
3488  			snprintf(buf, sizeof(buf), "%s",
3489  				 ata_mode_string(xfer_mask));
3490  
3491  		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3492  	}
3493  
3494  	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3495  			    &dev->udma_mask);
3496  
3497  	return 0;
3498  }
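/*
 * Example (editorial sketch): EH-style speed stepping.  A flaky device
 * is limited one transfer mode at a time, and the new limit only takes
 * effect once ata_set_mode() runs, as the kernel-doc above notes.  The
 * surrounding error-handling context is hypothetical.
 *
 *	if (ata_down_xfermask_limit(dev, ATA_DNXFER_DMA | ATA_DNXFER_QUIET))
 *		... nothing left to trim, consider ATA_DNXFER_FORCE_PIO ...
 */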
3499  
3500  static int ata_dev_set_mode(struct ata_device *dev)
3501  {
3502  	struct ata_port *ap = dev->link->ap;
3503  	struct ata_eh_context *ehc = &dev->link->eh_context;
3504  	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3505  	const char *dev_err_whine = "";
3506  	int ign_dev_err = 0;
3507  	unsigned int err_mask = 0;
3508  	int rc;
3509  
3510  	dev->flags &= ~ATA_DFLAG_PIO;
3511  	if (dev->xfer_shift == ATA_SHIFT_PIO)
3512  		dev->flags |= ATA_DFLAG_PIO;
3513  
3514  	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3515  		dev_err_whine = " (SET_XFERMODE skipped)";
3516  	else {
3517  		if (nosetxfer)
3518  			ata_dev_warn(dev,
3519  				     "NOSETXFER but PATA detected - can't "
3520  				     "skip SETXFER, might malfunction\n");
3521  		err_mask = ata_dev_set_xfermode(dev);
3522  	}
3523  
3524  	if (err_mask & ~AC_ERR_DEV)
3525  		goto fail;
3526  
3527  	/* revalidate */
3528  	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3529  	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3530  	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3531  	if (rc)
3532  		return rc;
3533  
3534  	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3535  		/* Old CFA may refuse this command, which is just fine */
3536  		if (ata_id_is_cfa(dev->id))
3537  			ign_dev_err = 1;
3538  		/* Catch several broken garbage emulations plus some
3539  		   pre-ATA devices */
3540  		if (ata_id_major_version(dev->id) == 0 &&
3541  					dev->pio_mode <= XFER_PIO_2)
3542  			ign_dev_err = 1;
3543  		/* Some very old devices and some bad newer ones fail
3544  		   any kind of SET_XFERMODE request but support PIO0-2
3545  		   timings and no IORDY */
3546  		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3547  			ign_dev_err = 1;
3548  	}
3549  	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3550  	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3551  	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3552  	    dev->dma_mode == XFER_MW_DMA_0 &&
3553  	    (dev->id[63] >> 8) & 1)
3554  		ign_dev_err = 1;
3555  
3556  	/* if the device is actually configured correctly, ignore dev err */
3557  	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3558  		ign_dev_err = 1;
3559  
3560  	if (err_mask & AC_ERR_DEV) {
3561  		if (!ign_dev_err)
3562  			goto fail;
3563  		else
3564  			dev_err_whine = " (device error ignored)";
3565  	}
3566  
3567  	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3568  		dev->xfer_shift, (int)dev->xfer_mode);
3569  
3570  	ata_dev_info(dev, "configured for %s%s\n",
3571  		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3572  		     dev_err_whine);
3573  
3574  	return 0;
3575  
3576   fail:
3577  	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3578  	return -EIO;
3579  }
3580  
3581  /**
3582   *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3583   *	@link: link on which timings will be programmed
3584   *	@r_failed_dev: out parameter for failed device
3585   *
3586   *	Standard implementation of the function used to tune and set
3587   *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3588   *	ata_dev_set_mode() fails, pointer to the failing device is
3589   *	returned in @r_failed_dev.
3590   *
3591   *	LOCKING:
3592   *	PCI/etc. bus probe sem.
3593   *
3594   *	RETURNS:
3595   *	0 on success, negative errno otherwise
3596   */
3597  
3598  int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3599  {
3600  	struct ata_port *ap = link->ap;
3601  	struct ata_device *dev;
3602  	int rc = 0, used_dma = 0, found = 0;
3603  
3604  	/* step 1: calculate xfer_mask */
3605  	ata_for_each_dev(dev, link, ENABLED) {
3606  		unsigned long pio_mask, dma_mask;
3607  		unsigned int mode_mask;
3608  
3609  		mode_mask = ATA_DMA_MASK_ATA;
3610  		if (dev->class == ATA_DEV_ATAPI)
3611  			mode_mask = ATA_DMA_MASK_ATAPI;
3612  		else if (ata_id_is_cfa(dev->id))
3613  			mode_mask = ATA_DMA_MASK_CFA;
3614  
3615  		ata_dev_xfermask(dev);
3616  		ata_force_xfermask(dev);
3617  
3618  		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3619  
3620  		if (libata_dma_mask & mode_mask)
3621  			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3622  						     dev->udma_mask);
3623  		else
3624  			dma_mask = 0;
3625  
3626  		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3627  		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3628  
3629  		found = 1;
3630  		if (ata_dma_enabled(dev))
3631  			used_dma = 1;
3632  	}
3633  	if (!found)
3634  		goto out;
3635  
3636  	/* step 2: always set host PIO timings */
3637  	ata_for_each_dev(dev, link, ENABLED) {
3638  		if (dev->pio_mode == 0xff) {
3639  			ata_dev_warn(dev, "no PIO support\n");
3640  			rc = -EINVAL;
3641  			goto out;
3642  		}
3643  
3644  		dev->xfer_mode = dev->pio_mode;
3645  		dev->xfer_shift = ATA_SHIFT_PIO;
3646  		if (ap->ops->set_piomode)
3647  			ap->ops->set_piomode(ap, dev);
3648  	}
3649  
3650  	/* step 3: set host DMA timings */
3651  	ata_for_each_dev(dev, link, ENABLED) {
3652  		if (!ata_dma_enabled(dev))
3653  			continue;
3654  
3655  		dev->xfer_mode = dev->dma_mode;
3656  		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3657  		if (ap->ops->set_dmamode)
3658  			ap->ops->set_dmamode(ap, dev);
3659  	}
3660  
3661  	/* step 4: update devices' xfer mode */
3662  	ata_for_each_dev(dev, link, ENABLED) {
3663  		rc = ata_dev_set_mode(dev);
3664  		if (rc)
3665  			goto out;
3666  	}
3667  
3668  	/* Record simplex status. If we selected DMA then the other
3669  	 * host channels are not permitted to do so.
3670  	 */
3671  	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3672  		ap->host->simplex_claimed = ap;
3673  
3674   out:
3675  	if (rc)
3676  		*r_failed_dev = dev;
3677  	return rc;
3678  }
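/*
 * Example (editorial sketch): ata_do_set_mode() drives the
 * ->set_piomode and ->set_dmamode hooks used in steps 2 and 3 above.
 * A minimal PATA driver wires them up through its port operations;
 * the pata_example_* names are hypothetical.
 *
 *	static struct ata_port_operations pata_example_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.set_piomode	= pata_example_set_piomode,
 *		.set_dmamode	= pata_example_set_dmamode,
 *	};
 */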
3679  
3680  /**
3681   *	ata_wait_ready - wait for link to become ready
3682   *	@link: link to be waited on
3683   *	@deadline: deadline jiffies for the operation
3684   *	@check_ready: callback to check link readiness
3685   *
3686   *	Wait for @link to become ready.  @check_ready should return
3687   *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3688   *	link doesn't seem to be occupied, other errno for other error
3689   *	conditions.
3690   *
3691   *	Transient -ENODEV conditions are allowed for
3692   *	ATA_TMOUT_FF_WAIT.
3693   *
3694   *	LOCKING:
3695   *	EH context.
3696   *
3697   *	RETURNS:
3698   *	0 if @link is ready before @deadline; otherwise, -errno.
3699   */
3700  int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3701  		   int (*check_ready)(struct ata_link *link))
3702  {
3703  	unsigned long start = jiffies;
3704  	unsigned long nodev_deadline;
3705  	int warned = 0;
3706  
3707  	/* choose which 0xff timeout to use, see the comment in libata.h */
3708  	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3709  		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3710  	else
3711  		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3712  
3713  	/* Slave readiness can't be tested separately from the master.  On
3714  	 * an M/S emulation configuration, this function should be called
3715  	 * only on the master, and it will handle both master and slave.
3716  	 */
3717  	WARN_ON(link == link->ap->slave_link);
3718  
3719  	if (time_after(nodev_deadline, deadline))
3720  		nodev_deadline = deadline;
3721  
3722  	while (1) {
3723  		unsigned long now = jiffies;
3724  		int ready, tmp;
3725  
3726  		ready = tmp = check_ready(link);
3727  		if (ready > 0)
3728  			return 0;
3729  
3730  		/*
3731  		 * -ENODEV could be transient.  Ignore -ENODEV if link
3732  		 * is online.  Also, some SATA devices take a long
3733  		 * time to clear 0xff after reset.  Wait for
3734  		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3735  		 * offline.
3736  		 *
3737  		 * Note that some PATA controllers (pata_ali) explode
3738  		 * if status register is read more than once when
3739  		 * there's no device attached.
3740  		 */
3741  		if (ready == -ENODEV) {
3742  			if (ata_link_online(link))
3743  				ready = 0;
3744  			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3745  				 !ata_link_offline(link) &&
3746  				 time_before(now, nodev_deadline))
3747  				ready = 0;
3748  		}
3749  
3750  		if (ready)
3751  			return ready;
3752  		if (time_after(now, deadline))
3753  			return -EBUSY;
3754  
3755  		if (!warned && time_after(now, start + 5 * HZ) &&
3756  		    (deadline - now > 3 * HZ)) {
3757  			ata_link_warn(link,
3758  				"link is slow to respond, please be patient "
3759  				"(ready=%d)\n", tmp);
3760  			warned = 1;
3761  		}
3762  
3763  		ata_msleep(link->ap, 50);
3764  	}
3765  }
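/*
 * Example (editorial sketch): a @check_ready callback in the style of
 * the SFF helpers.  ata_check_ready() and ->sff_check_status() are
 * real libata interfaces; the wrapper itself is hypothetical.
 *
 *	static int example_check_ready(struct ata_link *link)
 *	{
 *		u8 status = link->ap->ops->sff_check_status(link->ap);
 *
 *		return ata_check_ready(status);
 *	}
 */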
3766  
3767  /**
3768   *	ata_wait_after_reset - wait for link to become ready after reset
3769   *	@link: link to be waited on
3770   *	@deadline: deadline jiffies for the operation
3771   *	@check_ready: callback to check link readiness
3772   *
3773   *	Wait for @link to become ready after reset.
3774   *
3775   *	LOCKING:
3776   *	EH context.
3777   *
3778   *	RETURNS:
3779   *	0 if @link is ready before @deadline; otherwise, -errno.
3780   */
3781  int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3782  				int (*check_ready)(struct ata_link *link))
3783  {
3784  	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3785  
3786  	return ata_wait_ready(link, deadline, check_ready);
3787  }
3788  
3789  /**
3790   *	sata_link_debounce - debounce SATA phy status
3791   *	@link: ATA link to debounce SATA phy status for
3792   *	@params: timing parameters { interval, duration, timeout } in msec
3793   *	@deadline: deadline jiffies for the operation
3794   *
3795   *	Make sure SStatus of @link reaches stable state, determined by
3796   *	holding the same value where DET is not 1 for @duration polled
3797   *	every @interval, before @timeout.  Timeout constrains the
3798   *	beginning of the stable state.  Because DET gets stuck at 1 on
3799   *	some controllers after hot unplugging, this function waits
3800   *	until timeout then returns 0 if DET is stable at 1.
3801   *
3802   *	@timeout is further limited by @deadline.  The sooner of the
3803   *	two is used.
3804   *
3805   *	LOCKING:
3806   *	Kernel thread context (may sleep)
3807   *
3808   *	RETURNS:
3809   *	0 on success, -errno on failure.
3810   */
3811  int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3812  		       unsigned long deadline)
3813  {
3814  	unsigned long interval = params[0];
3815  	unsigned long duration = params[1];
3816  	unsigned long last_jiffies, t;
3817  	u32 last, cur;
3818  	int rc;
3819  
3820  	t = ata_deadline(jiffies, params[2]);
3821  	if (time_before(t, deadline))
3822  		deadline = t;
3823  
3824  	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3825  		return rc;
3826  	cur &= 0xf;
3827  
3828  	last = cur;
3829  	last_jiffies = jiffies;
3830  
3831  	while (1) {
3832  		ata_msleep(link->ap, interval);
3833  		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3834  			return rc;
3835  		cur &= 0xf;
3836  
3837  		/* DET stable? */
3838  		if (cur == last) {
3839  			if (cur == 1 && time_before(jiffies, deadline))
3840  				continue;
3841  			if (time_after(jiffies,
3842  				       ata_deadline(last_jiffies, duration)))
3843  				return 0;
3844  			continue;
3845  		}
3846  
3847  		/* unstable, start over */
3848  		last = cur;
3849  		last_jiffies = jiffies;
3850  
3851  		/* Check deadline.  If debouncing failed, return
3852  		 * -EPIPE to tell upper layer to lower link speed.
3853  		 */
3854  		if (time_after(jiffies, deadline))
3855  			return -EPIPE;
3856  	}
3857  }
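/*
 * Example (editorial sketch): debouncing with the stock "normal"
 * parameters declared near the top of this file, capped at one second
 * from now via the usual ata_deadline() helper.
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_normal,
 *				ata_deadline(jiffies, 1000));
 */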
3858  
3859  /**
3860   *	sata_link_resume - resume SATA link
3861   *	@link: ATA link to resume SATA
3862   *	@params: timing parameters { interval, duration, timeout } in msec
3863   *	@deadline: deadline jiffies for the operation
3864   *
3865   *	Resume SATA phy @link and debounce it.
3866   *
3867   *	LOCKING:
3868   *	Kernel thread context (may sleep)
3869   *
3870   *	RETURNS:
3871   *	0 on success, -errno on failure.
3872   */
3873  int sata_link_resume(struct ata_link *link, const unsigned long *params,
3874  		     unsigned long deadline)
3875  {
3876  	int tries = ATA_LINK_RESUME_TRIES;
3877  	u32 scontrol, serror;
3878  	int rc;
3879  
3880  	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3881  		return rc;
3882  
3883  	/*
3884  	 * Writes to SControl sometimes get ignored under certain
3885  	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3886  	 * cleared.
3887  	 */
3888  	do {
3889  		scontrol = (scontrol & 0x0f0) | 0x300;
3890  		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3891  			return rc;
3892  		/*
3893  		 * Some PHYs react badly if SStatus is pounded
3894  		 * immediately after resuming.  Delay 200ms before
3895  		 * debouncing.
3896  		 */
3897  		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3898  			ata_msleep(link->ap, 200);
3899  
3900  		/* is SControl restored correctly? */
3901  		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3902  			return rc;
3903  	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3904  
3905  	if ((scontrol & 0xf0f) != 0x300) {
3906  		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3907  			     scontrol);
3908  		return 0;
3909  	}
3910  
3911  	if (tries < ATA_LINK_RESUME_TRIES)
3912  		ata_link_warn(link, "link resume succeeded after %d retries\n",
3913  			      ATA_LINK_RESUME_TRIES - tries);
3914  
3915  	if ((rc = sata_link_debounce(link, params, deadline)))
3916  		return rc;
3917  
3918  	/* clear SError, some PHYs require this even for SRST to work */
3919  	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3920  		rc = sata_scr_write(link, SCR_ERROR, serror);
3921  
3922  	return rc != -EINVAL ? rc : 0;
3923  }
3924  
3925  /**
3926   *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3927   *	@link: ATA link to manipulate SControl for
3928   *	@policy: LPM policy to configure
3929   *	@spm_wakeup: initiate LPM transition to active state
3930   *
3931   *	Manipulate the IPM field of the SControl register of @link
3932   *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3933   *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3934   *	the link.  This function also clears PHYRDY_CHG before
3935   *	returning.
3936   *
3937   *	LOCKING:
3938   *	EH context.
3939   *
3940   *	RETURNS:
3941   *	0 on success, -errno otherwise.
3942   */
3943  int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3944  		      bool spm_wakeup)
3945  {
3946  	struct ata_eh_context *ehc = &link->eh_context;
3947  	bool woken_up = false;
3948  	u32 scontrol;
3949  	int rc;
3950  
3951  	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3952  	if (rc)
3953  		return rc;
3954  
3955  	switch (policy) {
3956  	case ATA_LPM_MAX_POWER:
3957  		/* disable all LPM transitions */
3958  		scontrol |= (0x7 << 8);
3959  		/* initiate transition to active state */
3960  		if (spm_wakeup) {
3961  			scontrol |= (0x4 << 12);
3962  			woken_up = true;
3963  		}
3964  		break;
3965  	case ATA_LPM_MED_POWER:
3966  		/* allow LPM to PARTIAL */
3967  		scontrol &= ~(0x1 << 8);
3968  		scontrol |= (0x6 << 8);
3969  		break;
3970  	case ATA_LPM_MIN_POWER:
3971  		if (ata_link_nr_enabled(link) > 0)
3972  			/* no restrictions on LPM transitions */
3973  			scontrol &= ~(0x7 << 8);
3974  		else {
3975  			/* empty port, power off */
3976  			scontrol &= ~0xf;
3977  			scontrol |= (0x1 << 2);
3978  		}
3979  		break;
3980  	default:
3981  		WARN_ON(1);
3982  	}
3983  
3984  	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3985  	if (rc)
3986  		return rc;
3987  
3988  	/* give the link time to transit out of LPM state */
3989  	if (woken_up)
3990  		msleep(10);
3991  
3992  	/* clear PHYRDY_CHG from SError */
3993  	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3994  	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3995  }
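/*
 * Editorial note on the SControl masks above: DET lives in bits 3:0,
 * SPD in 7:4, IPM in 11:8 and SPM in 15:12.  IPM = 0x7 forbids
 * transitions to partial, slumber and DevSleep; SPM = 0x4 requests a
 * transition back to the active state.
 */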
3996  
3997  /**
3998   *	ata_std_prereset - prepare for reset
3999   *	@link: ATA link to be reset
4000   *	@deadline: deadline jiffies for the operation
4001   *
4002   *	@link is about to be reset.  Initialize it.  Failure from
4003   *	prereset makes libata abort the whole reset sequence and give up
4004   *	that port, so prereset should be best-effort.  It does its
4005   *	best to prepare for reset sequence but if things go wrong, it
4006   *	should just whine, not fail.
4007   *
4008   *	LOCKING:
4009   *	Kernel thread context (may sleep)
4010   *
4011   *	RETURNS:
4012   *	0 on success, -errno otherwise.
4013   */
4014  int ata_std_prereset(struct ata_link *link, unsigned long deadline)
4015  {
4016  	struct ata_port *ap = link->ap;
4017  	struct ata_eh_context *ehc = &link->eh_context;
4018  	const unsigned long *timing = sata_ehc_deb_timing(ehc);
4019  	int rc;
4020  
4021  	/* if we're about to do hardreset, nothing more to do */
4022  	if (ehc->i.action & ATA_EH_HARDRESET)
4023  		return 0;
4024  
4025  	/* if SATA, resume link */
4026  	if (ap->flags & ATA_FLAG_SATA) {
4027  		rc = sata_link_resume(link, timing, deadline);
4028  		/* whine about phy resume failure but proceed */
4029  		if (rc && rc != -EOPNOTSUPP)
4030  			ata_link_warn(link,
4031  				      "failed to resume link for reset (errno=%d)\n",
4032  				      rc);
4033  	}
4034  
4035  	/* no point in trying softreset on offline link */
4036  	if (ata_phys_link_offline(link))
4037  		ehc->i.action &= ~ATA_EH_SOFTRESET;
4038  
4039  	return 0;
4040  }
4041  
4042  /**
4043   *	sata_link_hardreset - reset link via SATA phy reset
4044   *	@link: link to reset
4045   *	@timing: timing parameters { interval, duration, timeout } in msec
4046   *	@deadline: deadline jiffies for the operation
4047   *	@online: optional out parameter indicating link onlineness
4048   *	@check_ready: optional callback to check link readiness
4049   *
4050   *	SATA phy-reset @link using DET bits of SControl register.
4051   *	After hardreset, link readiness is waited upon using
4052   *	ata_wait_ready() if @check_ready is specified.  LLDs are
4053   *	allowed to not specify @check_ready and wait themselves after
4054   *	this function returns.  Device classification is the LLD's
4055   *	responsibility.
4056   *
4057   *	*@online is set to one iff reset succeeded and @link is online
4058   *	after reset.
4059   *
4060   *	LOCKING:
4061   *	Kernel thread context (may sleep)
4062   *
4063   *	RETURNS:
4064   *	0 on success, -errno otherwise.
4065   */
4066  int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
4067  			unsigned long deadline,
4068  			bool *online, int (*check_ready)(struct ata_link *))
4069  {
4070  	u32 scontrol;
4071  	int rc;
4072  
4073  	DPRINTK("ENTER\n");
4074  
4075  	if (online)
4076  		*online = false;
4077  
4078  	if (sata_set_spd_needed(link)) {
4079  		/* SATA spec says nothing about how to reconfigure
4080  		 * spd.  To be on the safe side, turn off phy during
4081  		 * reconfiguration.  This works for at least ICH7 AHCI
4082  		 * and Sil3124.
4083  		 */
4084  		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4085  			goto out;
4086  
4087  		scontrol = (scontrol & 0x0f0) | 0x304;
4088  
4089  		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4090  			goto out;
4091  
4092  		sata_set_spd(link);
4093  	}
4094  
4095  	/* issue phy wake/reset */
4096  	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4097  		goto out;
4098  
4099  	scontrol = (scontrol & 0x0f0) | 0x301;
4100  
4101  	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4102  		goto out;
4103  
4104  	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4105  	 * 10.4.2 says at least 1 ms.
4106  	 */
4107  	ata_msleep(link->ap, 1);
4108  
4109  	/* bring link back */
4110  	rc = sata_link_resume(link, timing, deadline);
4111  	if (rc)
4112  		goto out;
4113  	/* if link is offline nothing more to do */
4114  	if (ata_phys_link_offline(link))
4115  		goto out;
4116  
4117  	/* Link is online.  From this point, -ENODEV too is an error. */
4118  	if (online)
4119  		*online = true;
4120  
4121  	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
4122  		/* If PMP is supported, we have to do follow-up SRST.
4123  		 * Some PMPs don't send D2H Reg FIS after hardreset if
4124  		 * the first port is empty.  Wait only for
4125  		 * ATA_TMOUT_PMP_SRST_WAIT.
4126  		 */
4127  		if (check_ready) {
4128  			unsigned long pmp_deadline;
4129  
4130  			pmp_deadline = ata_deadline(jiffies,
4131  						    ATA_TMOUT_PMP_SRST_WAIT);
4132  			if (time_after(pmp_deadline, deadline))
4133  				pmp_deadline = deadline;
4134  			ata_wait_ready(link, pmp_deadline, check_ready);
4135  		}
4136  		rc = -EAGAIN;
4137  		goto out;
4138  	}
4139  
4140  	rc = 0;
4141  	if (check_ready)
4142  		rc = ata_wait_ready(link, deadline, check_ready);
4143   out:
4144  	if (rc && rc != -EAGAIN) {
4145  		/* online is set iff link is online && reset succeeded */
4146  		if (online)
4147  			*online = false;
4148  		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
4149  	}
4150  	DPRINTK("EXIT, rc=%d\n", rc);
4151  	return rc;
4152  }
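/*
 * Example (editorial sketch): a driver hardreset built on
 * sata_link_hardreset(), similar in shape to AHCI's.  The
 * example_check_ready callback is the hypothetical one sketched after
 * ata_wait_ready() above; device classification via *class is left to
 * the LLD, as the kernel-doc notes.
 *
 *	static int example_hardreset(struct ata_link *link, unsigned int *class,
 *				     unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline, &online,
 *					   example_check_ready);
 *	}
 */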
4153  
4154  /**
4155   *	sata_std_hardreset - COMRESET w/o waiting or classification
4156   *	@link: link to reset
4157   *	@class: resulting class of attached device
4158   *	@deadline: deadline jiffies for the operation
4159   *
4160   *	Standard SATA COMRESET w/o waiting or classification.
4161   *
4162   *	LOCKING:
4163   *	Kernel thread context (may sleep)
4164   *
4165   *	RETURNS:
4166   *	0 if link offline, -EAGAIN if link online, -errno on errors.
4167   */
4168  int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4169  		       unsigned long deadline)
4170  {
4171  	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4172  	bool online;
4173  	int rc;
4174  
4175  	/* do hardreset */
4176  	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4177  	return online ? -EAGAIN : rc;
4178  }
4179  
4180  /**
4181   *	ata_std_postreset - standard postreset callback
4182   *	@link: the target ata_link
4183   *	@classes: classes of attached devices
4184   *
4185   *	This function is invoked after a successful reset.  Note that
4186   *	the device might have been reset more than once using
4187   *	different reset methods before postreset is invoked.
4188   *
4189   *	LOCKING:
4190   *	Kernel thread context (may sleep)
4191   */
4192  void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4193  {
4194  	u32 serror;
4195  
4196  	DPRINTK("ENTER\n");
4197  
4198  	/* reset complete, clear SError */
4199  	if (!sata_scr_read(link, SCR_ERROR, &serror))
4200  		sata_scr_write(link, SCR_ERROR, serror);
4201  
4202  	/* print link status */
4203  	sata_print_link_status(link);
4204  
4205  	DPRINTK("EXIT\n");
4206  }
4207  
4208  /**
4209   *	ata_dev_same_device - Determine whether new ID matches configured device
4210   *	@dev: device to compare against
4211   *	@new_class: class of the new device
4212   *	@new_id: IDENTIFY page of the new device
4213   *
4214   *	Compare @new_class and @new_id against @dev and determine
4215   *	whether @dev is the device indicated by @new_class and
4216   *	@new_id.
4217   *
4218   *	LOCKING:
4219   *	None.
4220   *
4221   *	RETURNS:
4222   *	1 if @dev matches @new_class and @new_id, 0 otherwise.
4223   */
4224  static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4225  			       const u16 *new_id)
4226  {
4227  	const u16 *old_id = dev->id;
4228  	unsigned char model[2][ATA_ID_PROD_LEN + 1];
4229  	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4230  
4231  	if (dev->class != new_class) {
4232  		ata_dev_info(dev, "class mismatch %d != %d\n",
4233  			     dev->class, new_class);
4234  		return 0;
4235  	}
4236  
4237  	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4238  	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4239  	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4240  	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4241  
4242  	if (strcmp(model[0], model[1])) {
4243  		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4244  			     model[0], model[1]);
4245  		return 0;
4246  	}
4247  
4248  	if (strcmp(serial[0], serial[1])) {
4249  		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4250  			     serial[0], serial[1]);
4251  		return 0;
4252  	}
4253  
4254  	return 1;
4255  }
4256  
4257  /**
4258   *	ata_dev_reread_id - Re-read IDENTIFY data
4259   *	@dev: target ATA device
4260   *	@readid_flags: read ID flags
4261   *
4262   *	Re-read IDENTIFY page and make sure @dev is still attached to
4263   *	the port.
4264   *
4265   *	LOCKING:
4266   *	Kernel thread context (may sleep)
4267   *
4268   *	RETURNS:
4269   *	0 on success, negative errno otherwise
4270   */
4271  int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4272  {
4273  	unsigned int class = dev->class;
4274  	u16 *id = (void *)dev->link->ap->sector_buf;
4275  	int rc;
4276  
4277  	/* read ID data */
4278  	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4279  	if (rc)
4280  		return rc;
4281  
4282  	/* is the device still there? */
4283  	if (!ata_dev_same_device(dev, class, id))
4284  		return -ENODEV;
4285  
4286  	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4287  	return 0;
4288  }
4289  
4290  /**
4291   *	ata_dev_revalidate - Revalidate ATA device
4292   *	@dev: device to revalidate
4293   *	@new_class: new class code
4294   *	@readid_flags: read ID flags
4295   *
4296   *	Re-read IDENTIFY page, make sure @dev is still attached to the
4297   *	port and reconfigure it according to the new IDENTIFY page.
4298   *
4299   *	LOCKING:
4300   *	Kernel thread context (may sleep)
4301   *
4302   *	RETURNS:
4303   *	0 on success, negative errno otherwise
4304   */
4305  int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4306  		       unsigned int readid_flags)
4307  {
4308  	u64 n_sectors = dev->n_sectors;
4309  	u64 n_native_sectors = dev->n_native_sectors;
4310  	int rc;
4311  
4312  	if (!ata_dev_enabled(dev))
4313  		return -ENODEV;
4314  
4315  	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4316  	if (ata_class_enabled(new_class) &&
4317  	    new_class != ATA_DEV_ATA &&
4318  	    new_class != ATA_DEV_ATAPI &&
4319  	    new_class != ATA_DEV_ZAC &&
4320  	    new_class != ATA_DEV_SEMB) {
4321  		ata_dev_info(dev, "class mismatch %u != %u\n",
4322  			     dev->class, new_class);
4323  		rc = -ENODEV;
4324  		goto fail;
4325  	}
4326  
4327  	/* re-read ID */
4328  	rc = ata_dev_reread_id(dev, readid_flags);
4329  	if (rc)
4330  		goto fail;
4331  
4332  	/* configure device according to the new ID */
4333  	rc = ata_dev_configure(dev);
4334  	if (rc)
4335  		goto fail;
4336  
4337  	/* verify n_sectors hasn't changed */
4338  	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4339  	    dev->n_sectors == n_sectors)
4340  		return 0;
4341  
4342  	/* n_sectors has changed */
4343  	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4344  		     (unsigned long long)n_sectors,
4345  		     (unsigned long long)dev->n_sectors);
4346  
4347  	/*
4348  	 * Something could have caused HPA to be unlocked
4349  	 * involuntarily.  If n_native_sectors hasn't changed and the
4350  	 * new size matches it, keep the device.
4351  	 */
4352  	if (dev->n_native_sectors == n_native_sectors &&
4353  	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4354  		ata_dev_warn(dev,
4355  			     "new n_sectors matches native, probably "
4356  			     "late HPA unlock, n_sectors updated\n");
4357  		/* use the larger n_sectors */
4358  		return 0;
4359  	}
4360  
4361  	/*
4362  	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4363  	 * unlocking HPA in those cases.
4364  	 *
4365  	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4366  	 */
4367  	if (dev->n_native_sectors == n_native_sectors &&
4368  	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4369  	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4370  		ata_dev_warn(dev,
4371  			     "old n_sectors matches native, probably "
4372  			     "late HPA lock, will try to unlock HPA\n");
4373  		/* try unlocking HPA */
4374  		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4375  		rc = -EIO;
4376  	} else
4377  		rc = -ENODEV;
4378  
4379  	/* restore original n_[native_]sectors and fail */
4380  	dev->n_native_sectors = n_native_sectors;
4381  	dev->n_sectors = n_sectors;
4382   fail:
4383  	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4384  	return rc;
4385  }
4386  
4387  struct ata_blacklist_entry {
4388  	const char *model_num;
4389  	const char *model_rev;
4390  	unsigned long horkage;
4391  };
4392  
4393  static const struct ata_blacklist_entry ata_device_blacklist[] = {
4394  	/* Devices with DMA related problems under Linux */
4395  	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4396  	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4397  	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4398  	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4399  	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4400  	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4401  	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4402  	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4403  	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4404  	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4405  	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4406  	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4407  	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4408  	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4409  	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4410  	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4411  	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4412  	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4413  	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4414  	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4415  	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4416  	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4417  	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4418  	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4419  	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4420  	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4421  	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4422  	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4423  	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4424  	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
4425  	/* Odd clown on sil3726/4726 PMPs */
4426  	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4427  
4428  	/* Weird ATAPI devices */
4429  	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4430  	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4431  	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4432  	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4433  
4434  	/*
4435  	 * Causes silent data corruption with higher max sects.
4436  	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4437  	 */
4438  	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
4439  
4440  	/*
4441  	 * These devices time out with higher max sects.
4442  	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4443  	 */
4444  	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4445  	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4446  
4447  	/* Devices we expect to fail diagnostics */
4448  
4449  	/* Devices where NCQ should be avoided */
4450  	/* NCQ is slow */
4451  	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4452  	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4453  	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4454  	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4455  	/* NCQ is broken */
4456  	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4457  	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4458  	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4459  	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4460  	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4461  
4462  	/* Seagate NCQ + FLUSH CACHE firmware bug */
4463  	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4464  						ATA_HORKAGE_FIRMWARE_WARN },
4465  
4466  	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4467  						ATA_HORKAGE_FIRMWARE_WARN },
4468  
4469  	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4470  						ATA_HORKAGE_FIRMWARE_WARN },
4471  
4472  	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4473  						ATA_HORKAGE_FIRMWARE_WARN },
4474  
4475  	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
4476  	   the ST disks also have LPM issues */
4477  	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA |
4478  						ATA_HORKAGE_NOLPM, },
4479  	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA |
4480  						ATA_HORKAGE_NOLPM, },
4481  	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4482  
4483  	/* Blacklist entries taken from Silicon Image 3124/3132
4484  	   Windows driver .inf file - also several Linux problem reports */
4485  	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4486  	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4487  	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4488  
4489  	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4490  	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4491  
4492  	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
4493  	   SD7SN6S256G and SD8SN8U256G */
4494  	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
4495  
4496  	/* devices which puke on READ_NATIVE_MAX */
4497  	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4498  	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4499  	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4500  	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4501  
4502  	/* this one allows HPA unlocking but fails IOs on the area */
4503  	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4504  
4505  	/* Devices which report 1 sector over size HPA */
4506  	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4507  	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4508  	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4509  
4510  	/* Devices which get the IVB wrong */
4511  	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4512  	/* Maybe we should just blacklist TSSTcorp... */
4513  	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4514  
4515  	/* Devices that do not need bridging limits applied */
4516  	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4517  	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4518  
4519  	/* Devices which aren't very happy with higher link speeds */
4520  	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4521  	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4522  
4523  	/*
4524  	 * Devices which choke on SETXFER.  Applies only if both the
4525  	 * device and controller are SATA.
4526  	 */
4527  	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4528  	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4529  	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4530  	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4531  	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4532  
4533  	/* Crucial BX100 SSD 500GB has broken LPM support */
4534  	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
4535  
4536  	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4537  	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4538  						ATA_HORKAGE_ZERO_AFTER_TRIM |
4539  						ATA_HORKAGE_NOLPM, },
4540  	/* 512GB MX100 with newer firmware has only LPM issues */
4541  	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
4542  						ATA_HORKAGE_NOLPM, },
4543  
4544  	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4545  	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4546  						ATA_HORKAGE_ZERO_AFTER_TRIM |
4547  						ATA_HORKAGE_NOLPM, },
4548  	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4549  						ATA_HORKAGE_ZERO_AFTER_TRIM |
4550  						ATA_HORKAGE_NOLPM, },
4551  
4552  	/* devices that don't properly handle queued TRIM commands */
4553  	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4554  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4555  	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4556  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4557  	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4558  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4559  	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4560  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4561  	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4562  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4563  	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4564  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4565  	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4566  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4567  	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4568  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4569  	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4570  						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4571  
4572  	/* devices that don't properly handle TRIM commands */
4573  	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
4574  
4575  	/*
4576  	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4577  	 * (Return Zero After Trim) flags in the ATA Command Set are
4578  	 * unreliable in the sense that they only define what happens if
4579  	 * the device successfully executed the DSM TRIM command. TRIM
4580  	 * is only advisory, however, and the device is free to silently
4581  	 * ignore all or parts of the request.
4582  	 *
4583  	 * Whitelist drives that are known to reliably return zeroes
4584  	 * after TRIM.
4585  	 */
4586  
4587  	/*
4588  	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4589  	 * that model before whitelisting all other intel SSDs.
4590  	 */
4591  	{ "INTEL*SSDSC2MH*",		NULL,	0, },
4592  
4593  	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4594  	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4595  	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4596  	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4597  	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4598  	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4599  	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4600  	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4601  
4602  	/*
4603  	 * Some WD SATA-I drives spin up and down erratically when the link
4604  	 * is put into the slumber mode.  We don't have full list of the
4605  	 * affected devices.  Disable LPM if the device matches one of the
4606  	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4607  	 * lost too.
4608  	 *
4609  	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4610  	 */
4611  	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4612  	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4613  	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4614  	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4615  	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4616  	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4617  	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4618  
4619  	/* End Marker */
4620  	{ }
4621  };
4622  
4623  static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4624  {
4625  	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4626  	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4627  	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4628  
4629  	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4630  	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4631  
4632  	while (ad->model_num) {
4633  		if (glob_match(ad->model_num, model_num)) {
4634  			if (ad->model_rev == NULL)
4635  				return ad->horkage;
4636  			if (glob_match(ad->model_rev, model_rev))
4637  				return ad->horkage;
4638  		}
4639  		ad++;
4640  	}
4641  	return 0;
4642  }
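/*
 * Editorial note: ata_dev_blacklisted() matches the table above with
 * glob_match(), so "ST3320[68]13AS" covers both ST3320613AS and
 * ST3320813AS, and a trailing '*' matches any model or firmware
 * suffix.
 */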
4643  
4644  static int ata_dma_blacklisted(const struct ata_device *dev)
4645  {
4646  	/* We don't support polling DMA.
4647  	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
4648  	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4649  	 */
4650  	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4651  	    (dev->flags & ATA_DFLAG_CDB_INTR))
4652  		return 1;
4653  	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4654  }
4655  
4656  /**
4657   *	ata_is_40wire		-	check drive side detection
4658   *	@dev: device
4659   *
4660   *	Perform drive side detection decoding, allowing for device vendors
4661   *	who can't follow the documentation.
4662   */
4663  
4664  static int ata_is_40wire(struct ata_device *dev)
4665  {
4666  	if (dev->horkage & ATA_HORKAGE_IVB)
4667  		return ata_drive_40wire_relaxed(dev->id);
4668  	return ata_drive_40wire(dev->id);
4669  }
4670  
4671  /**
4672   *	cable_is_40wire		-	40/80/SATA decider
4673   *	@ap: port to consider
4674   *
4675   *	This function encapsulates the policy for speed management
4676   *	in one place. At the moment we don't cache the result but
4677   *	there is a good case for setting ap->cbl to the result when
4678   *	we are called with unknown cables (and figuring out if it
4679   *	impacts hotplug at all).
4680   *
4681   *	Return 1 if the cable appears to be 40 wire.
4682   */
4683  
4684  static int cable_is_40wire(struct ata_port *ap)
4685  {
4686  	struct ata_link *link;
4687  	struct ata_device *dev;
4688  
4689  	/* If the controller thinks we are 40 wire, we are. */
4690  	if (ap->cbl == ATA_CBL_PATA40)
4691  		return 1;
4692  
4693  	/* If the controller thinks we are 80 wire, we are. */
4694  	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4695  		return 0;
4696  
4697  	/* If the system is known to use a 40 wire short cable (e.g.
4698  	 * a laptop), then we allow 80 wire modes even if the drive
4699  	 * isn't sure.
4700  	 */
4701  	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4702  		return 0;
4703  
4704  	/* If the controller doesn't know, we scan.
4705  	 *
4706  	 * Note: We look for all 40 wire detects at this point.  Any
4707  	 *       80 wire detect is taken to be 80 wire cable because
4708  	 * - in many setups only the one drive (slave if present) will
4709  	 *   give a valid detect
4710  	 * - if you have a non detect capable drive you don't want it
4711  	 *   to colour the choice
4712  	 */
4713  	ata_for_each_link(link, ap, EDGE) {
4714  		ata_for_each_dev(dev, link, ENABLED) {
4715  			if (!ata_is_40wire(dev))
4716  				return 0;
4717  		}
4718  	}
4719  	return 1;
4720  }
4721  
4722  /**
4723   *	ata_dev_xfermask - Compute supported xfermask of the given device
4724   *	@dev: Device to compute xfermask for
4725   *
4726   *	Compute supported xfermask of @dev and store it in
4727   *	dev->*_mask.  This function is responsible for applying all
4728   *	known limits including host controller limits, device
4729   *	blacklist, etc...
4730   *
4731   *	LOCKING:
4732   *	None.
4733   */
4734  static void ata_dev_xfermask(struct ata_device *dev)
4735  {
4736  	struct ata_link *link = dev->link;
4737  	struct ata_port *ap = link->ap;
4738  	struct ata_host *host = ap->host;
4739  	unsigned long xfer_mask;
4740  
4741  	/* controller modes available */
4742  	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4743  				      ap->mwdma_mask, ap->udma_mask);
4744  
4745  	/* drive modes available */
4746  	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4747  				       dev->mwdma_mask, dev->udma_mask);
4748  	xfer_mask &= ata_id_xfermask(dev->id);
4749  
4750  	/*
4751  	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4752  	 *	cable
4753  	 */
4754  	if (ata_dev_pair(dev)) {
4755  		/* No PIO5 or PIO6 */
4756  		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4757  		/* No MWDMA3 or MWDMA4 */
4758  		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4759  	}
4760  
4761  	if (ata_dma_blacklisted(dev)) {
4762  		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4763  		ata_dev_warn(dev,
4764  			     "device is on DMA blacklist, disabling DMA\n");
4765  	}
4766  
4767  	if ((host->flags & ATA_HOST_SIMPLEX) &&
4768  	    host->simplex_claimed && host->simplex_claimed != ap) {
4769  		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4770  		ata_dev_warn(dev,
4771  			     "simplex DMA is claimed by other device, disabling DMA\n");
4772  	}
4773  
4774  	if (ap->flags & ATA_FLAG_NO_IORDY)
4775  		xfer_mask &= ata_pio_mask_no_iordy(dev);
4776  
4777  	if (ap->ops->mode_filter)
4778  		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4779  
4780  	/* Apply cable rule here.  Don't apply it early because when
4781  	 * we handle hot plug the cable type can itself change.
4782  	 * Check this last so that we know if the transfer rate was
4783  	 * solely limited by the cable.
4784  	 * Unknown or 80 wire cables reported host side are checked
4785  	 * drive side as well. Cases where we know a 40wire cable
4786  	 * is used safely for 80 are not checked here.
4787  	 */
4788  	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4789  		/* UDMA/44 or higher would be available */
4790  		if (cable_is_40wire(ap)) {
4791  			ata_dev_warn(dev,
4792  				     "limited to UDMA/33 due to 40-wire cable\n");
4793  			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4794  		}
4795  
4796  	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4797  			    &dev->mwdma_mask, &dev->udma_mask);
4798  }
4799  
4800  /**
4801   *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4802   *	@dev: Device to which command will be sent
4803   *
4804   *	Issue SET FEATURES - XFER MODE command to device @dev
4805   *	on port @ap.
4806   *
4807   *	LOCKING:
4808   *	PCI/etc. bus probe sem.
4809   *
4810   *	RETURNS:
4811   *	0 on success, AC_ERR_* mask otherwise.
4812   */
4813  
4814  static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4815  {
4816  	struct ata_taskfile tf;
4817  	unsigned int err_mask;
4818  
4819  	/* set up set-features taskfile */
4820  	DPRINTK("set features - xfer mode\n");
4821  
4822  	/* Some controllers and ATAPI devices show flaky interrupt
4823  	 * behavior after setting xfer mode.  Use polling instead.
4824  	 */
4825  	ata_tf_init(dev, &tf);
4826  	tf.command = ATA_CMD_SET_FEATURES;
4827  	tf.feature = SETFEATURES_XFER;
4828  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4829  	tf.protocol = ATA_PROT_NODATA;
4830  	/* If we are using IORDY we must send the mode setting command */
4831  	if (ata_pio_need_iordy(dev))
4832  		tf.nsect = dev->xfer_mode;
4833  	/* If the device has IORDY and the controller does not - turn it off */
4834   	else if (ata_id_has_iordy(dev->id))
4835  		tf.nsect = 0x01;
4836  	else /* In the ancient relic department - skip all of this */
4837  		return 0;
4838  
4839  	/* On some disks, this command causes spin-up, so we need longer timeout */
4840  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4841  
4842  	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4843  	return err_mask;
4844  }
4845  
4846  /**
4847   *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4848   *	@dev: Device to which command will be sent
4849   *	@enable: Whether to enable or disable the feature
4850   *	@feature: The sector count value that selects the feature to set
4851   *
4852   *	Issue SET FEATURES - SATA FEATURES command to device @dev
4853   *	on port @ap, with @feature in the sector count register.
4854   *
4855   *	LOCKING:
4856   *	PCI/etc. bus probe sem.
4857   *
4858   *	RETURNS:
4859   *	0 on success, AC_ERR_* mask otherwise.
4860   */
4861  unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4862  {
4863  	struct ata_taskfile tf;
4864  	unsigned int err_mask;
4865  	unsigned long timeout = 0;
4866  
4867  	/* set up set-features taskfile */
4868  	DPRINTK("set features - SATA features\n");
4869  
4870  	ata_tf_init(dev, &tf);
4871  	tf.command = ATA_CMD_SET_FEATURES;
4872  	tf.feature = enable;
4873  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4874  	tf.protocol = ATA_PROT_NODATA;
4875  	tf.nsect = feature;
4876  
4877  	if (enable == SETFEATURES_SPINUP)
4878  		timeout = ata_probe_timeout ?
4879  			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4880  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4881  
4882  	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4883  	return err_mask;
4884  }
4885  EXPORT_SYMBOL_GPL(ata_dev_set_feature);
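/*
 * Example (editorial sketch): enabling device-initiated power
 * management the way libata's LPM code does.  SETFEATURES_SATA_ENABLE
 * and SATA_DIPM are the real constants from <linux/ata.h>.
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 */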
4886  
4887  /**
4888   *	ata_dev_init_params - Issue INIT DEV PARAMS command
4889   *	@dev: Device to which command will be sent
4890   *	@heads: Number of heads (taskfile parameter)
4891   *	@sectors: Number of sectors (taskfile parameter)
4892   *
4893   *	LOCKING:
4894   *	Kernel thread context (may sleep)
4895   *
4896   *	RETURNS:
4897   *	0 on success, AC_ERR_* mask otherwise.
4898   */
4899  static unsigned int ata_dev_init_params(struct ata_device *dev,
4900  					u16 heads, u16 sectors)
4901  {
4902  	struct ata_taskfile tf;
4903  	unsigned int err_mask;
4904  
4905  	/* Number of sectors per track 1-255. Number of heads 1-16 */
4906  	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4907  		return AC_ERR_INVALID;
4908  
4909  	/* set up init dev params taskfile */
4910  	DPRINTK("init dev params\n");
4911  
4912  	ata_tf_init(dev, &tf);
4913  	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4914  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4915  	tf.protocol = ATA_PROT_NODATA;
4916  	tf.nsect = sectors;
4917  	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4918  
4919  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4920  	/* A clean abort indicates an original or just out-of-spec drive,
4921  	   and we should continue, as we issue the setup based on the
4922  	   drive's reported working geometry */
4923  	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4924  		err_mask = 0;
4925  
4926  	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4927  	return err_mask;
4928  }
4929  
4930  /**
4931   *	atapi_check_dma - Check whether ATAPI DMA can be supported
4932   *	@qc: Metadata associated with taskfile to check
4933   *
4934   *	Allow low-level driver to filter ATA PACKET commands, returning
4935   *	a status indicating whether or not it is OK to use DMA for the
4936   *	supplied PACKET command.
4937   *
4938   *	LOCKING:
4939   *	spin_lock_irqsave(host lock)
4940   *
4941   *	RETURNS: 0 when ATAPI DMA can be used
4942   *               nonzero otherwise
4943   */
4944  int atapi_check_dma(struct ata_queued_cmd *qc)
4945  {
4946  	struct ata_port *ap = qc->ap;
4947  
4948  	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4949  	 * few ATAPI devices choke on such DMA requests.
4950  	 */
4951  	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4952  	    unlikely(qc->nbytes & 15))
4953  		return 1;
4954  
4955  	if (ap->ops->check_atapi_dma)
4956  		return ap->ops->check_atapi_dma(qc);
4957  
4958  	return 0;
4959  }
4960  
4961  /**
4962   *	ata_std_qc_defer - Check whether a qc needs to be deferred
4963   *	@qc: ATA command in question
4964   *
4965   *	Non-NCQ commands cannot run with any other command, NCQ or
4966   *	not.  As the upper layer only knows the queue depth, we are
4967   *	responsible for maintaining exclusion.  This function checks
4968   *	whether a new command @qc can be issued.
4969   *
4970   *	LOCKING:
4971   *	spin_lock_irqsave(host lock)
4972   *
4973   *	RETURNS:
4974   *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4975   */
4976  int ata_std_qc_defer(struct ata_queued_cmd *qc)
4977  {
4978  	struct ata_link *link = qc->dev->link;
4979  
4980  	if (ata_is_ncq(qc->tf.protocol)) {
4981  		if (!ata_tag_valid(link->active_tag))
4982  			return 0;
4983  	} else {
4984  		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4985  			return 0;
4986  	}
4987  
4988  	return ATA_DEFER_LINK;
4989  }
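/*
 * Editorial note: link->active_tag tracks a non-NCQ command in flight
 * and link->sactive is the bitmask of active NCQ tags, so the checks
 * above let NCQ commands run alongside other NCQ commands, while a
 * non-NCQ command defers whenever anything at all is in flight.
 */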
4990  
4991  void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4992  
4993  /**
4994   *	ata_sg_init - Associate command with scatter-gather table.
4995   *	@qc: Command to be associated
4996   *	@sg: Scatter-gather table.
4997   *	@n_elem: Number of elements in s/g table.
4998   *
4999   *	Initialize the data-related elements of queued_cmd @qc
5000   *	to point to a scatter-gather table @sg, containing @n_elem
5001   *	elements.
5002   *
5003   *	LOCKING:
5004   *	spin_lock_irqsave(host lock)
5005   */
5006  void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
5007  		 unsigned int n_elem)
5008  {
5009  	qc->sg = sg;
5010  	qc->n_elem = n_elem;
5011  	qc->cursg = qc->sg;
5012  }
5013  
5014  #ifdef CONFIG_HAS_DMA
5015  
5016  /**
5017   *	ata_sg_clean - Unmap DMA memory associated with command
5018   *	@qc: Command containing DMA memory to be released
5019   *
5020   *	Unmap all mapped DMA memory associated with this command.
5021   *
5022   *	LOCKING:
5023   *	spin_lock_irqsave(host lock)
5024   */
5025  static void ata_sg_clean(struct ata_queued_cmd *qc)
5026  {
5027  	struct ata_port *ap = qc->ap;
5028  	struct scatterlist *sg = qc->sg;
5029  	int dir = qc->dma_dir;
5030  
5031  	WARN_ON_ONCE(sg == NULL);
5032  
5033  	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
5034  
5035  	if (qc->n_elem)
5036  		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
5037  
5038  	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5039  	qc->sg = NULL;
5040  }
5041  
5042  /**
5043   *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
5044   *	@qc: Command with scatter-gather table to be mapped.
5045   *
5046   *	DMA-map the scatter-gather table associated with queued_cmd @qc.
5047   *
5048   *	LOCKING:
5049   *	spin_lock_irqsave(host lock)
5050   *
5051   *	RETURNS:
5052   *	Zero on success, negative on error.
5053   *
5054   */
5055  static int ata_sg_setup(struct ata_queued_cmd *qc)
5056  {
5057  	struct ata_port *ap = qc->ap;
5058  	unsigned int n_elem;
5059  
5060  	VPRINTK("ENTER, ata%u\n", ap->print_id);
5061  
5062  	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5063  	if (n_elem < 1)
5064  		return -1;
5065  
5066  	DPRINTK("%d sg elements mapped\n", n_elem);
5067  	qc->orig_n_elem = qc->n_elem;
5068  	qc->n_elem = n_elem;
5069  	qc->flags |= ATA_QCFLAG_DMAMAP;
5070  
5071  	return 0;
5072  }
5073  
5074  #else /* !CONFIG_HAS_DMA */
5075  
5076  static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
5077  static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
5078  
5079  #endif /* !CONFIG_HAS_DMA */
5080  
5081  /**
5082   *	swap_buf_le16 - swap halves of 16-bit words in place
5083   *	@buf:  Buffer to swap
5084   *	@buf_words:  Number of 16-bit words in buffer.
5085   *
5086   *	Swap halves of 16-bit words if needed to convert from
5087   *	little-endian byte order to native cpu byte order, or
5088   *	vice-versa.
5089   *
5090   *	LOCKING:
5091   *	Inherited from caller.
5092   */
5093  void swap_buf_le16(u16 *buf, unsigned int buf_words)
5094  {
5095  #ifdef __BIG_ENDIAN
5096  	unsigned int i;
5097  
5098  	for (i = 0; i < buf_words; i++)
5099  		buf[i] = le16_to_cpu(buf[i]);
5100  #endif /* __BIG_ENDIAN */
5101  }
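
/*
 * Example: the classic user is IDENTIFY data, which the device returns
 * as 256 little-endian 16-bit words.  A sketch, assuming id[] was just
 * read from the device:
 *
 *	u16 id[ATA_ID_WORDS];
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *	// on big-endian CPUs the words are now in CPU byte order;
 *	// on little-endian CPUs the call compiles to a no-op
 */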
5102  
5103  /**
5104   *	ata_qc_new_init - Request an available ATA command, and initialize it
5105   *	@dev: Device from whom we request an available command structure
5106   *	@tag: tag to allocate the command for
5107   *
5108   *	LOCKING:
5109   *	None.
5110   */
5111  
5112  struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
5113  {
5114  	struct ata_port *ap = dev->link->ap;
5115  	struct ata_queued_cmd *qc;
5116  
5117  	/* no command while frozen */
5118  	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5119  		return NULL;
5120  
5121  	/* libsas case */
5122  	if (ap->flags & ATA_FLAG_SAS_HOST) {
5123  		tag = ata_sas_allocate_tag(ap);
5124  		if (tag < 0)
5125  			return NULL;
5126  	}
5127  
5128  	qc = __ata_qc_from_tag(ap, tag);
5129  	qc->tag = tag;
5130  	qc->scsicmd = NULL;
5131  	qc->ap = ap;
5132  	qc->dev = dev;
5133  
5134  	ata_qc_reinit(qc);
5135  
5136  	return qc;
5137  }
5138  
5139  /**
5140   *	ata_qc_free - free unused ata_queued_cmd
5141   *	@qc: Command to complete
5142   *
5143   *	Designed to free an unused ata_queued_cmd object
5144   *	in case something prevents it from being used.
5145   *
5146   *	LOCKING:
5147   *	spin_lock_irqsave(host lock)
5148   */
5149  void ata_qc_free(struct ata_queued_cmd *qc)
5150  {
5151  	struct ata_port *ap;
5152  	unsigned int tag;
5153  
5154  	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5155  	ap = qc->ap;
5156  
5157  	qc->flags = 0;
5158  	tag = qc->tag;
5159  	if (likely(ata_tag_valid(tag))) {
5160  		qc->tag = ATA_TAG_POISON;
5161  		if (ap->flags & ATA_FLAG_SAS_HOST)
5162  			ata_sas_free_tag(tag, ap);
5163  	}
5164  }
5165  
5166  void __ata_qc_complete(struct ata_queued_cmd *qc)
5167  {
5168  	struct ata_port *ap;
5169  	struct ata_link *link;
5170  
5171  	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5172  	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
5173  	ap = qc->ap;
5174  	link = qc->dev->link;
5175  
5176  	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5177  		ata_sg_clean(qc);
5178  
5179  	/* command should be marked inactive atomically with qc completion */
5180  	if (ata_is_ncq(qc->tf.protocol)) {
5181  		link->sactive &= ~(1 << qc->tag);
5182  		if (!link->sactive)
5183  			ap->nr_active_links--;
5184  	} else {
5185  		link->active_tag = ATA_TAG_POISON;
5186  		ap->nr_active_links--;
5187  	}
5188  
5189  	/* clear exclusive status */
5190  	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5191  		     ap->excl_link == link))
5192  		ap->excl_link = NULL;
5193  
5194  	/* atapi: mark qc as inactive to prevent the interrupt handler
5195  	 * from completing the command twice later, before the error handler
5196  	 * is called. (when rc != 0 and atapi request sense is needed)
5197  	 */
5198  	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5199  	ap->qc_active &= ~(1 << qc->tag);
5200  
5201  	/* call completion callback */
5202  	qc->complete_fn(qc);
5203  }
5204  
5205  static void fill_result_tf(struct ata_queued_cmd *qc)
5206  {
5207  	struct ata_port *ap = qc->ap;
5208  
5209  	qc->result_tf.flags = qc->tf.flags;
5210  	ap->ops->qc_fill_rtf(qc);
5211  }
5212  
5213  static void ata_verify_xfer(struct ata_queued_cmd *qc)
5214  {
5215  	struct ata_device *dev = qc->dev;
5216  
5217  	if (!ata_is_data(qc->tf.protocol))
5218  		return;
5219  
5220  	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5221  		return;
5222  
5223  	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5224  }
5225  
5226  /**
5227   *	ata_qc_complete - Complete an active ATA command
5228   *	@qc: Command to complete
5229   *
5230   *	Indicate to the mid and upper layers that an ATA command has
5231   *	completed, with either an ok or not-ok status.
5232   *
5233   *	Refrain from calling this function multiple times when
5234   *	successfully completing multiple NCQ commands.
5235   *	ata_qc_complete_multiple() should be used instead, which will
5236   *	properly update IRQ expect state.
5237   *
5238   *	LOCKING:
5239   *	spin_lock_irqsave(host lock)
5240   */
5241  void ata_qc_complete(struct ata_queued_cmd *qc)
5242  {
5243  	struct ata_port *ap = qc->ap;
5244  
5245  	/* Trigger the LED (if available) */
5246  	ledtrig_disk_activity();
5247  
5248  	/* XXX: New EH and old EH use different mechanisms to
5249  	 * synchronize EH with regular execution path.
5250  	 *
5251  	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5252  	 * Normal execution path is responsible for not accessing a
5253  	 * failed qc.  libata core enforces the rule by returning NULL
5254  	 * from ata_qc_from_tag() for failed qcs.
5255  	 *
5256  	 * Old EH depends on ata_qc_complete() nullifying completion
5257  	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5258  	 * not synchronize with interrupt handler.  Only PIO task is
5259  	 * taken care of.
5260  	 */
5261  	if (ap->ops->error_handler) {
5262  		struct ata_device *dev = qc->dev;
5263  		struct ata_eh_info *ehi = &dev->link->eh_info;
5264  
5265  		if (unlikely(qc->err_mask))
5266  			qc->flags |= ATA_QCFLAG_FAILED;
5267  
5268  		/*
5269  		 * Finish internal commands without any further processing
5270  		 * and always with the result TF filled.
5271  		 */
5272  		if (unlikely(ata_tag_internal(qc->tag))) {
5273  			fill_result_tf(qc);
5274  			trace_ata_qc_complete_internal(qc);
5275  			__ata_qc_complete(qc);
5276  			return;
5277  		}
5278  
5279  		/*
5280  		 * Non-internal qc has failed.  Fill the result TF and
5281  		 * summon EH.
5282  		 */
5283  		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5284  			fill_result_tf(qc);
5285  			trace_ata_qc_complete_failed(qc);
5286  			ata_qc_schedule_eh(qc);
5287  			return;
5288  		}
5289  
5290  		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5291  
5292  		/* read result TF if requested */
5293  		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5294  			fill_result_tf(qc);
5295  
5296  		trace_ata_qc_complete_done(qc);
5297  		/* Some commands need post-processing after successful
5298  		 * completion.
5299  		 */
5300  		switch (qc->tf.command) {
5301  		case ATA_CMD_SET_FEATURES:
5302  			if (qc->tf.feature != SETFEATURES_WC_ON &&
5303  			    qc->tf.feature != SETFEATURES_WC_OFF &&
5304  			    qc->tf.feature != SETFEATURES_RA_ON &&
5305  			    qc->tf.feature != SETFEATURES_RA_OFF)
5306  				break;
5307  			/* fall through */
5308  		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5309  		case ATA_CMD_SET_MULTI: /* multi_count changed */
5310  			/* revalidate device */
5311  			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5312  			ata_port_schedule_eh(ap);
5313  			break;
5314  
5315  		case ATA_CMD_SLEEP:
5316  			dev->flags |= ATA_DFLAG_SLEEPING;
5317  			break;
5318  		}
5319  
5320  		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5321  			ata_verify_xfer(qc);
5322  
5323  		__ata_qc_complete(qc);
5324  	} else {
5325  		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5326  			return;
5327  
5328  		/* read result TF if failed or requested */
5329  		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5330  			fill_result_tf(qc);
5331  
5332  		__ata_qc_complete(qc);
5333  	}
5334  }
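
/*
 * Example: the common single-command completion path in an LLD
 * interrupt handler.  A sketch; my_hw_error() stands in for a
 * hypothetical controller-specific status check:
 *
 *	struct ata_queued_cmd *qc;
 *
 *	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 *	if (qc) {
 *		if (my_hw_error(ap))
 *			qc->err_mask |= AC_ERR_DEV;
 *		ata_qc_complete(qc);
 *	}
 */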
5335  
5336  /**
5337   *	ata_qc_complete_multiple - Complete multiple qcs successfully
5338   *	@ap: port in question
5339   *	@qc_active: new qc_active mask
5340   *
5341   *	Complete in-flight commands.  This function is meant to be
5342   *	called from the low-level driver's interrupt routine to complete
5343   *	requests normally.  ap->qc_active and @qc_active are compared
5344   *	and commands are completed accordingly.
5345   *
5346   *	Always use this function when completing multiple NCQ commands
5347   *	from IRQ handlers instead of calling ata_qc_complete()
5348   *	multiple times to keep IRQ expect status properly in sync.
5349   *
5350   *	LOCKING:
5351   *	spin_lock_irqsave(host lock)
5352   *
5353   *	RETURNS:
5354   *	Number of completed commands on success, -errno otherwise.
5355   */
5356  int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5357  {
5358  	int nr_done = 0;
5359  	u32 done_mask;
5360  
5361  	done_mask = ap->qc_active ^ qc_active;
5362  
5363  	if (unlikely(done_mask & qc_active)) {
5364  		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5365  			     ap->qc_active, qc_active);
5366  		return -EINVAL;
5367  	}
5368  
5369  	while (done_mask) {
5370  		struct ata_queued_cmd *qc;
5371  		unsigned int tag = __ffs(done_mask);
5372  
5373  		qc = ata_qc_from_tag(ap, tag);
5374  		if (qc) {
5375  			ata_qc_complete(qc);
5376  			nr_done++;
5377  		}
5378  		done_mask &= ~(1 << tag);
5379  	}
5380  
5381  	return nr_done;
5382  }
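
/*
 * Example: an NCQ-capable LLD usually reads the hardware's
 * still-active tag mask and passes it in directly.  A sketch with
 * hypothetical MY_PORT_SACT and my_port_base() names (AHCI's PxSACT
 * register is the classic real-world instance):
 *
 *	void __iomem *mmio = my_port_base(ap);
 *	u32 qc_active = readl(mmio + MY_PORT_SACT);
 *
 *	ata_qc_complete_multiple(ap, qc_active);
 */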
5383  
5384  /**
5385   *	ata_qc_issue - issue taskfile to device
5386   *	@qc: command to issue to device
5387   *
5388   *	Prepare an ATA command for submission to the device.
5389   *	This includes mapping the data into a DMA-able
5390   *	area, filling in the S/G table, and finally
5391   *	writing the taskfile to hardware, starting the command.
5392   *
5393   *	LOCKING:
5394   *	spin_lock_irqsave(host lock)
5395   */
5396  void ata_qc_issue(struct ata_queued_cmd *qc)
5397  {
5398  	struct ata_port *ap = qc->ap;
5399  	struct ata_link *link = qc->dev->link;
5400  	u8 prot = qc->tf.protocol;
5401  
5402  	/* Make sure only one non-NCQ command is outstanding.  The
5403  	 * check is skipped for old EH because it reuses active qc to
5404  	 * request ATAPI sense.
5405  	 */
5406  	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5407  
5408  	if (ata_is_ncq(prot)) {
5409  		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5410  
5411  		if (!link->sactive)
5412  			ap->nr_active_links++;
5413  		link->sactive |= 1 << qc->tag;
5414  	} else {
5415  		WARN_ON_ONCE(link->sactive);
5416  
5417  		ap->nr_active_links++;
5418  		link->active_tag = qc->tag;
5419  	}
5420  
5421  	qc->flags |= ATA_QCFLAG_ACTIVE;
5422  	ap->qc_active |= 1 << qc->tag;
5423  
5424  	/*
5425  	 * We guarantee to LLDs that they will have at least one
5426  	 * non-zero sg if the command is a data command.
5427  	 */
5428  	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5429  		goto sys_err;
5430  
5431  	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5432  				 (ap->flags & ATA_FLAG_PIO_DMA)))
5433  		if (ata_sg_setup(qc))
5434  			goto sys_err;
5435  
5436  	/* if device is sleeping, schedule reset and abort the link */
5437  	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5438  		link->eh_info.action |= ATA_EH_RESET;
5439  		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5440  		ata_link_abort(link);
5441  		return;
5442  	}
5443  
5444  	ap->ops->qc_prep(qc);
5445  	trace_ata_qc_issue(qc);
5446  	qc->err_mask |= ap->ops->qc_issue(qc);
5447  	if (unlikely(qc->err_mask))
5448  		goto err;
5449  	return;
5450  
5451  sys_err:
5452  	qc->err_mask |= AC_ERR_SYSTEM;
5453  err:
5454  	ata_qc_complete(qc);
5455  }
5456  
5457  /**
5458   *	sata_scr_valid - test whether SCRs are accessible
5459   *	@link: ATA link to test SCR accessibility for
5460   *
5461   *	Test whether SCRs are accessible for @link.
5462   *
5463   *	LOCKING:
5464   *	None.
5465   *
5466   *	RETURNS:
5467   *	1 if SCRs are accessible, 0 otherwise.
5468   */
5469  int sata_scr_valid(struct ata_link *link)
5470  {
5471  	struct ata_port *ap = link->ap;
5472  
5473  	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5474  }
5475  
5476  /**
5477   *	sata_scr_read - read SCR register of the specified port
5478   *	@link: ATA link to read SCR for
5479   *	@reg: SCR to read
5480   *	@val: Place to store read value
5481   *
5482   *	Read SCR register @reg of @link into *@val.  This function is
5483   *	guaranteed to succeed if @link is ap->link, the cable type of
5484   *	the port is SATA and the port implements ->scr_read.
5485   *
5486   *	LOCKING:
5487   *	None if @link is ap->link.  Kernel thread context otherwise.
5488   *
5489   *	RETURNS:
5490   *	0 on success, negative errno on failure.
5491   */
5492  int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5493  {
5494  	if (ata_is_host_link(link)) {
5495  		if (sata_scr_valid(link))
5496  			return link->ap->ops->scr_read(link, reg, val);
5497  		return -EOPNOTSUPP;
5498  	}
5499  
5500  	return sata_pmp_scr_read(link, reg, val);
5501  }
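
/*
 * Example: decoding SStatus after a successful read.  Per the SATA
 * spec, DET occupies bits 3:0 and SPD bits 7:4:
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) {
 *		u8 det = sstatus & 0xf;		// 0x3: device + phy comm
 *		u8 spd = (sstatus >> 4) & 0xf;	// 1/2/3: 1.5/3.0/6.0 Gbps
 *	}
 */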
5502  
5503  /**
5504   *	sata_scr_write - write SCR register of the specified port
5505   *	@link: ATA link to write SCR for
5506   *	@reg: SCR to write
5507   *	@val: value to write
5508   *
5509   *	Write @val to SCR register @reg of @link.  This function is
5510   *	guaranteed to succeed if @link is ap->link, the cable type of
5511   *	the port is SATA and the port implements ->scr_write.
5512   *
5513   *	LOCKING:
5514   *	None if @link is ap->link.  Kernel thread context otherwise.
5515   *
5516   *	RETURNS:
5517   *	0 on success, negative errno on failure.
5518   */
5519  int sata_scr_write(struct ata_link *link, int reg, u32 val)
5520  {
5521  	if (ata_is_host_link(link)) {
5522  		if (sata_scr_valid(link))
5523  			return link->ap->ops->scr_write(link, reg, val);
5524  		return -EOPNOTSUPP;
5525  	}
5526  
5527  	return sata_pmp_scr_write(link, reg, val);
5528  }
5529  
5530  /**
5531   *	sata_scr_write_flush - write SCR register of the specified port and flush
5532   *	@link: ATA link to write SCR for
5533   *	@reg: SCR to write
5534   *	@val: value to write
5535   *
5536   *	This function is identical to sata_scr_write() except that this
5537   *	function performs flush after writing to the register.
5538   *
5539   *	LOCKING:
5540   *	None if @link is ap->link.  Kernel thread context otherwise.
5541   *
5542   *	RETURNS:
5543   *	0 on success, negative errno on failure.
5544   */
5545  int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5546  {
5547  	if (ata_is_host_link(link)) {
5548  		int rc;
5549  
5550  		if (sata_scr_valid(link)) {
5551  			rc = link->ap->ops->scr_write(link, reg, val);
5552  			if (rc == 0)
5553  				rc = link->ap->ops->scr_read(link, reg, &val);
5554  			return rc;
5555  		}
5556  		return -EOPNOTSUPP;
5557  	}
5558  
5559  	return sata_pmp_scr_write(link, reg, val);
5560  }
5561  
5562  /**
5563   *	ata_phys_link_online - test whether the given link is online
5564   *	@link: ATA link to test
5565   *
5566   *	Test whether @link is online.  Note that this function returns
5567   *	false if the online status of @link cannot be obtained, so
5568   *	ata_link_online(link) != !ata_link_offline(link).
5569   *
5570   *	LOCKING:
5571   *	None.
5572   *
5573   *	RETURNS:
5574   *	True if the port online status is available and online.
5575   */
5576  bool ata_phys_link_online(struct ata_link *link)
5577  {
5578  	u32 sstatus;
5579  
5580  	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5581  	    ata_sstatus_online(sstatus))
5582  		return true;
5583  	return false;
5584  }
5585  
5586  /**
5587   *	ata_phys_link_offline - test whether the given link is offline
5588   *	@link: ATA link to test
5589   *
5590   *	Test whether @link is offline.  Note that this function
5591   *	returns false if the offline status of @link cannot be obtained, so
5592   *	ata_link_online(link) != !ata_link_offline(link).
5593   *
5594   *	LOCKING:
5595   *	None.
5596   *
5597   *	RETURNS:
5598   *	True if the port offline status is available and offline.
5599   */
5600  bool ata_phys_link_offline(struct ata_link *link)
5601  {
5602  	u32 sstatus;
5603  
5604  	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5605  	    !ata_sstatus_online(sstatus))
5606  		return true;
5607  	return false;
5608  }
5609  
5610  /**
5611   *	ata_link_online - test whether the given link is online
5612   *	@link: ATA link to test
5613   *
5614   *	Test whether @link is online.  This is identical to
5615   *	ata_phys_link_online() when there's no slave link.  When
5616   *	there's a slave link, this function should only be called on
5617   *	the master link and will return true if any of M/S links is
5618   *	online.
5619   *
5620   *	LOCKING:
5621   *	None.
5622   *
5623   *	RETURNS:
5624   *	True if the port online status is available and online.
5625   */
5626  bool ata_link_online(struct ata_link *link)
5627  {
5628  	struct ata_link *slave = link->ap->slave_link;
5629  
5630  	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5631  
5632  	return ata_phys_link_online(link) ||
5633  		(slave && ata_phys_link_online(slave));
5634  }
5635  
5636  /**
5637   *	ata_link_offline - test whether the given link is offline
5638   *	@link: ATA link to test
5639   *
5640   *	Test whether @link is offline.  This is identical to
5641   *	ata_phys_link_offline() when there's no slave link.  When
5642   *	there's a slave link, this function should only be called on
5643   *	the master link and will return true if both M/S links are
5644   *	offline.
5645   *
5646   *	LOCKING:
5647   *	None.
5648   *
5649   *	RETURNS:
5650   *	True if the port offline status is available and offline.
5651   */
5652  bool ata_link_offline(struct ata_link *link)
5653  {
5654  	struct ata_link *slave = link->ap->slave_link;
5655  
5656  	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5657  
5658  	return ata_phys_link_offline(link) &&
5659  		(!slave || ata_phys_link_offline(slave));
5660  }
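
/*
 * Example: since online and offline are not complements, callers
 * should treat link state as three-valued.  A sketch; the handle_*()
 * helpers are hypothetical:
 *
 *	if (ata_link_online(link))
 *		handle_present(link);	// PHY reports a device
 *	else if (ata_link_offline(link))
 *		handle_empty(link);	// PHY reports no device
 *	else
 *		handle_unknown(link);	// SCRs unreadable, can't tell
 */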
5661  
5662  #ifdef CONFIG_PM
5663  static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5664  				unsigned int action, unsigned int ehi_flags,
5665  				bool async)
5666  {
5667  	struct ata_link *link;
5668  	unsigned long flags;
5669  
5670  	/* Previous resume operation might still be in
5671  	 * progress.  Wait for PM_PENDING to clear.
5672  	 */
5673  	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5674  		ata_port_wait_eh(ap);
5675  		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5676  	}
5677  
5678  	/* request PM ops to EH */
5679  	spin_lock_irqsave(ap->lock, flags);
5680  
5681  	ap->pm_mesg = mesg;
5682  	ap->pflags |= ATA_PFLAG_PM_PENDING;
5683  	ata_for_each_link(link, ap, HOST_FIRST) {
5684  		link->eh_info.action |= action;
5685  		link->eh_info.flags |= ehi_flags;
5686  	}
5687  
5688  	ata_port_schedule_eh(ap);
5689  
5690  	spin_unlock_irqrestore(ap->lock, flags);
5691  
5692  	if (!async) {
5693  		ata_port_wait_eh(ap);
5694  		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5695  	}
5696  }
5697  
5698  /*
5699   * On some hardware, a device fails to respond after being spun down for suspend.  As
5700   * the device won't be used before being resumed, we don't need to touch the
5701   * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
5702   *
5703   * http://thread.gmane.org/gmane.linux.ide/46764
5704   */
5705  static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5706  						 | ATA_EHI_NO_AUTOPSY
5707  						 | ATA_EHI_NO_RECOVERY;
5708  
5709  static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5710  {
5711  	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5712  }
5713  
5714  static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5715  {
5716  	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5717  }
5718  
5719  static int ata_port_pm_suspend(struct device *dev)
5720  {
5721  	struct ata_port *ap = to_ata_port(dev);
5722  
5723  	if (pm_runtime_suspended(dev))
5724  		return 0;
5725  
5726  	ata_port_suspend(ap, PMSG_SUSPEND);
5727  	return 0;
5728  }
5729  
5730  static int ata_port_pm_freeze(struct device *dev)
5731  {
5732  	struct ata_port *ap = to_ata_port(dev);
5733  
5734  	if (pm_runtime_suspended(dev))
5735  		return 0;
5736  
5737  	ata_port_suspend(ap, PMSG_FREEZE);
5738  	return 0;
5739  }
5740  
5741  static int ata_port_pm_poweroff(struct device *dev)
5742  {
5743  	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5744  	return 0;
5745  }
5746  
5747  static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5748  						| ATA_EHI_QUIET;
5749  
5750  static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5751  {
5752  	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5753  }
5754  
5755  static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5756  {
5757  	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5758  }
5759  
5760  static int ata_port_pm_resume(struct device *dev)
5761  {
5762  	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5763  	pm_runtime_disable(dev);
5764  	pm_runtime_set_active(dev);
5765  	pm_runtime_enable(dev);
5766  	return 0;
5767  }
5768  
5769  /*
5770   * For ODDs, the upper layer will poll for media change every few seconds,
5771   * which will make the device enter and leave suspend state every few
5772   * seconds.  As each suspend causes a hard/soft reset, runtime suspend gains
5773   * very little and the ODD may malfunction after constantly being reset.
5774   * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5775   * ODD is attached to the port.
5776   */
5777  static int ata_port_runtime_idle(struct device *dev)
5778  {
5779  	struct ata_port *ap = to_ata_port(dev);
5780  	struct ata_link *link;
5781  	struct ata_device *adev;
5782  
5783  	ata_for_each_link(link, ap, HOST_FIRST) {
5784  		ata_for_each_dev(adev, link, ENABLED)
5785  			if (adev->class == ATA_DEV_ATAPI &&
5786  			    !zpodd_dev_enabled(adev))
5787  				return -EBUSY;
5788  	}
5789  
5790  	return 0;
5791  }
5792  
5793  static int ata_port_runtime_suspend(struct device *dev)
5794  {
5795  	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5796  	return 0;
5797  }
5798  
5799  static int ata_port_runtime_resume(struct device *dev)
5800  {
5801  	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5802  	return 0;
5803  }
5804  
5805  static const struct dev_pm_ops ata_port_pm_ops = {
5806  	.suspend = ata_port_pm_suspend,
5807  	.resume = ata_port_pm_resume,
5808  	.freeze = ata_port_pm_freeze,
5809  	.thaw = ata_port_pm_resume,
5810  	.poweroff = ata_port_pm_poweroff,
5811  	.restore = ata_port_pm_resume,
5812  
5813  	.runtime_suspend = ata_port_runtime_suspend,
5814  	.runtime_resume = ata_port_runtime_resume,
5815  	.runtime_idle = ata_port_runtime_idle,
5816  };
5817  
5818  /* sas ports don't participate in pm runtime management of ata_ports,
5819   * and need to resume ata devices at the domain level, not the per-port
5820   * level. sas suspend/resume is async to allow parallel port recovery
5821   * since sas has multiple ata_port instances per Scsi_Host.
5822   */
5823  void ata_sas_port_suspend(struct ata_port *ap)
5824  {
5825  	ata_port_suspend_async(ap, PMSG_SUSPEND);
5826  }
5827  EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5828  
5829  void ata_sas_port_resume(struct ata_port *ap)
5830  {
5831  	ata_port_resume_async(ap, PMSG_RESUME);
5832  }
5833  EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5834  
5835  /**
5836   *	ata_host_suspend - suspend host
5837   *	@host: host to suspend
5838   *	@mesg: PM message
5839   *
5840   *	Suspend @host.  Actual operation is performed by port suspend.
5841   */
5842  int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5843  {
5844  	host->dev->power.power_state = mesg;
5845  	return 0;
5846  }
5847  
5848  /**
5849   *	ata_host_resume - resume host
5850   *	@host: host to resume
5851   *
5852   *	Resume @host.  Actual operation is performed by port resume.
5853   */
5854  void ata_host_resume(struct ata_host *host)
5855  {
5856  	host->dev->power.power_state = PMSG_ON;
5857  }
5858  #endif
5859  
5860  struct device_type ata_port_type = {
5861  	.name = "ata_port",
5862  #ifdef CONFIG_PM
5863  	.pm = &ata_port_pm_ops,
5864  #endif
5865  };
5866  
5867  /**
5868   *	ata_dev_init - Initialize an ata_device structure
5869   *	@dev: Device structure to initialize
5870   *
5871   *	Initialize @dev in preparation for probing.
5872   *
5873   *	LOCKING:
5874   *	Inherited from caller.
5875   */
5876  void ata_dev_init(struct ata_device *dev)
5877  {
5878  	struct ata_link *link = ata_dev_phys_link(dev);
5879  	struct ata_port *ap = link->ap;
5880  	unsigned long flags;
5881  
5882  	/* SATA spd limit is bound to the attached device, reset together */
5883  	link->sata_spd_limit = link->hw_sata_spd_limit;
5884  	link->sata_spd = 0;
5885  
5886  	/* High bits of dev->flags are used to record warm plug
5887  	 * requests which occur asynchronously.  Synchronize using
5888  	 * host lock.
5889  	 */
5890  	spin_lock_irqsave(ap->lock, flags);
5891  	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5892  	dev->horkage = 0;
5893  	spin_unlock_irqrestore(ap->lock, flags);
5894  
5895  	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5896  	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5897  	dev->pio_mask = UINT_MAX;
5898  	dev->mwdma_mask = UINT_MAX;
5899  	dev->udma_mask = UINT_MAX;
5900  }
5901  
5902  /**
5903   *	ata_link_init - Initialize an ata_link structure
5904   *	@ap: ATA port link is attached to
5905   *	@link: Link structure to initialize
5906   *	@pmp: Port multiplier port number
5907   *
5908   *	Initialize @link.
5909   *
5910   *	LOCKING:
5911   *	Kernel thread context (may sleep)
5912   */
5913  void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5914  {
5915  	int i;
5916  
5917  	/* clear everything except for devices */
5918  	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5919  	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5920  
5921  	link->ap = ap;
5922  	link->pmp = pmp;
5923  	link->active_tag = ATA_TAG_POISON;
5924  	link->hw_sata_spd_limit = UINT_MAX;
5925  
5926  	/* can't use iterator, ap isn't initialized yet */
5927  	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5928  		struct ata_device *dev = &link->device[i];
5929  
5930  		dev->link = link;
5931  		dev->devno = dev - link->device;
5932  #ifdef CONFIG_ATA_ACPI
5933  		dev->gtf_filter = ata_acpi_gtf_filter;
5934  #endif
5935  		ata_dev_init(dev);
5936  	}
5937  }
5938  
5939  /**
5940   *	sata_link_init_spd - Initialize link->sata_spd_limit
5941   *	@link: Link to configure sata_spd_limit for
5942   *
5943   *	Initialize @link->[hw_]sata_spd_limit to the currently
5944   *	configured value.
5945   *
5946   *	LOCKING:
5947   *	Kernel thread context (may sleep).
5948   *
5949   *	RETURNS:
5950   *	0 on success, -errno on failure.
5951   */
5952  int sata_link_init_spd(struct ata_link *link)
5953  {
5954  	u8 spd;
5955  	int rc;
5956  
5957  	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5958  	if (rc)
5959  		return rc;
5960  
5961  	spd = (link->saved_scontrol >> 4) & 0xf;
5962  	if (spd)
5963  		link->hw_sata_spd_limit &= (1 << spd) - 1;
5964  
5965  	ata_force_link_limits(link);
5966  
5967  	link->sata_spd_limit = link->hw_sata_spd_limit;
5968  
5969  	return 0;
5970  }
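
/*
 * Worked example: if the SPD field read from SControl is 2 (limit to
 * Gen2), then hw_sata_spd_limit &= (1 << 2) - 1 = 0x3, leaving only
 * the 1.5 and 3.0 Gbps bits set.  An SPD of 0 means "no limit" and
 * the mask is left untouched.
 */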
5971  
5972  /**
5973   *	ata_port_alloc - allocate and initialize basic ATA port resources
5974   *	@host: ATA host this allocated port belongs to
5975   *
5976   *	Allocate and initialize basic ATA port resources.
5977   *
5978   *	RETURNS:
5979   *	Allocated ATA port on success, NULL on failure.
5980   *
5981   *	LOCKING:
5982   *	Inherited from calling layer (may sleep).
5983   */
5984  struct ata_port *ata_port_alloc(struct ata_host *host)
5985  {
5986  	struct ata_port *ap;
5987  
5988  	DPRINTK("ENTER\n");
5989  
5990  	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5991  	if (!ap)
5992  		return NULL;
5993  
5994  	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5995  	ap->lock = &host->lock;
5996  	ap->print_id = -1;
5997  	ap->local_port_no = -1;
5998  	ap->host = host;
5999  	ap->dev = host->dev;
6000  
6001  #if defined(ATA_VERBOSE_DEBUG)
6002  	/* turn on all debugging levels */
6003  	ap->msg_enable = 0x00FF;
6004  #elif defined(ATA_DEBUG)
6005  	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6006  #else
6007  	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6008  #endif
6009  
6010  	mutex_init(&ap->scsi_scan_mutex);
6011  	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6012  	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6013  	INIT_LIST_HEAD(&ap->eh_done_q);
6014  	init_waitqueue_head(&ap->eh_wait_q);
6015  	init_completion(&ap->park_req_pending);
6016  	setup_deferrable_timer(&ap->fastdrain_timer,
6017  			       ata_eh_fastdrain_timerfn,
6018  			       (unsigned long)ap);
6019  
6020  	ap->cbl = ATA_CBL_NONE;
6021  
6022  	ata_link_init(ap, &ap->link, 0);
6023  
6024  #ifdef ATA_IRQ_TRAP
6025  	ap->stats.unhandled_irq = 1;
6026  	ap->stats.idle_irq = 1;
6027  #endif
6028  	ata_sff_port_init(ap);
6029  
6030  	return ap;
6031  }
6032  
6033  static void ata_host_release(struct device *gendev, void *res)
6034  {
6035  	struct ata_host *host = dev_get_drvdata(gendev);
6036  	int i;
6037  
6038  	for (i = 0; i < host->n_ports; i++) {
6039  		struct ata_port *ap = host->ports[i];
6040  
6041  		if (!ap)
6042  			continue;
6043  
6044  		if (ap->scsi_host)
6045  			scsi_host_put(ap->scsi_host);
6046  
6047  		kfree(ap->pmp_link);
6048  		kfree(ap->slave_link);
6049  		kfree(ap);
6050  		host->ports[i] = NULL;
6051  	}
6052  
6053  	dev_set_drvdata(gendev, NULL);
6054  }
6055  
6056  /**
6057   *	ata_host_alloc - allocate and init basic ATA host resources
6058   *	@dev: generic device this host is associated with
6059   *	@max_ports: maximum number of ATA ports associated with this host
6060   *
6061   *	Allocate and initialize basic ATA host resources.  An LLD calls
6062   *	this function to allocate a host, then fully initializes it and
6063   *	attaches it using ata_host_register().
6064   *
6065   *	@max_ports ports are allocated and host->n_ports is
6066   *	initialized to @max_ports.  The caller is allowed to decrease
6067   *	host->n_ports before calling ata_host_register().  The unused
6068   *	ports will be automatically freed on registration.
6069   *
6070   *	RETURNS:
6071   *	Allocated ATA host on success, NULL on failure.
6072   *
6073   *	LOCKING:
6074   *	Inherited from calling layer (may sleep).
6075   */
6076  struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6077  {
6078  	struct ata_host *host;
6079  	size_t sz;
6080  	int i;
6081  
6082  	DPRINTK("ENTER\n");
6083  
6084  	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6085  		return NULL;
6086  
6087  	/* alloc a container for our list of ATA ports (buses) */
6088  	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6090  	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6091  	if (!host)
6092  		goto err_out;
6093  
6094  	devres_add(dev, host);
6095  	dev_set_drvdata(dev, host);
6096  
6097  	spin_lock_init(&host->lock);
6098  	mutex_init(&host->eh_mutex);
6099  	host->dev = dev;
6100  	host->n_ports = max_ports;
6101  
6102  	/* allocate ports bound to this host */
6103  	for (i = 0; i < max_ports; i++) {
6104  		struct ata_port *ap;
6105  
6106  		ap = ata_port_alloc(host);
6107  		if (!ap)
6108  			goto err_out;
6109  
6110  		ap->port_no = i;
6111  		host->ports[i] = ap;
6112  	}
6113  
6114  	devres_remove_group(dev, NULL);
6115  	return host;
6116  
6117   err_out:
6118  	devres_release_group(dev, NULL);
6119  	return NULL;
6120  }
6121  
6122  /**
6123   *	ata_host_alloc_pinfo - alloc host and init with port_info array
6124   *	@dev: generic device this host is associated with
6125   *	@ppi: array of ATA port_info to initialize host with
6126   *	@n_ports: number of ATA ports attached to this host
6127   *
6128   *	Allocate an ATA host and initialize it with info from @ppi.  If
6129   *	NULL-terminated, @ppi may contain fewer entries than @n_ports.  The
6130   *	last entry will be used for the remaining ports.
6131   *
6132   *	RETURNS:
6133   *	Allocated ATA host on success, NULL on failure.
6134   *
6135   *	LOCKING:
6136   *	Inherited from calling layer (may sleep).
6137   */
6138  struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6139  				      const struct ata_port_info * const * ppi,
6140  				      int n_ports)
6141  {
6142  	const struct ata_port_info *pi;
6143  	struct ata_host *host;
6144  	int i, j;
6145  
6146  	host = ata_host_alloc(dev, n_ports);
6147  	if (!host)
6148  		return NULL;
6149  
6150  	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6151  		struct ata_port *ap = host->ports[i];
6152  
6153  		if (ppi[j])
6154  			pi = ppi[j++];
6155  
6156  		ap->pio_mask = pi->pio_mask;
6157  		ap->mwdma_mask = pi->mwdma_mask;
6158  		ap->udma_mask = pi->udma_mask;
6159  		ap->flags |= pi->flags;
6160  		ap->link.flags |= pi->link_flags;
6161  		ap->ops = pi->port_ops;
6162  
6163  		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6164  			host->ops = pi->port_ops;
6165  	}
6166  
6167  	return host;
6168  }
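
/*
 * Example: the usual calling pattern is a NULL-terminated ppi array
 * whose last entry covers all remaining ports.  A sketch; the "my_"
 * structures are hypothetical:
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 *	// both ports inherit my_port_info
 */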
6169  
6170  /**
6171   *	ata_slave_link_init - initialize slave link
6172   *	@ap: port to initialize slave link for
6173   *
6174   *	Create and initialize slave link for @ap.  This enables slave
6175   *	link handling on the port.
6176   *
6177   *	In libata, a port contains links and a link contains devices.
6178   *	There is a single host link but if a PMP is attached to it,
6179   *	there can be multiple fan-out links.  On SATA, there's usually
6180   *	a single device connected to a link but PATA and SATA
6181   *	controllers emulating TF based interface can have two - master
6182   *	and slave.
6183   *
6184   *	However, there are a few controllers which don't fit into this
6185   *	abstraction too well - SATA controllers which emulate TF
6186   *	interface with both master and slave devices but also have
6187   *	separate SCR register sets for each device.  These controllers
6188   *	need separate links for physical link handling
6189   *	(e.g. onlineness, link speed) but should be treated like a
6190   *	traditional M/S controller for everything else (e.g. command
6191   *	issue, softreset).
6192   *
6193   *	slave_link is libata's way of handling this class of
6194   *	controllers without impacting core layer too much.  For
6195   *	anything other than physical link handling, the default host
6196   *	link is used for both master and slave.  For physical link
6197   *	handling, separate @ap->slave_link is used.  All dirty details
6198   *	are implemented inside libata core layer.  From LLD's POV, the
6199   *	only difference is that prereset, hardreset and postreset are
6200   *	called once more for the slave link, so the reset sequence
6201   *	looks like the following.
6202   *
6203   *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6204   *	softreset(M) -> postreset(M) -> postreset(S)
6205   *
6206   *	Note that softreset is called only for the master.  Softreset
6207   *	resets both M/S by definition, so SRST on master should handle
6208   *	both (the standard method will work just fine).
6209   *
6210   *	LOCKING:
6211   *	Should be called before host is registered.
6212   *
6213   *	RETURNS:
6214   *	0 on success, -errno on failure.
6215   */
6216  int ata_slave_link_init(struct ata_port *ap)
6217  {
6218  	struct ata_link *link;
6219  
6220  	WARN_ON(ap->slave_link);
6221  	WARN_ON(ap->flags & ATA_FLAG_PMP);
6222  
6223  	link = kzalloc(sizeof(*link), GFP_KERNEL);
6224  	if (!link)
6225  		return -ENOMEM;
6226  
6227  	ata_link_init(ap, link, 1);
6228  	ap->slave_link = link;
6229  	return 0;
6230  }
6231  
6232  static void ata_host_stop(struct device *gendev, void *res)
6233  {
6234  	struct ata_host *host = dev_get_drvdata(gendev);
6235  	int i;
6236  
6237  	WARN_ON(!(host->flags & ATA_HOST_STARTED));
6238  
6239  	for (i = 0; i < host->n_ports; i++) {
6240  		struct ata_port *ap = host->ports[i];
6241  
6242  		if (ap->ops->port_stop)
6243  			ap->ops->port_stop(ap);
6244  	}
6245  
6246  	if (host->ops->host_stop)
6247  		host->ops->host_stop(host);
6248  }
6249  
6250  /**
6251   *	ata_finalize_port_ops - finalize ata_port_operations
6252   *	@ops: ata_port_operations to finalize
6253   *
6254   *	An ata_port_operations can inherit from another ops and that
6255   *	ops can again inherit from another.  This can go on as many
6256   *	times as necessary as long as there is no loop in the
6257   *	inheritance chain.
6258   *
6259   *	Ops tables are finalized when the host is started.  NULL or
6260   *	unspecified entries are inherited from the closest ancestor
6261   *	which has the method and the entry is populated with it.
6262   *	After finalization, the ops table directly points to all the
6263   *	methods and ->inherits is no longer necessary and cleared.
6264   *
6265   *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6266   *
6267   *	LOCKING:
6268   *	None.
6269   */
6270  static void ata_finalize_port_ops(struct ata_port_operations *ops)
6271  {
6272  	static DEFINE_SPINLOCK(lock);
6273  	const struct ata_port_operations *cur;
6274  	void **begin = (void **)ops;
6275  	void **end = (void **)&ops->inherits;
6276  	void **pp;
6277  
6278  	if (!ops || !ops->inherits)
6279  		return;
6280  
6281  	spin_lock(&lock);
6282  
6283  	for (cur = ops->inherits; cur; cur = cur->inherits) {
6284  		void **inherit = (void **)cur;
6285  
6286  		for (pp = begin; pp < end; pp++, inherit++)
6287  			if (!*pp)
6288  				*pp = *inherit;
6289  	}
6290  
6291  	for (pp = begin; pp < end; pp++)
6292  		if (IS_ERR(*pp))
6293  			*pp = NULL;
6294  
6295  	ops->inherits = NULL;
6296  
6297  	spin_unlock(&lock);
6298  }
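
/*
 * Example: what an inheriting ops table looks like before
 * finalization.  A sketch; my_set_piomode() is hypothetical:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,	// fill unset slots
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= my_set_piomode,
 *		.mode_filter	= ATA_OP_NULL,		// force slot to NULL
 *	};
 */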
6299  
6300  /**
6301   *	ata_host_start - start and freeze ports of an ATA host
6302   *	@host: ATA host to start ports for
6303   *
6304   *	Start and then freeze ports of @host.  Started status is
6305   *	recorded in host->flags, so this function can be called
6306   *	multiple times.  Ports are guaranteed to get started only
6307   *	once.  If host->ops isn't initialized yet, it's set to the
6308   *	first non-dummy port ops.
6309   *
6310   *	LOCKING:
6311   *	Inherited from calling layer (may sleep).
6312   *
6313   *	RETURNS:
6314   *	0 if all ports are started successfully, -errno otherwise.
6315   */
6316  int ata_host_start(struct ata_host *host)
6317  {
6318  	int have_stop = 0;
6319  	void *start_dr = NULL;
6320  	int i, rc;
6321  
6322  	if (host->flags & ATA_HOST_STARTED)
6323  		return 0;
6324  
6325  	ata_finalize_port_ops(host->ops);
6326  
6327  	for (i = 0; i < host->n_ports; i++) {
6328  		struct ata_port *ap = host->ports[i];
6329  
6330  		ata_finalize_port_ops(ap->ops);
6331  
6332  		if (!host->ops && !ata_port_is_dummy(ap))
6333  			host->ops = ap->ops;
6334  
6335  		if (ap->ops->port_stop)
6336  			have_stop = 1;
6337  	}
6338  
6339  	if (host->ops->host_stop)
6340  		have_stop = 1;
6341  
6342  	if (have_stop) {
6343  		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6344  		if (!start_dr)
6345  			return -ENOMEM;
6346  	}
6347  
6348  	for (i = 0; i < host->n_ports; i++) {
6349  		struct ata_port *ap = host->ports[i];
6350  
6351  		if (ap->ops->port_start) {
6352  			rc = ap->ops->port_start(ap);
6353  			if (rc) {
6354  				if (rc != -ENODEV)
6355  					dev_err(host->dev,
6356  						"failed to start port %d (errno=%d)\n",
6357  						i, rc);
6358  				goto err_out;
6359  			}
6360  		}
6361  		ata_eh_freeze_port(ap);
6362  	}
6363  
6364  	if (start_dr)
6365  		devres_add(host->dev, start_dr);
6366  	host->flags |= ATA_HOST_STARTED;
6367  	return 0;
6368  
6369   err_out:
6370  	while (--i >= 0) {
6371  		struct ata_port *ap = host->ports[i];
6372  
6373  		if (ap->ops->port_stop)
6374  			ap->ops->port_stop(ap);
6375  	}
6376  	devres_free(start_dr);
6377  	return rc;
6378  }
6379  
6380  /**
6381   *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
6382   *	@host:	host to initialize
6383   *	@dev:	device host is attached to
6384   *	@ops:	port_ops
6385   *
6386   */
6387  void ata_host_init(struct ata_host *host, struct device *dev,
6388  		   struct ata_port_operations *ops)
6389  {
6390  	spin_lock_init(&host->lock);
6391  	mutex_init(&host->eh_mutex);
6392  	host->n_tags = ATA_MAX_QUEUE - 1;
6393  	host->dev = dev;
6394  	host->ops = ops;
6395  }
6396  
6397  void __ata_port_probe(struct ata_port *ap)
6398  {
6399  	struct ata_eh_info *ehi = &ap->link.eh_info;
6400  	unsigned long flags;
6401  
6402  	/* kick EH for boot probing */
6403  	spin_lock_irqsave(ap->lock, flags);
6404  
6405  	ehi->probe_mask |= ATA_ALL_DEVICES;
6406  	ehi->action |= ATA_EH_RESET;
6407  	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6408  
6409  	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6410  	ap->pflags |= ATA_PFLAG_LOADING;
6411  	ata_port_schedule_eh(ap);
6412  
6413  	spin_unlock_irqrestore(ap->lock, flags);
6414  }
6415  
6416  int ata_port_probe(struct ata_port *ap)
6417  {
6418  	int rc = 0;
6419  
6420  	if (ap->ops->error_handler) {
6421  		__ata_port_probe(ap);
6422  		ata_port_wait_eh(ap);
6423  	} else {
6424  		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6425  		rc = ata_bus_probe(ap);
6426  		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6427  	}
6428  	return rc;
6429  }
6430  
6432  static void async_port_probe(void *data, async_cookie_t cookie)
6433  {
6434  	struct ata_port *ap = data;
6435  
6436  	/*
6437  	 * If we're not allowed to scan this host in parallel,
6438  	 * we need to wait until all previous scans have completed
6439  	 * before going further.
6440  	 * Jeff Garzik says this is only within a controller, so we
6441  	 * don't need to wait for port 0, only for later ports.
6442  	 */
6443  	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6444  		async_synchronize_cookie(cookie);
6445  
6446  	(void)ata_port_probe(ap);
6447  
6448  	/* in order to keep device order, we need to synchronize at this point */
6449  	async_synchronize_cookie(cookie);
6450  
6451  	ata_scsi_scan_host(ap, 1);
6452  }
6453  
6454  /**
6455   *	ata_host_register - register initialized ATA host
6456   *	@host: ATA host to register
6457   *	@sht: template for SCSI host
6458   *
6459   *	Register initialized ATA host.  @host is allocated using
6460   *	ata_host_alloc() and fully initialized by LLD.  This function
6461   *	starts ports, registers @host with ATA and SCSI layers and
6462   *	probes registered devices.
6463   *
6464   *	LOCKING:
6465   *	Inherited from calling layer (may sleep).
6466   *
6467   *	RETURNS:
6468   *	0 on success, -errno otherwise.
6469   */
6470  int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6471  {
6472  	int i, rc;
6473  
6474  	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6475  
6476  	/* host must have been started */
6477  	if (!(host->flags & ATA_HOST_STARTED)) {
6478  		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6479  		WARN_ON(1);
6480  		return -EINVAL;
6481  	}
6482  
6483  	/* Blow away unused ports.  This happens when LLD can't
6484  	 * determine the exact number of ports to allocate at
6485  	 * allocation time.
6486  	 */
6487  	for (i = host->n_ports; host->ports[i]; i++)
6488  		kfree(host->ports[i]);
6489  
6490  	/* give ports names and add SCSI hosts */
6491  	for (i = 0; i < host->n_ports; i++) {
6492  		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6493  		host->ports[i]->local_port_no = i + 1;
6494  	}
6495  
6496  	/* Create associated sysfs transport objects  */
6497  	for (i = 0; i < host->n_ports; i++) {
6498  		rc = ata_tport_add(host->dev, host->ports[i]);
6499  		if (rc)
6500  			goto err_tadd;
6502  	}
6503  
6504  	rc = ata_scsi_add_hosts(host, sht);
6505  	if (rc)
6506  		goto err_tadd;
6507  
6508  	/* set cable, sata_spd_limit and report */
6509  	for (i = 0; i < host->n_ports; i++) {
6510  		struct ata_port *ap = host->ports[i];
6511  		unsigned long xfer_mask;
6512  
6513  		/* set SATA cable type if still unset */
6514  		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6515  			ap->cbl = ATA_CBL_SATA;
6516  
6517  		/* init sata_spd_limit to the current value */
6518  		sata_link_init_spd(&ap->link);
6519  		if (ap->slave_link)
6520  			sata_link_init_spd(ap->slave_link);
6521  
6522  		/* print per-port info to dmesg */
6523  		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6524  					      ap->udma_mask);
6525  
6526  		if (!ata_port_is_dummy(ap)) {
6527  			ata_port_info(ap, "%cATA max %s %s\n",
6528  				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6529  				      ata_mode_string(xfer_mask),
6530  				      ap->link.eh_info.desc);
6531  			ata_ehi_clear_desc(&ap->link.eh_info);
6532  		} else
6533  			ata_port_info(ap, "DUMMY\n");
6534  	}
6535  
6536  	/* perform each probe asynchronously */
6537  	for (i = 0; i < host->n_ports; i++) {
6538  		struct ata_port *ap = host->ports[i];
6539  		async_schedule(async_port_probe, ap);
6540  	}
6541  
6542  	return 0;
6543  
6544   err_tadd:
6545  	while (--i >= 0) {
6546  		ata_tport_delete(host->ports[i]);
6547  	}
6548  	return rc;
6550  }
6551  
6552  /**
6553   *	ata_host_activate - start host, request IRQ and register it
6554   *	@host: target ATA host
6555   *	@irq: IRQ to request
6556   *	@irq_handler: irq_handler used when requesting IRQ
6557   *	@irq_flags: irq_flags used when requesting IRQ
6558   *	@sht: scsi_host_template to use when registering the host
6559   *
6560   *	After allocating an ATA host and initializing it, most libata
6561   *	LLDs perform three steps to activate the host - start host,
6562   *	request IRQ and register it.  This helper takes necessary
6563   *	arguments and performs the three steps in one go.
6564   *
6565   *	An invalid IRQ skips the IRQ registration and expects the host to
6566   *	have set polling mode on the port. In this case, @irq_handler
6567   *	should be NULL.
6568   *
6569   *	LOCKING:
6570   *	Inherited from calling layer (may sleep).
6571   *
6572   *	RETURNS:
6573   *	0 on success, -errno otherwise.
6574   */
6575  int ata_host_activate(struct ata_host *host, int irq,
6576  		      irq_handler_t irq_handler, unsigned long irq_flags,
6577  		      struct scsi_host_template *sht)
6578  {
6579  	int i, rc;
6580  	char *irq_desc;
6581  
6582  	rc = ata_host_start(host);
6583  	if (rc)
6584  		return rc;
6585  
6586  	/* Special case for polling mode */
6587  	if (!irq) {
6588  		WARN_ON(irq_handler);
6589  		return ata_host_register(host, sht);
6590  	}
6591  
6592  	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6593  				  dev_driver_string(host->dev),
6594  				  dev_name(host->dev));
6595  	if (!irq_desc)
6596  		return -ENOMEM;
6597  
6598  	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6599  			      irq_desc, host);
6600  	if (rc)
6601  		return rc;
6602  
6603  	for (i = 0; i < host->n_ports; i++)
6604  		ata_port_desc(host->ports[i], "irq %d", irq);
6605  
6606  	rc = ata_host_register(host, sht);
6607  	/* if failed, just free the IRQ and leave ports alone */
6608  	if (rc)
6609  		devm_free_irq(host->dev, irq, host);
6610  
6611  	return rc;
6612  }
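
/*
 * Example: seen from an LLD's probe(), the whole activation is one
 * call after allocation and resource setup.  A sketch; my_interrupt
 * and my_sht are hypothetical:
 *
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	// ... map BARs and fill each port's ap->ioaddr ...
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */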
6613  
6614  /**
6615   *	ata_port_detach - Detach ATA port in preparation of device removal
6616   *	@ap: ATA port to be detached
6617   *
6618   *	Detach all ATA devices and the associated SCSI devices of @ap;
6619   *	then, remove the associated SCSI host.  @ap is guaranteed to
6620   *	be quiescent on return from this function.
6621   *
6622   *	LOCKING:
6623   *	Kernel thread context (may sleep).
6624   */
6625  static void ata_port_detach(struct ata_port *ap)
6626  {
6627  	unsigned long flags;
6628  	struct ata_link *link;
6629  	struct ata_device *dev;
6630  
6631  	if (!ap->ops->error_handler)
6632  		goto skip_eh;
6633  
6634  	/* tell EH we're leaving & flush EH */
6635  	spin_lock_irqsave(ap->lock, flags);
6636  	ap->pflags |= ATA_PFLAG_UNLOADING;
6637  	ata_port_schedule_eh(ap);
6638  	spin_unlock_irqrestore(ap->lock, flags);
6639  
6640  	/* wait till EH commits suicide */
6641  	ata_port_wait_eh(ap);
6642  
6643  	/* it better be dead now */
6644  	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6645  
6646  	cancel_delayed_work_sync(&ap->hotplug_task);
6647  
6648   skip_eh:
6649  	/* clean up zpodd on port removal */
6650  	ata_for_each_link(link, ap, HOST_FIRST) {
6651  		ata_for_each_dev(dev, link, ALL) {
6652  			if (zpodd_dev_enabled(dev))
6653  				zpodd_exit(dev);
6654  		}
6655  	}
6656  	if (ap->pmp_link) {
6657  		int i;
6658  		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6659  			ata_tlink_delete(&ap->pmp_link[i]);
6660  	}
6661  	/* remove the associated SCSI host */
6662  	scsi_remove_host(ap->scsi_host);
6663  	ata_tport_delete(ap);
6664  }
6665  
6666  /**
6667   *	ata_host_detach - Detach all ports of an ATA host
6668   *	@host: Host to detach
6669   *
6670   *	Detach all ports of @host.
6671   *
6672   *	LOCKING:
6673   *	Kernel thread context (may sleep).
6674   */
6675  void ata_host_detach(struct ata_host *host)
6676  {
6677  	int i;
6678  
6679  	/* Ensure ata_port probe has completed */
6680  	async_synchronize_full();
6681  
6682  	for (i = 0; i < host->n_ports; i++)
6683  		ata_port_detach(host->ports[i]);
6684  
6685  	/* the host is dead now, dissociate ACPI */
6686  	ata_acpi_dissociate(host);
6687  }
6688  
6689  #ifdef CONFIG_PCI
6690  
6691  /**
6692   *	ata_pci_remove_one - PCI layer callback for device removal
6693   *	@pdev: PCI device that was removed
6694   *
6695   *	PCI layer indicates to libata via this hook that hot-unplug or
6696   *	module unload event has occurred.  Detach all ports.  Resource
6697   *	release is handled via devres.
6698   *
6699   *	LOCKING:
6700   *	Inherited from PCI layer (may sleep).
6701   */
6702  void ata_pci_remove_one(struct pci_dev *pdev)
6703  {
6704  	struct ata_host *host = pci_get_drvdata(pdev);
6705  
6706  	ata_host_detach(host);
6707  }
6708  
6709  void ata_pci_shutdown_one(struct pci_dev *pdev)
6710  {
6711  	struct ata_host *host = pci_get_drvdata(pdev);
6712  	int i;
6713  
6714  	for (i = 0; i < host->n_ports; i++) {
6715  		struct ata_port *ap = host->ports[i];
6716  
6717  		ap->pflags |= ATA_PFLAG_FROZEN;
6718  
6719  		/* Disable port interrupts */
6720  		if (ap->ops->freeze)
6721  			ap->ops->freeze(ap);
6722  
6723  		/* Stop the port DMA engines */
6724  		if (ap->ops->port_stop)
6725  			ap->ops->port_stop(ap);
6726  	}
6727  }
6728  
6729  /* move to PCI subsystem */
6730  int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6731  {
6732  	unsigned long tmp = 0;
6733  
6734  	switch (bits->width) {
6735  	case 1: {
6736  		u8 tmp8 = 0;
6737  		pci_read_config_byte(pdev, bits->reg, &tmp8);
6738  		tmp = tmp8;
6739  		break;
6740  	}
6741  	case 2: {
6742  		u16 tmp16 = 0;
6743  		pci_read_config_word(pdev, bits->reg, &tmp16);
6744  		tmp = tmp16;
6745  		break;
6746  	}
6747  	case 4: {
6748  		u32 tmp32 = 0;
6749  		pci_read_config_dword(pdev, bits->reg, &tmp32);
6750  		tmp = tmp32;
6751  		break;
6752  	}
6753  
6754  	default:
6755  		return -EINVAL;
6756  	}
6757  
6758  	tmp &= bits->mask;
6759  
6760  	return (tmp == bits->val) ? 1 : 0;
6761  }
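
/*
 * Example: PATA drivers use this to test "port enable" bits in PCI
 * config space before touching a channel.  A sketch in the shape used
 * by drivers such as ata_piix; my_enable_bits is hypothetical:
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// reg, width, mask, val
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->port_no]))
 *		return -ENOENT;			// channel disabled
 */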
6762  
6763  #ifdef CONFIG_PM
6764  void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6765  {
6766  	pci_save_state(pdev);
6767  	pci_disable_device(pdev);
6768  
6769  	if (mesg.event & PM_EVENT_SLEEP)
6770  		pci_set_power_state(pdev, PCI_D3hot);
6771  }
6772  
6773  int ata_pci_device_do_resume(struct pci_dev *pdev)
6774  {
6775  	int rc;
6776  
6777  	pci_set_power_state(pdev, PCI_D0);
6778  	pci_restore_state(pdev);
6779  
6780  	rc = pcim_enable_device(pdev);
6781  	if (rc) {
6782  		dev_err(&pdev->dev,
6783  			"failed to enable device after resume (%d)\n", rc);
6784  		return rc;
6785  	}
6786  
6787  	pci_set_master(pdev);
6788  	return 0;
6789  }
6790  
6791  int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6792  {
6793  	struct ata_host *host = pci_get_drvdata(pdev);
6794  	int rc = 0;
6795  
6796  	rc = ata_host_suspend(host, mesg);
6797  	if (rc)
6798  		return rc;
6799  
6800  	ata_pci_device_do_suspend(pdev, mesg);
6801  
6802  	return 0;
6803  }
6804  
6805  int ata_pci_device_resume(struct pci_dev *pdev)
6806  {
6807  	struct ata_host *host = pci_get_drvdata(pdev);
6808  	int rc;
6809  
6810  	rc = ata_pci_device_do_resume(pdev);
6811  	if (rc == 0)
6812  		ata_host_resume(host);
6813  	return rc;
6814  }
6815  #endif /* CONFIG_PM */
6816  
6817  #endif /* CONFIG_PCI */
6818  
6819  /**
6820   *	ata_platform_remove_one - Platform layer callback for device removal
6821   *	@pdev: Platform device that was removed
6822   *
6823   *	Platform layer indicates to libata via this hook that hot-unplug or
6824   *	module unload event has occurred.  Detach all ports.  Resource
6825   *	release is handled via devres.
6826   *
6827   *	LOCKING:
6828   *	Inherited from platform layer (may sleep).
6829   */
6830  int ata_platform_remove_one(struct platform_device *pdev)
6831  {
6832  	struct ata_host *host = platform_get_drvdata(pdev);
6833  
6834  	ata_host_detach(host);
6835  
6836  	return 0;
6837  }
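
As with the PCI variant, this callback is designed for direct use; a minimal sketch around a hypothetical platform driver:

	static struct platform_driver foo_platform_driver = {
		.probe	= foo_probe,			/* hypothetical probe */
		.remove	= ata_platform_remove_one,
		.driver	= {
			.name = "foo_sata",		/* placeholder name */
		},
	};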
6838  
6839  static int __init ata_parse_force_one(char **cur,
6840  				      struct ata_force_ent *force_ent,
6841  				      const char **reason)
6842  {
6843  	static const struct ata_force_param force_tbl[] __initconst = {
6844  		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6845  		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6846  		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6847  		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6848  		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6849  		{ "sata",	.cbl		= ATA_CBL_SATA },
6850  		{ "1.5Gbps",	.spd_limit	= 1 },
6851  		{ "3.0Gbps",	.spd_limit	= 2 },
6852  		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6853  		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6854  		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
6855  		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
6856  		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6857  		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6858  		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6859  		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6860  		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6861  		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6862  		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6863  		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6864  		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6865  		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6866  		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6867  		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6868  		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6869  		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6870  		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6871  		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6872  		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6873  		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6874  		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6875  		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6876  		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6877  		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6878  		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6879  		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6880  		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6881  		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6882  		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6883  		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6884  		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6885  		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6886  		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6887  		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6888  		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6889  		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6890  		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6891  		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6892  		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6893  		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6894  		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6895  		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6896  		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6897  	};
6898  	char *start = *cur, *p = *cur;
6899  	char *id, *val, *endp;
6900  	const struct ata_force_param *match_fp = NULL;
6901  	int nr_matches = 0, i;
6902  
6903  	/* find where this param ends and update *cur */
6904  	while (*p != '\0' && *p != ',')
6905  		p++;
6906  
6907  	if (*p == '\0')
6908  		*cur = p;
6909  	else
6910  		*cur = p + 1;
6911  
6912  	*p = '\0';
6913  
6914  	/* parse */
6915  	p = strchr(start, ':');
6916  	if (!p) {
6917  		val = strstrip(start);
6918  		goto parse_val;
6919  	}
6920  	*p = '\0';
6921  
6922  	id = strstrip(start);
6923  	val = strstrip(p + 1);
6924  
6925  	/* parse id */
6926  	p = strchr(id, '.');
6927  	if (p) {
6928  		*p++ = '\0';
6929  		force_ent->device = simple_strtoul(p, &endp, 10);
6930  		if (p == endp || *endp != '\0') {
6931  			*reason = "invalid device";
6932  			return -EINVAL;
6933  		}
6934  	}
6935  
6936  	force_ent->port = simple_strtoul(id, &endp, 10);
6937  	if (id == endp || *endp != '\0') {
6938  		*reason = "invalid port/link";
6939  		return -EINVAL;
6940  	}
6941  
6942   parse_val:
6943  	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6944  	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6945  		const struct ata_force_param *fp = &force_tbl[i];
6946  
6947  		if (strncasecmp(val, fp->name, strlen(val)))
6948  			continue;
6949  
6950  		nr_matches++;
6951  		match_fp = fp;
6952  
6953  		if (strcasecmp(val, fp->name) == 0) {
6954  			nr_matches = 1;
6955  			break;
6956  		}
6957  	}
6958  
6959  	if (!nr_matches) {
6960  		*reason = "unknown value";
6961  		return -EINVAL;
6962  	}
6963  	if (nr_matches > 1) {
6964  		*reason = "ambiguous value";
6965  		return -EINVAL;
6966  	}
6967  
6968  	force_ent->param = *match_fp;
6969  
6970  	return 0;
6971  }
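
Each comma-separated entry thus has the shape [PORT[.DEVICE]:]VALUE, with VALUE drawn from force_tbl above. Illustrative kernel command lines (the port/device numbers are made up):

	libata.force=noncq			apply to every port and device
	libata.force=1:1.5Gbps			limit port 1 to 1.5 Gbps
	libata.force=2.00:udma/33		cap device 0 on port 2 at UDMA/33
	libata.force=1:nohrst,rstonce		an ID-less entry inherits the preceding port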
6972  
6973  static void __init ata_parse_force_param(void)
6974  {
6975  	int idx = 0, size = 1;
6976  	int last_port = -1, last_device = -1;
6977  	char *p, *cur, *next;
6978  
6979  	/* calculate maximum number of params and allocate force_tbl */
6980  	for (p = ata_force_param_buf; *p; p++)
6981  		if (*p == ',')
6982  			size++;
6983  
6984  	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6985  	if (!ata_force_tbl) {
6986  		printk(KERN_WARNING "ata: failed to allocate force table, "
6987  		       "libata.force ignored\n");
6988  		return;
6989  	}
6990  
6991  	/* parse and populate the table */
6992  	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6993  		const char *reason = "";
6994  		struct ata_force_ent te = { .port = -1, .device = -1 };
6995  
6996  		next = cur;
6997  		if (ata_parse_force_one(&next, &te, &reason)) {
6998  			printk(KERN_WARNING "ata: failed to parse force "
6999  			       "parameter \"%s\" (%s)\n",
7000  			       cur, reason);
7001  			continue;
7002  		}
7003  
7004  		if (te.port == -1) {
7005  			te.port = last_port;
7006  			te.device = last_device;
7007  		}
7008  
7009  		ata_force_tbl[idx++] = te;
7010  
7011  		last_port = te.port;
7012  		last_device = te.device;
7013  	}
7014  
7015  	ata_force_tbl_size = idx;
7016  }
7017  
7018  static int __init ata_init(void)
7019  {
7020  	int rc;
7021  
7022  	ata_parse_force_param();
7023  
7024  	rc = ata_sff_init();
7025  	if (rc) {
7026  		kfree(ata_force_tbl);
7027  		return rc;
7028  	}
7029  
7030  	libata_transport_init();
7031  	ata_scsi_transport_template = ata_attach_transport();
7032  	if (!ata_scsi_transport_template) {
7033  		ata_sff_exit();
7034  		rc = -ENOMEM;
7035  		goto err_out;
7036  	}
7037  
7038  	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7039  	return 0;
7040  
7041  err_out:
7042  	return rc;
7043  }
7044  
7045  static void __exit ata_exit(void)
7046  {
7047  	ata_release_transport(ata_scsi_transport_template);
7048  	libata_transport_exit();
7049  	ata_sff_exit();
7050  	kfree(ata_force_tbl);
7051  }
7052  
7053  subsys_initcall(ata_init);
7054  module_exit(ata_exit);
7055  
7056  static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
7057  
7058  int ata_ratelimit(void)
7059  {
7060  	return __ratelimit(&ratelimit);
7061  }
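
The limiter is shared and allows roughly five messages per second (interval HZ / 5, burst 1), so callers use it to guard hot-path diagnostics. A sketch with a made-up message and status variable, using the real ata_port_warn() wrapper:

	if (ata_ratelimit())
		ata_port_warn(ap, "spurious interrupt, status 0x%x\n", status);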
7062  
7063  /**
7064   *	ata_msleep - ATA EH owner aware msleep
7065   *	@ap: ATA port to attribute the sleep to
7066   *	@msecs: duration to sleep in milliseconds
7067   *
7068   *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
7069   *	ownership is released before going to sleep and reacquired
7070   *	after the sleep is complete.  IOW, other ports sharing the
7071   *	@ap->host will be allowed to own the EH while this task is
7072   *	sleeping.
7073   *
7074   *	LOCKING:
7075   *	Might sleep.
7076   */
7077  void ata_msleep(struct ata_port *ap, unsigned int msecs)
7078  {
7079  	bool owns_eh = ap && ap->host->eh_owner == current;
7080  
7081  	if (owns_eh)
7082  		ata_eh_release(ap);
7083  
7084  	if (msecs < 20) {
7085  		unsigned long usecs = msecs * USEC_PER_MSEC;
7086  		usleep_range(usecs, usecs + 50);
7087  	} else {
7088  		msleep(msecs);
7089  	}
7090  
7091  	if (owns_eh)
7092  		ata_eh_acquire(ap);
7093  }
7094  
7095  /**
7096   *	ata_wait_register - wait until register value changes
7097   *	@ap: ATA port to wait register for, can be NULL
7098   *	@reg: IO-mapped register
7099   *	@mask: Mask to apply to read register value
7100   *	@val: Wait condition
7101   *	@interval: polling interval in milliseconds
7102   *	@timeout: timeout in milliseconds
7103   *
7104   *	Waiting for some bits of a register to change is a common
7105   *	operation for ATA controllers.  This function reads 32bit LE
7106   *	IO-mapped register @reg and tests for the following condition.
7107   *
7108   *	(*@reg & @mask) != @val
7109   *
7110   *	If the condition is met, it returns; otherwise, the read is
7111   *	repeated every @interval milliseconds until @timeout expires.
7112   *
7113   *	LOCKING:
7114   *	Kernel thread context (may sleep)
7115   *
7116   *	RETURNS:
7117   *	The final register value.
7118   */
7119  u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
7120  		      unsigned long interval, unsigned long timeout)
7121  {
7122  	unsigned long deadline;
7123  	u32 tmp;
7124  
7125  	tmp = ioread32(reg);
7126  
7127  	/* Calculate timeout _after_ the first read to make sure
7128  	 * preceding writes reach the controller before starting to
7129  	 * eat away the timeout.
7130  	 */
7131  	deadline = ata_deadline(jiffies, timeout);
7132  
7133  	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
7134  		ata_msleep(ap, interval);
7135  		tmp = ioread32(reg);
7136  	}
7137  
7138  	return tmp;
7139  }
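
Note that @val is the value being waited *away* from: polling continues while (read & @mask) == @val. A sketch that polls every 10 ms, for up to one second, until a hypothetical busy bit clears (mmio, FOO_STATUS and FOO_BUSY are made up):

	u32 status;

	status = ata_wait_register(ap, mmio + FOO_STATUS, FOO_BUSY, FOO_BUSY,
				   10, 1000);
	if (status & FOO_BUSY)
		return -EBUSY;		/* timed out, bit never cleared */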
7140  
7141  /**
7142   *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
7143   *	@link: Link receiving the event
7144   *
7145   *	Test whether the received PHY event has to be ignored or not.
7146   *
7147   *	LOCKING:
7148   *	None.
7149   *
7150   *	RETURNS:
7151   *	True if the event has to be ignored.
7152   */
7153  bool sata_lpm_ignore_phy_events(struct ata_link *link)
7154  {
7155  	unsigned long lpm_timeout = link->last_lpm_change +
7156  				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
7157  
7158  	/* if LPM is enabled, PHYRDY doesn't mean anything */
7159  	if (link->lpm_policy > ATA_LPM_MAX_POWER)
7160  		return true;
7161  
7162  	/* ignore the first PHY event after the LPM policy changed
7163  	 * as it might be spurious
7164  	 */
7165  	if ((link->flags & ATA_LFLAG_CHANGED) &&
7166  	    time_before(jiffies, lpm_timeout))
7167  		return true;
7168  
7169  	return false;
7170  }
7171  EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
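
A host driver's interrupt path consults this before treating a PHYRDY change as hotplug. A sketch, with the SError decoding illustrative but the helpers being the real libata ones:

	if (serror & SERR_PHYRDY_CHG) {
		if (sata_lpm_ignore_phy_events(&ap->link))
			return;			/* spurious, LPM-induced */
		ata_ehi_hotplugged(&ap->link.eh_info);
		ata_port_freeze(ap);
	}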
7172  
7173  /*
7174   * Dummy port_ops
7175   */
7176  static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7177  {
7178  	return AC_ERR_SYSTEM;
7179  }
7180  
7181  static void ata_dummy_error_handler(struct ata_port *ap)
7182  {
7183  	/* truly dummy */
7184  }
7185  
7186  struct ata_port_operations ata_dummy_port_ops = {
7187  	.qc_prep		= ata_noop_qc_prep,
7188  	.qc_issue		= ata_dummy_qc_issue,
7189  	.error_handler		= ata_dummy_error_handler,
7190  	.sched_eh		= ata_std_sched_eh,
7191  	.end_eh			= ata_std_end_eh,
7192  };
7193  
7194  const struct ata_port_info ata_dummy_port_info = {
7195  	.port_ops		= &ata_dummy_port_ops,
7196  };
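
Drivers use the dummy info to hold the place of ports the hardware does not implement, keeping port numbering stable. A sketch for a hypothetical controller whose second slot is absent:

	static const struct ata_port_info *ppi[] = {
		&foo_port_info,		/* hypothetical real port */
		&ata_dummy_port_info,	/* second slot unimplemented */
	};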
7197  
7198  /*
7199   * Utility print functions
7200   */
7201  void ata_port_printk(const struct ata_port *ap, const char *level,
7202  		     const char *fmt, ...)
7203  {
7204  	struct va_format vaf;
7205  	va_list args;
7206  
7207  	va_start(args, fmt);
7208  
7209  	vaf.fmt = fmt;
7210  	vaf.va = &args;
7211  
7212  	printk("%sata%u: %pV", level, ap->print_id, &vaf);
7213  
7214  	va_end(args);
7215  }
7216  EXPORT_SYMBOL(ata_port_printk);
7217  
7218  void ata_link_printk(const struct ata_link *link, const char *level,
7219  		     const char *fmt, ...)
7220  {
7221  	struct va_format vaf;
7222  	va_list args;
7223  
7224  	va_start(args, fmt);
7225  
7226  	vaf.fmt = fmt;
7227  	vaf.va = &args;
7228  
7229  	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7230  		printk("%sata%u.%02u: %pV",
7231  		       level, link->ap->print_id, link->pmp, &vaf);
7232  	else
7233  		printk("%sata%u: %pV",
7234  		       level, link->ap->print_id, &vaf);
7235  
7236  	va_end(args);
7237  }
7238  EXPORT_SYMBOL(ata_link_printk);
7239  
7240  void ata_dev_printk(const struct ata_device *dev, const char *level,
7241  		    const char *fmt, ...)
7242  {
7243  	struct va_format vaf;
7244  	va_list args;
7245  
7246  	va_start(args, fmt);
7247  
7248  	vaf.fmt = fmt;
7249  	vaf.va = &args;
7250  
7251  	printk("%sata%u.%02u: %pV",
7252  	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7253  	       &vaf);
7254  
7255  	va_end(args);
7256  }
7257  EXPORT_SYMBOL(ata_dev_printk);
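
Drivers rarely call these helpers directly; the ata_port_*(), ata_link_*() and ata_dev_*() macros in <linux/libata.h> supply the KERN_* level. Illustrative call sites (the messages and xfer_mode_string are made up):

	ata_port_err(ap, "lost interrupt\n");
	ata_link_warn(link, "link resume failed\n");
	ata_dev_info(dev, "configured for %s\n", xfer_mode_string);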
7258  
7259  void ata_print_version(const struct device *dev, const char *version)
7260  {
7261  	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7262  }
7263  EXPORT_SYMBOL(ata_print_version);
7264  
7265  /*
7266   * libata is essentially a library of internal helper functions for
7267   * low-level ATA host controller drivers.  As such, the API/ABI is
7268   * likely to change as new drivers are added and updated.
7269   * Do not depend on ABI/API stability.
7270   */
7271  EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7272  EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7273  EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7274  EXPORT_SYMBOL_GPL(ata_base_port_ops);
7275  EXPORT_SYMBOL_GPL(sata_port_ops);
7276  EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7277  EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7278  EXPORT_SYMBOL_GPL(ata_link_next);
7279  EXPORT_SYMBOL_GPL(ata_dev_next);
7280  EXPORT_SYMBOL_GPL(ata_std_bios_param);
7281  EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
7282  EXPORT_SYMBOL_GPL(ata_host_init);
7283  EXPORT_SYMBOL_GPL(ata_host_alloc);
7284  EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7285  EXPORT_SYMBOL_GPL(ata_slave_link_init);
7286  EXPORT_SYMBOL_GPL(ata_host_start);
7287  EXPORT_SYMBOL_GPL(ata_host_register);
7288  EXPORT_SYMBOL_GPL(ata_host_activate);
7289  EXPORT_SYMBOL_GPL(ata_host_detach);
7290  EXPORT_SYMBOL_GPL(ata_sg_init);
7291  EXPORT_SYMBOL_GPL(ata_qc_complete);
7292  EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7293  EXPORT_SYMBOL_GPL(atapi_cmd_type);
7294  EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7295  EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7296  EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7297  EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7298  EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7299  EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7300  EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7301  EXPORT_SYMBOL_GPL(ata_mode_string);
7302  EXPORT_SYMBOL_GPL(ata_id_xfermask);
7303  EXPORT_SYMBOL_GPL(ata_do_set_mode);
7304  EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7305  EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7306  EXPORT_SYMBOL_GPL(ata_dev_disable);
7307  EXPORT_SYMBOL_GPL(sata_set_spd);
7308  EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7309  EXPORT_SYMBOL_GPL(sata_link_debounce);
7310  EXPORT_SYMBOL_GPL(sata_link_resume);
7311  EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7312  EXPORT_SYMBOL_GPL(ata_std_prereset);
7313  EXPORT_SYMBOL_GPL(sata_link_hardreset);
7314  EXPORT_SYMBOL_GPL(sata_std_hardreset);
7315  EXPORT_SYMBOL_GPL(ata_std_postreset);
7316  EXPORT_SYMBOL_GPL(ata_dev_classify);
7317  EXPORT_SYMBOL_GPL(ata_dev_pair);
7318  EXPORT_SYMBOL_GPL(ata_ratelimit);
7319  EXPORT_SYMBOL_GPL(ata_msleep);
7320  EXPORT_SYMBOL_GPL(ata_wait_register);
7321  EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7322  EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7323  EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7324  EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7325  EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7326  EXPORT_SYMBOL_GPL(sata_scr_valid);
7327  EXPORT_SYMBOL_GPL(sata_scr_read);
7328  EXPORT_SYMBOL_GPL(sata_scr_write);
7329  EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7330  EXPORT_SYMBOL_GPL(ata_link_online);
7331  EXPORT_SYMBOL_GPL(ata_link_offline);
7332  #ifdef CONFIG_PM
7333  EXPORT_SYMBOL_GPL(ata_host_suspend);
7334  EXPORT_SYMBOL_GPL(ata_host_resume);
7335  #endif /* CONFIG_PM */
7336  EXPORT_SYMBOL_GPL(ata_id_string);
7337  EXPORT_SYMBOL_GPL(ata_id_c_string);
7338  EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7339  EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7340  
7341  EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7342  EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7343  EXPORT_SYMBOL_GPL(ata_timing_compute);
7344  EXPORT_SYMBOL_GPL(ata_timing_merge);
7345  EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7346  
7347  #ifdef CONFIG_PCI
7348  EXPORT_SYMBOL_GPL(pci_test_config_bits);
7349  EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
7350  EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7351  #ifdef CONFIG_PM
7352  EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7353  EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7354  EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7355  EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7356  #endif /* CONFIG_PM */
7357  #endif /* CONFIG_PCI */
7358  
7359  EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7360  
7361  EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7362  EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7363  EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7364  EXPORT_SYMBOL_GPL(ata_port_desc);
7365  #ifdef CONFIG_PCI
7366  EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7367  #endif /* CONFIG_PCI */
7368  EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7369  EXPORT_SYMBOL_GPL(ata_link_abort);
7370  EXPORT_SYMBOL_GPL(ata_port_abort);
7371  EXPORT_SYMBOL_GPL(ata_port_freeze);
7372  EXPORT_SYMBOL_GPL(sata_async_notification);
7373  EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7374  EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7375  EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7376  EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7377  EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7378  EXPORT_SYMBOL_GPL(ata_do_eh);
7379  EXPORT_SYMBOL_GPL(ata_std_error_handler);
7380  
7381  EXPORT_SYMBOL_GPL(ata_cable_40wire);
7382  EXPORT_SYMBOL_GPL(ata_cable_80wire);
7383  EXPORT_SYMBOL_GPL(ata_cable_unknown);
7384  EXPORT_SYMBOL_GPL(ata_cable_ignore);
7385  EXPORT_SYMBOL_GPL(ata_cable_sata);
7386