1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/time.h>
54 #include <linux/interrupt.h>
55 #include <linux/completion.h>
56 #include <linux/suspend.h>
57 #include <linux/workqueue.h>
58 #include <linux/scatterlist.h>
59 #include <linux/io.h>
60 #include <linux/async.h>
61 #include <linux/log2.h>
62 #include <linux/slab.h>
63 #include <linux/glob.h>
64 #include <scsi/scsi.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_host.h>
67 #include <linux/libata.h>
68 #include <asm/byteorder.h>
69 #include <asm/unaligned.h>
70 #include <linux/cdrom.h>
71 #include <linux/ratelimit.h>
72 #include <linux/leds.h>
73 #include <linux/pm_runtime.h>
74 #include <linux/platform_device.h>
75
76 #define CREATE_TRACE_POINTS
77 #include <trace/events/libata.h>
78
79 #include "libata.h"
80 #include "libata-transport.h"
81
82 /* debounce timing parameters in msecs { interval, duration, timeout } */
83 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
84 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
85 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
86
87 const struct ata_port_operations ata_base_port_ops = {
88 .prereset = ata_std_prereset,
89 .postreset = ata_std_postreset,
90 .error_handler = ata_std_error_handler,
91 .sched_eh = ata_std_sched_eh,
92 .end_eh = ata_std_end_eh,
93 };
94
95 const struct ata_port_operations sata_port_ops = {
96 .inherits = &ata_base_port_ops,
97
98 .qc_defer = ata_std_qc_defer,
99 .hardreset = sata_std_hardreset,
100 };
101
102 static unsigned int ata_dev_init_params(struct ata_device *dev,
103 u16 heads, u16 sectors);
104 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
105 static void ata_dev_xfermask(struct ata_device *dev);
106 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
107
108 atomic_t ata_print_id = ATOMIC_INIT(0);
109
110 struct ata_force_param {
111 const char *name;
112 unsigned int cbl;
113 int spd_limit;
114 unsigned long xfer_mask;
115 unsigned int horkage_on;
116 unsigned int horkage_off;
117 unsigned int lflags;
118 };
119
120 struct ata_force_ent {
121 int port;
122 int device;
123 struct ata_force_param param;
124 };
125
126 static struct ata_force_ent *ata_force_tbl;
127 static int ata_force_tbl_size;
128
129 static char ata_force_param_buf[PAGE_SIZE] __initdata;
130 /* param_buf is thrown away after initialization, disallow read */
131 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
132 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
133
134 static int atapi_enabled = 1;
135 module_param(atapi_enabled, int, 0444);
136 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
137
138 static int atapi_dmadir = 0;
139 module_param(atapi_dmadir, int, 0444);
140 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
141
142 int atapi_passthru16 = 1;
143 module_param(atapi_passthru16, int, 0444);
144 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
145
146 int libata_fua = 0;
147 module_param_named(fua, libata_fua, int, 0444);
148 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
149
150 static int ata_ignore_hpa;
151 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
152 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
153
154 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
155 module_param_named(dma, libata_dma_mask, int, 0444);
156 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
157
158 static int ata_probe_timeout;
159 module_param(ata_probe_timeout, int, 0444);
160 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
161
162 int libata_noacpi = 0;
163 module_param_named(noacpi, libata_noacpi, int, 0444);
164 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
165
166 int libata_allow_tpm = 0;
167 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
168 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
169
170 static int atapi_an;
171 module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
173
174 MODULE_AUTHOR("Jeff Garzik");
175 MODULE_DESCRIPTION("Library module for ATA devices");
176 MODULE_LICENSE("GPL");
177 MODULE_VERSION(DRV_VERSION);
178
179
static bool ata_sstatus_online(u32 sstatus)
181 {
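	/* SStatus DET field (bits 3:0) == 0x3: device detected and Phy communication established */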
182 return (sstatus & 0xf) == 0x3;
183 }
184
185 /**
186 * ata_link_next - link iteration helper
187 * @link: the previous link, NULL to start
188 * @ap: ATA port containing links to iterate
189 * @mode: iteration mode, one of ATA_LITER_*
190 *
191 * LOCKING:
192 * Host lock or EH context.
193 *
194 * RETURNS:
195 * Pointer to the next link.
196 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
199 {
200 BUG_ON(mode != ATA_LITER_EDGE &&
201 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
202
203 /* NULL link indicates start of iteration */
204 if (!link)
205 switch (mode) {
206 case ATA_LITER_EDGE:
207 case ATA_LITER_PMP_FIRST:
208 if (sata_pmp_attached(ap))
209 return ap->pmp_link;
210 /* fall through */
211 case ATA_LITER_HOST_FIRST:
212 return &ap->link;
213 }
214
215 /* we just iterated over the host link, what's next? */
216 if (link == &ap->link)
217 switch (mode) {
218 case ATA_LITER_HOST_FIRST:
219 if (sata_pmp_attached(ap))
220 return ap->pmp_link;
221 /* fall through */
222 case ATA_LITER_PMP_FIRST:
223 if (unlikely(ap->slave_link))
224 return ap->slave_link;
225 /* fall through */
226 case ATA_LITER_EDGE:
227 return NULL;
228 }
229
230 /* slave_link excludes PMP */
231 if (unlikely(link == ap->slave_link))
232 return NULL;
233
234 /* we were over a PMP link */
235 if (++link < ap->pmp_link + ap->nr_pmp_links)
236 return link;
237
238 if (mode == ATA_LITER_PMP_FIRST)
239 return &ap->link;
240
241 return NULL;
242 }
243
244 /**
245 * ata_dev_next - device iteration helper
246 * @dev: the previous device, NULL to start
247 * @link: ATA link containing devices to iterate
248 * @mode: iteration mode, one of ATA_DITER_*
249 *
250 * LOCKING:
251 * Host lock or EH context.
252 *
253 * RETURNS:
254 * Pointer to the next device.
255 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
258 {
259 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
260 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
261
262 /* NULL dev indicates start of iteration */
263 if (!dev)
264 switch (mode) {
265 case ATA_DITER_ENABLED:
266 case ATA_DITER_ALL:
267 dev = link->device;
268 goto check;
269 case ATA_DITER_ENABLED_REVERSE:
270 case ATA_DITER_ALL_REVERSE:
271 dev = link->device + ata_link_max_devices(link) - 1;
272 goto check;
273 }
274
275 next:
276 /* move to the next one */
277 switch (mode) {
278 case ATA_DITER_ENABLED:
279 case ATA_DITER_ALL:
280 if (++dev < link->device + ata_link_max_devices(link))
281 goto check;
282 return NULL;
283 case ATA_DITER_ENABLED_REVERSE:
284 case ATA_DITER_ALL_REVERSE:
285 if (--dev >= link->device)
286 goto check;
287 return NULL;
288 }
289
290 check:
291 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
292 !ata_dev_enabled(dev))
293 goto next;
294 return dev;
295 }
296
297 /**
298 * ata_dev_phys_link - find physical link for a device
299 * @dev: ATA device to look up physical link for
300 *
301 * Look up physical link which @dev is attached to. Note that
302 * this is different from @dev->link only when @dev is on slave
303 * link. For all other cases, it's the same as @dev->link.
304 *
305 * LOCKING:
306 * Don't care.
307 *
308 * RETURNS:
309 * Pointer to the found physical link.
310 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
312 {
313 struct ata_port *ap = dev->link->ap;
314
315 if (!ap->slave_link)
316 return dev->link;
317 if (!dev->devno)
318 return &ap->link;
319 return ap->slave_link;
320 }
321
322 /**
323 * ata_force_cbl - force cable type according to libata.force
324 * @ap: ATA port of interest
325 *
326 * Force cable type according to libata.force and whine about it.
327 * The last entry which has matching port number is used, so it
328 * can be specified as part of device force parameters. For
329 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
330 * same effect.
331 *
332 * LOCKING:
333 * EH context.
334 */
void ata_force_cbl(struct ata_port *ap)
336 {
337 int i;
338
339 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
340 const struct ata_force_ent *fe = &ata_force_tbl[i];
341
342 if (fe->port != -1 && fe->port != ap->print_id)
343 continue;
344
345 if (fe->param.cbl == ATA_CBL_NONE)
346 continue;
347
348 ap->cbl = fe->param.cbl;
349 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
350 return;
351 }
352 }
353
354 /**
355 * ata_force_link_limits - force link limits according to libata.force
356 * @link: ATA link of interest
357 *
358 * Force link flags and SATA spd limit according to libata.force
359 * and whine about it. When only the port part is specified
360 * (e.g. 1:), the limit applies to all links connected to both
361 * the host link and all fan-out ports connected via PMP. If the
362 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link, not the host link. Device number 15 always
 * points to the host link whether PMP is attached or not. If the
 * controller has a slave link, device number 16 points to it.
366 *
367 * LOCKING:
368 * EH context.
369 */
static void ata_force_link_limits(struct ata_link *link)
371 {
372 bool did_spd = false;
373 int linkno = link->pmp;
374 int i;
375
376 if (ata_is_host_link(link))
377 linkno += 15;
378
379 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
380 const struct ata_force_ent *fe = &ata_force_tbl[i];
381
382 if (fe->port != -1 && fe->port != link->ap->print_id)
383 continue;
384
385 if (fe->device != -1 && fe->device != linkno)
386 continue;
387
388 /* only honor the first spd limit */
389 if (!did_spd && fe->param.spd_limit) {
390 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
391 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
392 fe->param.name);
393 did_spd = true;
394 }
395
396 /* let lflags stack */
397 if (fe->param.lflags) {
398 link->flags |= fe->param.lflags;
399 ata_link_notice(link,
400 "FORCE: link flag 0x%x forced -> 0x%x\n",
401 fe->param.lflags, link->flags);
402 }
403 }
404 }
405
406 /**
407 * ata_force_xfermask - force xfermask according to libata.force
408 * @dev: ATA device of interest
409 *
410 * Force xfer_mask according to libata.force and whine about it.
411 * For consistency with link selection, device number 15 selects
412 * the first device connected to the host link.
413 *
414 * LOCKING:
415 * EH context.
416 */
static void ata_force_xfermask(struct ata_device *dev)
418 {
419 int devno = dev->link->pmp + dev->devno;
420 int alt_devno = devno;
421 int i;
422
423 /* allow n.15/16 for devices attached to host port */
424 if (ata_is_host_link(dev->link))
425 alt_devno += 15;
426
427 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
428 const struct ata_force_ent *fe = &ata_force_tbl[i];
429 unsigned long pio_mask, mwdma_mask, udma_mask;
430
431 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
432 continue;
433
434 if (fe->device != -1 && fe->device != devno &&
435 fe->device != alt_devno)
436 continue;
437
438 if (!fe->param.xfer_mask)
439 continue;
440
441 ata_unpack_xfermask(fe->param.xfer_mask,
442 &pio_mask, &mwdma_mask, &udma_mask);
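		/* forcing a mode clears any faster transfer classes so the forced mode becomes the effective maximum */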
443 if (udma_mask)
444 dev->udma_mask = udma_mask;
445 else if (mwdma_mask) {
446 dev->udma_mask = 0;
447 dev->mwdma_mask = mwdma_mask;
448 } else {
449 dev->udma_mask = 0;
450 dev->mwdma_mask = 0;
451 dev->pio_mask = pio_mask;
452 }
453
454 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
455 fe->param.name);
456 return;
457 }
458 }
459
460 /**
461 * ata_force_horkage - force horkage according to libata.force
462 * @dev: ATA device of interest
463 *
464 * Force horkage according to libata.force and whine about it.
465 * For consistency with link selection, device number 15 selects
466 * the first device connected to the host link.
467 *
468 * LOCKING:
469 * EH context.
470 */
static void ata_force_horkage(struct ata_device *dev)
472 {
473 int devno = dev->link->pmp + dev->devno;
474 int alt_devno = devno;
475 int i;
476
477 /* allow n.15/16 for devices attached to host port */
478 if (ata_is_host_link(dev->link))
479 alt_devno += 15;
480
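	/* unlike cable/xfermask forcing, horkage entries stack, so walk the table in order and apply every match */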
481 for (i = 0; i < ata_force_tbl_size; i++) {
482 const struct ata_force_ent *fe = &ata_force_tbl[i];
483
484 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
485 continue;
486
487 if (fe->device != -1 && fe->device != devno &&
488 fe->device != alt_devno)
489 continue;
490
491 if (!(~dev->horkage & fe->param.horkage_on) &&
492 !(dev->horkage & fe->param.horkage_off))
493 continue;
494
495 dev->horkage |= fe->param.horkage_on;
496 dev->horkage &= ~fe->param.horkage_off;
497
498 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
499 fe->param.name);
500 }
501 }
502
503 /**
504 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
505 * @opcode: SCSI opcode
506 *
507 * Determine ATAPI command type from @opcode.
508 *
509 * LOCKING:
510 * None.
511 *
512 * RETURNS:
513 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
514 */
int atapi_cmd_type(u8 opcode)
516 {
517 switch (opcode) {
518 case GPCMD_READ_10:
519 case GPCMD_READ_12:
520 return ATAPI_READ;
521
522 case GPCMD_WRITE_10:
523 case GPCMD_WRITE_12:
524 case GPCMD_WRITE_AND_VERIFY_10:
525 return ATAPI_WRITE;
526
527 case GPCMD_READ_CD:
528 case GPCMD_READ_CD_MSF:
529 return ATAPI_READ_CD;
530
531 case ATA_16:
532 case ATA_12:
533 if (atapi_passthru16)
534 return ATAPI_PASS_THRU;
535 /* fall thru */
536 default:
537 return ATAPI_MISC;
538 }
539 }
540
541 /**
542 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
543 * @tf: Taskfile to convert
544 * @pmp: Port multiplier port
545 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
547 *
548 * Converts a standard ATA taskfile to a Serial ATA
549 * FIS structure (Register - Host to Device).
550 *
551 * LOCKING:
552 * Inherited from caller.
553 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
555 {
556 fis[0] = 0x27; /* Register - Host to Device FIS */
557 fis[1] = pmp & 0xf; /* Port multiplier number*/
558 if (is_cmd)
559 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
560
561 fis[2] = tf->command;
562 fis[3] = tf->feature;
563
564 fis[4] = tf->lbal;
565 fis[5] = tf->lbam;
566 fis[6] = tf->lbah;
567 fis[7] = tf->device;
568
569 fis[8] = tf->hob_lbal;
570 fis[9] = tf->hob_lbam;
571 fis[10] = tf->hob_lbah;
572 fis[11] = tf->hob_feature;
573
574 fis[12] = tf->nsect;
575 fis[13] = tf->hob_nsect;
576 fis[14] = 0;
577 fis[15] = tf->ctl;
578
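	/* last DWORD of the H2D FIS: the auxiliary field */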
579 fis[16] = tf->auxiliary & 0xff;
580 fis[17] = (tf->auxiliary >> 8) & 0xff;
581 fis[18] = (tf->auxiliary >> 16) & 0xff;
582 fis[19] = (tf->auxiliary >> 24) & 0xff;
583 }
584
585 /**
586 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
587 * @fis: Buffer from which data will be input
588 * @tf: Taskfile to output
589 *
590 * Converts a serial ATA FIS structure to a standard ATA taskfile.
591 *
592 * LOCKING:
593 * Inherited from caller.
594 */
595
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
597 {
598 tf->command = fis[2]; /* status */
599 tf->feature = fis[3]; /* error */
600
601 tf->lbal = fis[4];
602 tf->lbam = fis[5];
603 tf->lbah = fis[6];
604 tf->device = fis[7];
605
606 tf->hob_lbal = fis[8];
607 tf->hob_lbam = fis[9];
608 tf->hob_lbah = fis[10];
609
610 tf->nsect = fis[12];
611 tf->hob_nsect = fis[13];
612 }
613
614 static const u8 ata_rw_cmds[] = {
615 /* pio multi */
616 ATA_CMD_READ_MULTI,
617 ATA_CMD_WRITE_MULTI,
618 ATA_CMD_READ_MULTI_EXT,
619 ATA_CMD_WRITE_MULTI_EXT,
620 0,
621 0,
622 0,
623 ATA_CMD_WRITE_MULTI_FUA_EXT,
624 /* pio */
625 ATA_CMD_PIO_READ,
626 ATA_CMD_PIO_WRITE,
627 ATA_CMD_PIO_READ_EXT,
628 ATA_CMD_PIO_WRITE_EXT,
629 0,
630 0,
631 0,
632 0,
633 /* dma */
634 ATA_CMD_READ,
635 ATA_CMD_WRITE,
636 ATA_CMD_READ_EXT,
637 ATA_CMD_WRITE_EXT,
638 0,
639 0,
640 0,
641 ATA_CMD_WRITE_FUA_EXT
642 };
643
644 /**
645 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
646 * @tf: command to examine and configure
647 * @dev: device tf belongs to
648 *
649 * Examine the device configuration and tf->flags to calculate
650 * the proper read/write commands and protocol to use.
651 *
652 * LOCKING:
653 * caller.
654 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
656 {
657 u8 cmd;
658
659 int index, fua, lba48, write;
660
661 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
662 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
663 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
664
665 if (dev->flags & ATA_DFLAG_PIO) {
666 tf->protocol = ATA_PROT_PIO;
667 index = dev->multi_count ? 0 : 8;
668 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
669 /* Unable to use DMA due to host limitation */
670 tf->protocol = ATA_PROT_PIO;
671 index = dev->multi_count ? 0 : 8;
672 } else {
673 tf->protocol = ATA_PROT_DMA;
674 index = 16;
675 }
676
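	/* ata_rw_cmds[] index: base block (0 = PIO multi, 8 = PIO, 16 = DMA) plus FUA (4), LBA48 (2) and write (1) offsets */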
677 cmd = ata_rw_cmds[index + fua + lba48 + write];
678 if (cmd) {
679 tf->command = cmd;
680 return 0;
681 }
682 return -1;
683 }
684
685 /**
686 * ata_tf_read_block - Read block address from ATA taskfile
687 * @tf: ATA taskfile of interest
688 * @dev: ATA device @tf belongs to
689 *
690 * LOCKING:
691 * None.
692 *
693 * Read block address from @tf. This function can handle all
694 * three address formats - LBA, LBA48 and CHS. tf->protocol and
695 * flags select the address format to use.
696 *
697 * RETURNS:
698 * Block address read from @tf.
699 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
701 {
702 u64 block = 0;
703
704 if (tf->flags & ATA_TFLAG_LBA) {
705 if (tf->flags & ATA_TFLAG_LBA48) {
706 block |= (u64)tf->hob_lbah << 40;
707 block |= (u64)tf->hob_lbam << 32;
708 block |= (u64)tf->hob_lbal << 24;
709 } else
710 block |= (tf->device & 0xf) << 24;
711
712 block |= tf->lbah << 16;
713 block |= tf->lbam << 8;
714 block |= tf->lbal;
715 } else {
716 u32 cyl, head, sect;
717
718 cyl = tf->lbam | (tf->lbah << 8);
719 head = tf->device & 0xf;
720 sect = tf->lbal;
721
722 if (!sect) {
723 ata_dev_warn(dev,
724 "device reported invalid CHS sector 0\n");
725 return U64_MAX;
726 }
727
728 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
729 }
730
731 return block;
732 }
733
734 /**
735 * ata_build_rw_tf - Build ATA taskfile for given read/write request
736 * @tf: Target ATA taskfile
737 * @dev: ATA device @tf belongs to
738 * @block: Block address
739 * @n_block: Number of blocks
740 * @tf_flags: RW/FUA etc...
741 * @tag: tag
742 *
743 * LOCKING:
744 * None.
745 *
746 * Build ATA taskfile @tf for read/write request described by
747 * @block, @n_block, @tf_flags and @tag on @dev.
748 *
749 * RETURNS:
750 *
751 * 0 on success, -ERANGE if the request is too large for @dev,
752 * -EINVAL if the request is invalid.
753 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
757 {
758 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
759 tf->flags |= tf_flags;
760
761 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
762 /* yay, NCQ */
763 if (!lba_48_ok(block, n_block))
764 return -ERANGE;
765
766 tf->protocol = ATA_PROT_NCQ;
767 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
768
769 if (tf->flags & ATA_TFLAG_WRITE)
770 tf->command = ATA_CMD_FPDMA_WRITE;
771 else
772 tf->command = ATA_CMD_FPDMA_READ;
773
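		/* for NCQ the tag lives in bits 7:3 of the count register; the block count goes in the FEATURE fields */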
774 tf->nsect = tag << 3;
775 tf->hob_feature = (n_block >> 8) & 0xff;
776 tf->feature = n_block & 0xff;
777
778 tf->hob_lbah = (block >> 40) & 0xff;
779 tf->hob_lbam = (block >> 32) & 0xff;
780 tf->hob_lbal = (block >> 24) & 0xff;
781 tf->lbah = (block >> 16) & 0xff;
782 tf->lbam = (block >> 8) & 0xff;
783 tf->lbal = block & 0xff;
784
785 tf->device = ATA_LBA;
786 if (tf->flags & ATA_TFLAG_FUA)
787 tf->device |= 1 << 7;
788 } else if (dev->flags & ATA_DFLAG_LBA) {
789 tf->flags |= ATA_TFLAG_LBA;
790
791 if (lba_28_ok(block, n_block)) {
792 /* use LBA28 */
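			/* bits 27:24 of the LBA go in the low nibble of the device register */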
793 tf->device |= (block >> 24) & 0xf;
794 } else if (lba_48_ok(block, n_block)) {
795 if (!(dev->flags & ATA_DFLAG_LBA48))
796 return -ERANGE;
797
798 /* use LBA48 */
799 tf->flags |= ATA_TFLAG_LBA48;
800
801 tf->hob_nsect = (n_block >> 8) & 0xff;
802
803 tf->hob_lbah = (block >> 40) & 0xff;
804 tf->hob_lbam = (block >> 32) & 0xff;
805 tf->hob_lbal = (block >> 24) & 0xff;
806 } else
807 /* request too large even for LBA48 */
808 return -ERANGE;
809
810 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
811 return -EINVAL;
812
813 tf->nsect = n_block & 0xff;
814
815 tf->lbah = (block >> 16) & 0xff;
816 tf->lbam = (block >> 8) & 0xff;
817 tf->lbal = block & 0xff;
818
819 tf->device |= ATA_LBA;
820 } else {
821 /* CHS */
822 u32 sect, head, cyl, track;
823
824 /* The request -may- be too large for CHS addressing. */
825 if (!lba_28_ok(block, n_block))
826 return -ERANGE;
827
828 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
829 return -EINVAL;
830
831 /* Convert LBA to CHS */
832 track = (u32)block / dev->sectors;
833 cyl = track / dev->heads;
834 head = track % dev->heads;
835 sect = (u32)block % dev->sectors + 1;
836
837 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
838 (u32)block, track, cyl, head, sect);
839
840 /* Check whether the converted CHS can fit.
841 Cylinder: 0-65535
842 Head: 0-15
843 Sector: 1-255*/
844 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
845 return -ERANGE;
846
847 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
848 tf->lbal = sect;
849 tf->lbam = cyl;
850 tf->lbah = cyl >> 8;
851 tf->device |= head;
852 }
853
854 return 0;
855 }
856
857 /**
858 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
859 * @pio_mask: pio_mask
860 * @mwdma_mask: mwdma_mask
861 * @udma_mask: udma_mask
862 *
863 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
864 * unsigned int xfer_mask.
865 *
866 * LOCKING:
867 * None.
868 *
869 * RETURNS:
870 * Packed xfer_mask.
871 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
875 {
876 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
877 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
878 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
879 }
880
881 /**
882 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
883 * @xfer_mask: xfer_mask to unpack
884 * @pio_mask: resulting pio_mask
885 * @mwdma_mask: resulting mwdma_mask
886 * @udma_mask: resulting udma_mask
887 *
888 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
889 * Any NULL destination masks will be ignored.
890 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
893 {
894 if (pio_mask)
895 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
896 if (mwdma_mask)
897 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
898 if (udma_mask)
899 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
900 }
901
902 static const struct ata_xfer_ent {
903 int shift, bits;
904 u8 base;
905 } ata_xfer_tbl[] = {
906 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
907 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
908 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
909 { -1, },
910 };
911
912 /**
913 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
914 * @xfer_mask: xfer_mask of interest
915 *
916 * Return matching XFER_* value for @xfer_mask. Only the highest
917 * bit of @xfer_mask is considered.
918 *
919 * LOCKING:
920 * None.
921 *
922 * RETURNS:
923 * Matching XFER_* value, 0xff if no match found.
924 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
926 {
927 int highbit = fls(xfer_mask) - 1;
928 const struct ata_xfer_ent *ent;
929
930 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
931 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
932 return ent->base + highbit - ent->shift;
933 return 0xff;
934 }
935
936 /**
937 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
938 * @xfer_mode: XFER_* of interest
939 *
940 * Return matching xfer_mask for @xfer_mode.
941 *
942 * LOCKING:
943 * None.
944 *
945 * RETURNS:
946 * Matching xfer_mask, 0 if no match found.
947 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
949 {
950 const struct ata_xfer_ent *ent;
951
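	/* the returned mask covers every mode of the matching transfer type up to and including @xfer_mode */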
952 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
953 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
954 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
955 & ~((1 << ent->shift) - 1);
956 return 0;
957 }
958
959 /**
960 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
961 * @xfer_mode: XFER_* of interest
962 *
963 * Return matching xfer_shift for @xfer_mode.
964 *
965 * LOCKING:
966 * None.
967 *
968 * RETURNS:
969 * Matching xfer_shift, -1 if no match found.
970 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
972 {
973 const struct ata_xfer_ent *ent;
974
975 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
976 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
977 return ent->shift;
978 return -1;
979 }
980
981 /**
982 * ata_mode_string - convert xfer_mask to string
983 * @xfer_mask: mask of bits supported; only highest bit counts.
984 *
985 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
987 *
988 * LOCKING:
989 * None.
990 *
991 * RETURNS:
992 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
994 */
const char *ata_mode_string(unsigned long xfer_mask)
996 {
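	/* table order must match the bit layout produced by ata_pack_xfermask(): PIO, then MWDMA, then UDMA */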
997 static const char * const xfer_mode_str[] = {
998 "PIO0",
999 "PIO1",
1000 "PIO2",
1001 "PIO3",
1002 "PIO4",
1003 "PIO5",
1004 "PIO6",
1005 "MWDMA0",
1006 "MWDMA1",
1007 "MWDMA2",
1008 "MWDMA3",
1009 "MWDMA4",
1010 "UDMA/16",
1011 "UDMA/25",
1012 "UDMA/33",
1013 "UDMA/44",
1014 "UDMA/66",
1015 "UDMA/100",
1016 "UDMA/133",
1017 "UDMA7",
1018 };
1019 int highbit;
1020
1021 highbit = fls(xfer_mask) - 1;
1022 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1023 return xfer_mode_str[highbit];
1024 return "<n/a>";
1025 }
1026
const char *sata_spd_string(unsigned int spd)
1028 {
1029 static const char * const spd_str[] = {
1030 "1.5 Gbps",
1031 "3.0 Gbps",
1032 "6.0 Gbps",
1033 };
1034
1035 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1036 return "<unknown>";
1037 return spd_str[spd - 1];
1038 }
1039
1040 /**
1041 * ata_dev_classify - determine device type based on ATA-spec signature
1042 * @tf: ATA taskfile register set for device to be identified
1043 *
1044 * Determine from taskfile register contents whether a device is
1045 * ATA or ATAPI, as per "Signature and persistence" section
1046 * of ATA/PI spec (volume 1, sect 5.14).
1047 *
1048 * LOCKING:
1049 * None.
1050 *
1051 * RETURNS:
1052 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1054 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1056 {
1057 /* Apple's open source Darwin code hints that some devices only
1058 * put a proper signature into the LBA mid/high registers,
 * so we only check those. It's sufficient for uniqueness.
1060 *
1061 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1062 * signatures for ATA and ATAPI devices attached on SerialATA,
1063 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
 * spec has never mentioned using different signatures
 * for ATA/ATAPI devices. Then, Serial ATA II: Port
 * Multiplier specification began to use 0x69/0x96 to identify
 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1068 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1069 * 0x69/0x96 shortly and described them as reserved for
1070 * SerialATA.
1071 *
1072 * We follow the current spec and consider that 0x69/0x96
1073 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1074 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1075 * SEMB signature. This is worked around in
1076 * ata_dev_read_id().
1077 */
1078 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1079 DPRINTK("found ATA device by sig\n");
1080 return ATA_DEV_ATA;
1081 }
1082
1083 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1084 DPRINTK("found ATAPI device by sig\n");
1085 return ATA_DEV_ATAPI;
1086 }
1087
1088 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1089 DPRINTK("found PMP device by sig\n");
1090 return ATA_DEV_PMP;
1091 }
1092
1093 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1094 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1095 return ATA_DEV_SEMB;
1096 }
1097
1098 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1099 DPRINTK("found ZAC device by sig\n");
1100 return ATA_DEV_ZAC;
1101 }
1102
1103 DPRINTK("unknown device\n");
1104 return ATA_DEV_UNKNOWN;
1105 }
1106
1107 /**
1108 * ata_id_string - Convert IDENTIFY DEVICE page into string
1109 * @id: IDENTIFY DEVICE results we will examine
1110 * @s: string into which data is output
1111 * @ofs: offset into identify device page
1112 * @len: length of string to return. must be an even number.
1113 *
1114 * The strings in the IDENTIFY DEVICE page are broken up into
1115 * 16-bit chunks. Run through the string, and output each
1116 * 8-bit chunk linearly, regardless of platform.
1117 *
1118 * LOCKING:
1119 * caller.
1120 */
1121
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
1124 {
1125 unsigned int c;
1126
1127 BUG_ON(len & 1);
1128
1129 while (len > 0) {
1130 c = id[ofs] >> 8;
1131 *s = c;
1132 s++;
1133
1134 c = id[ofs] & 0xff;
1135 *s = c;
1136 s++;
1137
1138 ofs++;
1139 len -= 2;
1140 }
1141 }
1142
1143 /**
1144 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1145 * @id: IDENTIFY DEVICE results we will examine
1146 * @s: string into which data is output
1147 * @ofs: offset into identify device page
1148 * @len: length of string to return. must be an odd number.
1149 *
1150 * This function is identical to ata_id_string except that it
1151 * trims trailing spaces and terminates the resulting string with
1152 * null. @len must be actual maximum length (even number) + 1.
1153 *
1154 * LOCKING:
1155 * caller.
1156 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
1159 {
1160 unsigned char *p;
1161
1162 ata_id_string(id, s, ofs, len - 1);
1163
1164 p = s + strnlen(s, len - 1);
1165 while (p > s && p[-1] == ' ')
1166 p--;
1167 *p = '\0';
1168 }
1169
static u64 ata_id_n_sectors(const u16 *id)
1171 {
1172 if (ata_id_has_lba(id)) {
1173 if (ata_id_has_lba48(id))
1174 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1175 else
1176 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1177 } else {
1178 if (ata_id_current_chs_valid(id))
1179 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1180 id[ATA_ID_CUR_SECTORS];
1181 else
1182 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1183 id[ATA_ID_SECTORS];
1184 }
1185 }
1186
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1188 {
1189 u64 sectors = 0;
1190
1191 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1192 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1193 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1194 sectors |= (tf->lbah & 0xff) << 16;
1195 sectors |= (tf->lbam & 0xff) << 8;
1196 sectors |= (tf->lbal & 0xff);
1197
1198 return sectors;
1199 }
1200
u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1202 {
1203 u64 sectors = 0;
1204
1205 sectors |= (tf->device & 0x0f) << 24;
1206 sectors |= (tf->lbah & 0xff) << 16;
1207 sectors |= (tf->lbam & 0xff) << 8;
1208 sectors |= (tf->lbal & 0xff);
1209
1210 return sectors;
1211 }
1212
1213 /**
1214 * ata_read_native_max_address - Read native max address
1215 * @dev: target device
1216 * @max_sectors: out parameter for the result native max address
1217 *
1218 * Perform an LBA48 or LBA28 native size query upon the device in
1219 * question.
1220 *
1221 * RETURNS:
1222 * 0 on success, -EACCES if command is aborted by the drive.
1223 * -EIO on other errors.
1224 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1226 {
1227 unsigned int err_mask;
1228 struct ata_taskfile tf;
1229 int lba48 = ata_id_has_lba48(dev->id);
1230
1231 ata_tf_init(dev, &tf);
1232
1233 /* always clear all address registers */
1234 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1235
1236 if (lba48) {
1237 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1238 tf.flags |= ATA_TFLAG_LBA48;
1239 } else
1240 tf.command = ATA_CMD_READ_NATIVE_MAX;
1241
1242 tf.protocol = ATA_PROT_NODATA;
1243 tf.device |= ATA_LBA;
1244
1245 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1246 if (err_mask) {
1247 ata_dev_warn(dev,
1248 "failed to read native max address (err_mask=0x%x)\n",
1249 err_mask);
1250 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1251 return -EACCES;
1252 return -EIO;
1253 }
1254
1255 if (lba48)
1256 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1257 else
1258 *max_sectors = ata_tf_to_lba(&tf) + 1;
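	/* ATA_HORKAGE_HPA_SIZE devices report their native size off by one; compensate */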
1259 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1260 (*max_sectors)--;
1261 return 0;
1262 }
1263
1264 /**
1265 * ata_set_max_sectors - Set max sectors
1266 * @dev: target device
1267 * @new_sectors: new max sectors value to set for the device
1268 *
1269 * Set max sectors of @dev to @new_sectors.
1270 *
1271 * RETURNS:
1272 * 0 on success, -EACCES if command is aborted or denied (due to
1273 * previous non-volatile SET_MAX) by the drive. -EIO on other
1274 * errors.
1275 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1277 {
1278 unsigned int err_mask;
1279 struct ata_taskfile tf;
1280 int lba48 = ata_id_has_lba48(dev->id);
1281
1282 new_sectors--;
1283
1284 ata_tf_init(dev, &tf);
1285
1286 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1287
1288 if (lba48) {
1289 tf.command = ATA_CMD_SET_MAX_EXT;
1290 tf.flags |= ATA_TFLAG_LBA48;
1291
1292 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1293 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1294 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1295 } else {
1296 tf.command = ATA_CMD_SET_MAX;
1297
1298 tf.device |= (new_sectors >> 24) & 0xf;
1299 }
1300
1301 tf.protocol = ATA_PROT_NODATA;
1302 tf.device |= ATA_LBA;
1303
1304 tf.lbal = (new_sectors >> 0) & 0xff;
1305 tf.lbam = (new_sectors >> 8) & 0xff;
1306 tf.lbah = (new_sectors >> 16) & 0xff;
1307
1308 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1309 if (err_mask) {
1310 ata_dev_warn(dev,
1311 "failed to set max address (err_mask=0x%x)\n",
1312 err_mask);
1313 if (err_mask == AC_ERR_DEV &&
1314 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1315 return -EACCES;
1316 return -EIO;
1317 }
1318
1319 return 0;
1320 }
1321
1322 /**
1323 * ata_hpa_resize - Resize a device with an HPA set
1324 * @dev: Device to resize
1325 *
1326 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1327 * it if required to the full size of the media. The caller must check
1328 * the drive has the HPA feature set enabled.
1329 *
1330 * RETURNS:
1331 * 0 on success, -errno on failure.
1332 */
static int ata_hpa_resize(struct ata_device *dev)
1334 {
1335 struct ata_eh_context *ehc = &dev->link->eh_context;
1336 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1337 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1338 u64 sectors = ata_id_n_sectors(dev->id);
1339 u64 native_sectors;
1340 int rc;
1341
1342 /* do we need to do it? */
1343 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1344 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1345 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1346 return 0;
1347
1348 /* read native max address */
1349 rc = ata_read_native_max_address(dev, &native_sectors);
1350 if (rc) {
1351 /* If device aborted the command or HPA isn't going to
1352 * be unlocked, skip HPA resizing.
1353 */
1354 if (rc == -EACCES || !unlock_hpa) {
1355 ata_dev_warn(dev,
1356 "HPA support seems broken, skipping HPA handling\n");
1357 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1358
1359 /* we can continue if device aborted the command */
1360 if (rc == -EACCES)
1361 rc = 0;
1362 }
1363
1364 return rc;
1365 }
1366 dev->n_native_sectors = native_sectors;
1367
1368 /* nothing to do? */
1369 if (native_sectors <= sectors || !unlock_hpa) {
1370 if (!print_info || native_sectors == sectors)
1371 return 0;
1372
1373 if (native_sectors > sectors)
1374 ata_dev_info(dev,
1375 "HPA detected: current %llu, native %llu\n",
1376 (unsigned long long)sectors,
1377 (unsigned long long)native_sectors);
1378 else if (native_sectors < sectors)
1379 ata_dev_warn(dev,
1380 "native sectors (%llu) is smaller than sectors (%llu)\n",
1381 (unsigned long long)native_sectors,
1382 (unsigned long long)sectors);
1383 return 0;
1384 }
1385
1386 /* let's unlock HPA */
1387 rc = ata_set_max_sectors(dev, native_sectors);
1388 if (rc == -EACCES) {
1389 /* if device aborted the command, skip HPA resizing */
1390 ata_dev_warn(dev,
1391 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1392 (unsigned long long)sectors,
1393 (unsigned long long)native_sectors);
1394 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1395 return 0;
1396 } else if (rc)
1397 return rc;
1398
1399 /* re-read IDENTIFY data */
1400 rc = ata_dev_reread_id(dev, 0);
1401 if (rc) {
1402 ata_dev_err(dev,
1403 "failed to re-read IDENTIFY data after HPA resizing\n");
1404 return rc;
1405 }
1406
1407 if (print_info) {
1408 u64 new_sectors = ata_id_n_sectors(dev->id);
1409 ata_dev_info(dev,
1410 "HPA unlocked: %llu -> %llu, native %llu\n",
1411 (unsigned long long)sectors,
1412 (unsigned long long)new_sectors,
1413 (unsigned long long)native_sectors);
1414 }
1415
1416 return 0;
1417 }
1418
1419 /**
1420 * ata_dump_id - IDENTIFY DEVICE info debugging output
1421 * @id: IDENTIFY DEVICE page to dump
1422 *
1423 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1424 * page.
1425 *
1426 * LOCKING:
1427 * caller.
1428 */
1429
static inline void ata_dump_id(const u16 *id)
1431 {
1432 DPRINTK("49==0x%04x "
1433 "53==0x%04x "
1434 "63==0x%04x "
1435 "64==0x%04x "
1436 "75==0x%04x \n",
1437 id[49],
1438 id[53],
1439 id[63],
1440 id[64],
1441 id[75]);
1442 DPRINTK("80==0x%04x "
1443 "81==0x%04x "
1444 "82==0x%04x "
1445 "83==0x%04x "
1446 "84==0x%04x \n",
1447 id[80],
1448 id[81],
1449 id[82],
1450 id[83],
1451 id[84]);
1452 DPRINTK("88==0x%04x "
1453 "93==0x%04x\n",
1454 id[88],
1455 id[93]);
1456 }
1457
1458 /**
1459 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1460 * @id: IDENTIFY data to compute xfer mask from
1461 *
1462 * Compute the xfermask for this device. This is not as trivial
1463 * as it seems if we must consider early devices correctly.
1464 *
1465 * FIXME: pre IDE drive timing (do we care ?).
1466 *
1467 * LOCKING:
1468 * None.
1469 *
1470 * RETURNS:
1471 * Computed xfermask
1472 */
unsigned long ata_id_xfermask(const u16 *id)
1474 {
1475 unsigned long pio_mask, mwdma_mask, udma_mask;
1476
1477 /* Usual case. Word 53 indicates word 64 is valid */
1478 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
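		/* word 64 bits 1:0 advertise PIO3/PIO4; PIO0-2 are always assumed to be supported */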
1479 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1480 pio_mask <<= 3;
1481 pio_mask |= 0x7;
1482 } else {
1483 /* If word 64 isn't valid then Word 51 high byte holds
1484 * the PIO timing number for the maximum. Turn it into
1485 * a mask.
1486 */
1487 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1488 if (mode < 5) /* Valid PIO range */
1489 pio_mask = (2 << mode) - 1;
1490 else
1491 pio_mask = 1;
1492
1493 /* But wait.. there's more. Design your standards by
1494 * committee and you too can get a free iordy field to
 * process. However, it's the speeds not the modes that
1496 * are supported... Note drivers using the timing API
1497 * will get this right anyway
1498 */
1499 }
1500
1501 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1502
1503 if (ata_id_is_cfa(id)) {
1504 /*
1505 * Process compact flash extended modes
1506 */
1507 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1508 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1509
1510 if (pio)
1511 pio_mask |= (1 << 5);
1512 if (pio > 1)
1513 pio_mask |= (1 << 6);
1514 if (dma)
1515 mwdma_mask |= (1 << 3);
1516 if (dma > 1)
1517 mwdma_mask |= (1 << 4);
1518 }
1519
1520 udma_mask = 0;
1521 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1522 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1523
1524 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1525 }
1526
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1528 {
1529 struct completion *waiting = qc->private_data;
1530
1531 complete(waiting);
1532 }
1533
1534 /**
1535 * ata_exec_internal_sg - execute libata internal command
1536 * @dev: Device to which the command is sent
1537 * @tf: Taskfile registers for the command and the result
1538 * @cdb: CDB for packet command
1539 * @dma_dir: Data transfer direction of the command
1540 * @sgl: sg list for the data buffer of the command
1541 * @n_elem: Number of sg entries
1542 * @timeout: Timeout in msecs (0 for default)
1543 *
1544 * Executes libata internal command with timeout. @tf contains
1545 * command on entry and result on return. Timeout and error
1546 * conditions are reported via return value. No recovery action
 * is taken after a command times out. It's the caller's duty to
1548 * clean up after timeout.
1549 *
1550 * LOCKING:
1551 * None. Should be called with kernel context, might sleep.
1552 *
1553 * RETURNS:
1554 * Zero on success, AC_ERR_* mask on failure
1555 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
1560 {
1561 struct ata_link *link = dev->link;
1562 struct ata_port *ap = link->ap;
1563 u8 command = tf->command;
1564 int auto_timeout = 0;
1565 struct ata_queued_cmd *qc;
1566 unsigned int tag, preempted_tag;
1567 u32 preempted_sactive, preempted_qc_active;
1568 int preempted_nr_active_links;
1569 DECLARE_COMPLETION_ONSTACK(wait);
1570 unsigned long flags;
1571 unsigned int err_mask;
1572 int rc;
1573
1574 spin_lock_irqsave(ap->lock, flags);
1575
1576 /* no internal command while frozen */
1577 if (ap->pflags & ATA_PFLAG_FROZEN) {
1578 spin_unlock_irqrestore(ap->lock, flags);
1579 return AC_ERR_SYSTEM;
1580 }
1581
1582 /* initialize internal qc */
1583
1584 /* XXX: Tag 0 is used for drivers with legacy EH as some
1585 * drivers choke if any other tag is given. This breaks
1586 * ata_tag_internal() test for those drivers. Don't use new
1587 * EH stuff without converting to it.
1588 */
1589 if (ap->ops->error_handler)
1590 tag = ATA_TAG_INTERNAL;
1591 else
1592 tag = 0;
1593
1594 qc = __ata_qc_from_tag(ap, tag);
1595
1596 qc->tag = tag;
1597 qc->scsicmd = NULL;
1598 qc->ap = ap;
1599 qc->dev = dev;
1600 ata_qc_reinit(qc);
1601
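	/* save and clear the active command state so the internal command has the port to itself; restored on completion */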
1602 preempted_tag = link->active_tag;
1603 preempted_sactive = link->sactive;
1604 preempted_qc_active = ap->qc_active;
1605 preempted_nr_active_links = ap->nr_active_links;
1606 link->active_tag = ATA_TAG_POISON;
1607 link->sactive = 0;
1608 ap->qc_active = 0;
1609 ap->nr_active_links = 0;
1610
1611 /* prepare & issue qc */
1612 qc->tf = *tf;
1613 if (cdb)
1614 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1615
1616 /* some SATA bridges need us to indicate data xfer direction */
1617 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1618 dma_dir == DMA_FROM_DEVICE)
1619 qc->tf.feature |= ATAPI_DMADIR;
1620
1621 qc->flags |= ATA_QCFLAG_RESULT_TF;
1622 qc->dma_dir = dma_dir;
1623 if (dma_dir != DMA_NONE) {
1624 unsigned int i, buflen = 0;
1625 struct scatterlist *sg;
1626
1627 for_each_sg(sgl, sg, n_elem, i)
1628 buflen += sg->length;
1629
1630 ata_sg_init(qc, sgl, n_elem);
1631 qc->nbytes = buflen;
1632 }
1633
1634 qc->private_data = &wait;
1635 qc->complete_fn = ata_qc_complete_internal;
1636
1637 ata_qc_issue(qc);
1638
1639 spin_unlock_irqrestore(ap->lock, flags);
1640
1641 if (!timeout) {
1642 if (ata_probe_timeout)
1643 timeout = ata_probe_timeout * 1000;
1644 else {
1645 timeout = ata_internal_cmd_timeout(dev, command);
1646 auto_timeout = 1;
1647 }
1648 }
1649
1650 if (ap->ops->error_handler)
1651 ata_eh_release(ap);
1652
1653 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1654
1655 if (ap->ops->error_handler)
1656 ata_eh_acquire(ap);
1657
1658 ata_sff_flush_pio_task(ap);
1659
1660 if (!rc) {
1661 spin_lock_irqsave(ap->lock, flags);
1662
1663 /* We're racing with irq here. If we lose, the
1664 * following test prevents us from completing the qc
1665 * twice. If we win, the port is frozen and will be
1666 * cleaned up by ->post_internal_cmd().
1667 */
1668 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1669 qc->err_mask |= AC_ERR_TIMEOUT;
1670
1671 if (ap->ops->error_handler)
1672 ata_port_freeze(ap);
1673 else
1674 ata_qc_complete(qc);
1675
1676 if (ata_msg_warn(ap))
1677 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1678 command);
1679 }
1680
1681 spin_unlock_irqrestore(ap->lock, flags);
1682 }
1683
1684 /* do post_internal_cmd */
1685 if (ap->ops->post_internal_cmd)
1686 ap->ops->post_internal_cmd(qc);
1687
1688 /* perform minimal error analysis */
1689 if (qc->flags & ATA_QCFLAG_FAILED) {
1690 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1691 qc->err_mask |= AC_ERR_DEV;
1692
1693 if (!qc->err_mask)
1694 qc->err_mask |= AC_ERR_OTHER;
1695
1696 if (qc->err_mask & ~AC_ERR_OTHER)
1697 qc->err_mask &= ~AC_ERR_OTHER;
1698 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1699 qc->result_tf.command |= ATA_SENSE;
1700 }
1701
1702 /* finish up */
1703 spin_lock_irqsave(ap->lock, flags);
1704
1705 *tf = qc->result_tf;
1706 err_mask = qc->err_mask;
1707
1708 ata_qc_free(qc);
1709 link->active_tag = preempted_tag;
1710 link->sactive = preempted_sactive;
1711 ap->qc_active = preempted_qc_active;
1712 ap->nr_active_links = preempted_nr_active_links;
1713
1714 spin_unlock_irqrestore(ap->lock, flags);
1715
1716 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1717 ata_internal_cmd_timed_out(dev, command);
1718
1719 return err_mask;
1720 }
1721
1722 /**
1723 * ata_exec_internal - execute libata internal command
1724 * @dev: Device to which the command is sent
1725 * @tf: Taskfile registers for the command and the result
1726 * @cdb: CDB for packet command
1727 * @dma_dir: Data transfer direction of the command
1728 * @buf: Data buffer of the command
1729 * @buflen: Length of data buffer
1730 * @timeout: Timeout in msecs (0 for default)
1731 *
1732 * Wrapper around ata_exec_internal_sg() which takes simple
1733 * buffer instead of sg list.
1734 *
1735 * LOCKING:
1736 * None. Should be called with kernel context, might sleep.
1737 *
1738 * RETURNS:
1739 * Zero on success, AC_ERR_* mask on failure
1740 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
1745 {
1746 struct scatterlist *psg = NULL, sg;
1747 unsigned int n_elem = 0;
1748
1749 if (dma_dir != DMA_NONE) {
1750 WARN_ON(!buf);
1751 sg_init_one(&sg, buf, buflen);
1752 psg = &sg;
1753 n_elem++;
1754 }
1755
1756 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1757 timeout);
1758 }
1759
1760 /**
1761 * ata_pio_need_iordy - check if iordy needed
1762 * @adev: ATA device
1763 *
1764 * Check if the current speed of the device requires IORDY. Used
1765 * by various controllers for chip configuration.
1766 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1768 {
1769 /* Don't set IORDY if we're preparing for reset. IORDY may
1770 * lead to controller lock up on certain controllers if the
1771 * port is not occupied. See bko#11703 for details.
1772 */
1773 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1774 return 0;
1775 /* Controller doesn't support IORDY. Probably a pointless
1776 * check as the caller should know this.
1777 */
1778 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1779 return 0;
1780 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1781 if (ata_id_is_cfa(adev->id)
1782 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1783 return 0;
1784 /* PIO3 and higher it is mandatory */
1785 if (adev->pio_mode > XFER_PIO_2)
1786 return 1;
1787 /* We turn it on when possible */
1788 if (ata_id_has_iordy(adev->id))
1789 return 1;
1790 return 0;
1791 }
1792
1793 /**
1794 * ata_pio_mask_no_iordy - Return the non IORDY mask
1795 * @adev: ATA device
1796 *
1797 * Compute the highest mode possible if we are not using iordy. Return
1798 * -1 if no iordy mode is available.
1799 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1801 {
1802 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1803 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1804 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1805 /* Is the speed faster than the drive allows non IORDY ? */
1806 if (pio) {
1807 /* This is cycle times not frequency - watch the logic! */
1808 if (pio > 240) /* PIO2 is 240nS per cycle */
1809 return 3 << ATA_SHIFT_PIO;
1810 return 7 << ATA_SHIFT_PIO;
1811 }
1812 }
1813 return 3 << ATA_SHIFT_PIO;
1814 }
1815
1816 /**
1817 * ata_do_dev_read_id - default ID read method
1818 * @dev: device
1819 * @tf: proposed taskfile
1820 * @id: data buffer
1821 *
1822 * Issue the identify taskfile and hand back the buffer containing
1823 * identify data. For some RAID controllers and for pre ATA devices
1824 * this function is wrapped or replaced by the driver
1825 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
1828 {
1829 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1830 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1831 }
1832
1833 /**
1834 * ata_dev_read_id - Read ID data from the specified device
1835 * @dev: target device
1836 * @p_class: pointer to class of the target device (may be changed)
1837 * @flags: ATA_READID_* flags
1838 * @id: buffer to read IDENTIFY data into
1839 *
1840 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1841 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1842 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1843 * for pre-ATA4 drives.
1844 *
1845 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1846 * now we abort if we hit that case.
1847 *
1848 * LOCKING:
1849 * Kernel thread context (may sleep)
1850 *
1851 * RETURNS:
1852 * 0 on success, -errno otherwise.
1853 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
1856 {
1857 struct ata_port *ap = dev->link->ap;
1858 unsigned int class = *p_class;
1859 struct ata_taskfile tf;
1860 unsigned int err_mask = 0;
1861 const char *reason;
1862 bool is_semb = class == ATA_DEV_SEMB;
1863 int may_fallback = 1, tried_spinup = 0;
1864 int rc;
1865
1866 if (ata_msg_ctl(ap))
1867 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1868
1869 retry:
1870 ata_tf_init(dev, &tf);
1871
1872 switch (class) {
1873 case ATA_DEV_SEMB:
1874 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1875 case ATA_DEV_ATA:
1876 case ATA_DEV_ZAC:
1877 tf.command = ATA_CMD_ID_ATA;
1878 break;
1879 case ATA_DEV_ATAPI:
1880 tf.command = ATA_CMD_ID_ATAPI;
1881 break;
1882 default:
1883 rc = -ENODEV;
1884 reason = "unsupported class";
1885 goto err_out;
1886 }
1887
1888 tf.protocol = ATA_PROT_PIO;
1889
1890 /* Some devices choke if TF registers contain garbage. Make
1891 * sure those are properly initialized.
1892 */
1893 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1894
1895 /* Device presence detection is unreliable on some
1896 * controllers. Always poll IDENTIFY if available.
1897 */
1898 tf.flags |= ATA_TFLAG_POLLING;
1899
1900 if (ap->ops->read_id)
1901 err_mask = ap->ops->read_id(dev, &tf, id);
1902 else
1903 err_mask = ata_do_dev_read_id(dev, &tf, id);
1904
1905 if (err_mask) {
1906 if (err_mask & AC_ERR_NODEV_HINT) {
1907 ata_dev_dbg(dev, "NODEV after polling detection\n");
1908 return -ENOENT;
1909 }
1910
1911 if (is_semb) {
1912 ata_dev_info(dev,
1913 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1914 /* SEMB is not supported yet */
1915 *p_class = ATA_DEV_SEMB_UNSUP;
1916 return 0;
1917 }
1918
1919 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1920 /* Device or controller might have reported
1921 * the wrong device class. Give a shot at the
1922 * other IDENTIFY if the current one is
1923 * aborted by the device.
1924 */
1925 if (may_fallback) {
1926 may_fallback = 0;
1927
1928 if (class == ATA_DEV_ATA)
1929 class = ATA_DEV_ATAPI;
1930 else
1931 class = ATA_DEV_ATA;
1932 goto retry;
1933 }
1934
1935 /* Control reaches here iff the device aborted
1936 * both flavors of IDENTIFYs which happens
1937 * sometimes with phantom devices.
1938 */
1939 ata_dev_dbg(dev,
1940 "both IDENTIFYs aborted, assuming NODEV\n");
1941 return -ENOENT;
1942 }
1943
1944 rc = -EIO;
1945 reason = "I/O error";
1946 goto err_out;
1947 }
1948
1949 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1950 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1951 "class=%d may_fallback=%d tried_spinup=%d\n",
1952 class, may_fallback, tried_spinup);
1953 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1954 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1955 }
1956
1957 /* Falling back doesn't make sense if ID data was read
1958 * successfully at least once.
1959 */
1960 may_fallback = 0;
1961
1962 swap_buf_le16(id, ATA_ID_WORDS);
1963
1964 /* sanity check */
1965 rc = -EINVAL;
1966 reason = "device reports invalid type";
1967
1968 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1969 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1970 goto err_out;
1971 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1972 ata_id_is_ata(id)) {
1973 ata_dev_dbg(dev,
1974 "host indicates ignore ATA devices, ignored\n");
1975 return -ENOENT;
1976 }
1977 } else {
1978 if (ata_id_is_ata(id))
1979 goto err_out;
1980 }
1981
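/*
 * Per ATA/ATAPI-7, IDENTIFY word 2 set to 0x37c8 or 0x738c flags a
 * device that powered up in standby and needs the SET FEATURES
 * spin-up subcommand: 0x37c8 means the IDENTIFY data above is
 * incomplete, 0x738c means it is complete.
 */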
1982 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1983 tried_spinup = 1;
1984 /*
1985 * Drive powered-up in standby mode, and requires a specific
1986 * SET_FEATURES spin-up subcommand before it will accept
1987 * anything other than the original IDENTIFY command.
1988 */
1989 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1990 if (err_mask && id[2] != 0x738c) {
1991 rc = -EIO;
1992 reason = "SPINUP failed";
1993 goto err_out;
1994 }
1995 /*
1996 * If the drive initially returned incomplete IDENTIFY info,
1997 * we now must reissue the IDENTIFY command.
1998 */
1999 if (id[2] == 0x37c8)
2000 goto retry;
2001 }
2002
2003 if ((flags & ATA_READID_POSTRESET) &&
2004 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2005 /*
2006 * The exact sequence expected by certain pre-ATA4 drives is:
2007 * SRST RESET
2008 * IDENTIFY (optional in early ATA)
2009 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2010 * anything else..
2011 * Some drives were very specific about that exact sequence.
2012 *
2013 * Note that ATA4 says lba is mandatory so the second check
2014 * should never trigger.
2015 */
2016 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2017 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2018 if (err_mask) {
2019 rc = -EIO;
2020 reason = "INIT_DEV_PARAMS failed";
2021 goto err_out;
2022 }
2023
2024 /* current CHS translation info (id[53-58]) might be
2025 * changed. reread the identify device info.
2026 */
2027 flags &= ~ATA_READID_POSTRESET;
2028 goto retry;
2029 }
2030 }
2031
2032 *p_class = class;
2033
2034 return 0;
2035
2036 err_out:
2037 if (ata_msg_warn(ap))
2038 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2039 reason, err_mask);
2040 return rc;
2041 }
2042
2043 static int ata_do_link_spd_horkage(struct ata_device *dev)
2044 {
2045 struct ata_link *plink = ata_dev_phys_link(dev);
2046 u32 target, target_limit;
2047
2048 if (!sata_scr_valid(plink))
2049 return 0;
2050
2051 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2052 target = 1;
2053 else
2054 return 0;
2055
2056 target_limit = (1 << target) - 1;
2057
2058 /* if already on stricter limit, no need to push further */
2059 if (plink->sata_spd_limit <= target_limit)
2060 return 0;
2061
2062 plink->sata_spd_limit = target_limit;
2063
2064 /* Request another EH round by returning -EAGAIN if link is
2065 * going faster than the target speed. Forward progress is
2066 * guaranteed by setting sata_spd_limit to target_limit above.
2067 */
2068 if (plink->sata_spd > target) {
2069 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2070 sata_spd_string(target));
2071 return -EAGAIN;
2072 }
2073 return 0;
2074 }
2075
2076 static inline u8 ata_dev_knobble(struct ata_device *dev)
2077 {
2078 struct ata_port *ap = dev->link->ap;
2079
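/*
 * A "knobbled" device is a PATA drive sitting behind a SATA bridge:
 * the port reports a SATA cable but the drive's IDENTIFY data shows
 * no native SATA support, so the caller applies bridge limits.
 */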
2080 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2081 return 0;
2082
2083 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2084 }
2085
2086 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2087 {
2088 struct ata_port *ap = dev->link->ap;
2089 unsigned int err_mask;
2090 int log_index = ATA_LOG_NCQ_SEND_RECV * 2;
2091 u16 log_pages;
2092
2093 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2094 0, ap->sector_buf, 1);
2095 if (err_mask) {
2096 ata_dev_dbg(dev,
2097 "failed to get Log Directory Emask 0x%x\n",
2098 err_mask);
2099 return;
2100 }
2101 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2102 if (!log_pages) {
2103 ata_dev_warn(dev,
2104 "NCQ Send/Recv Log not supported\n");
2105 return;
2106 }
2107 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2108 0, ap->sector_buf, 1);
2109 if (err_mask) {
2110 ata_dev_dbg(dev,
2111 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2112 err_mask);
2113 } else {
2114 u8 *cmds = dev->ncq_send_recv_cmds;
2115
2116 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2117 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2118
2119 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2120 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2121 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2122 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2123 }
2124 }
2125 }
2126
2127 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2128 {
2129 struct ata_port *ap = dev->link->ap;
2130 unsigned int err_mask;
2131 int log_index = ATA_LOG_NCQ_NON_DATA * 2;
2132 u16 log_pages;
2133
2134 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2135 0, ap->sector_buf, 1);
2136 if (err_mask) {
2137 ata_dev_dbg(dev,
2138 "failed to get Log Directory Emask 0x%x\n",
2139 err_mask);
2140 return;
2141 }
2142 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2143 if (!log_pages) {
2144 ata_dev_warn(dev,
2145 "NCQ Non-Data Log not supported\n");
2146 return;
2147 }
2148 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2149 0, ap->sector_buf, 1);
2150 if (err_mask) {
2151 ata_dev_dbg(dev,
2152 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2153 err_mask);
2154 } else {
2155 u8 *cmds = dev->ncq_non_data_cmds;
2156
2157 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2158 }
2159 }
2160
2161 static int ata_dev_config_ncq(struct ata_device *dev,
2162 char *desc, size_t desc_sz)
2163 {
2164 struct ata_port *ap = dev->link->ap;
2165 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2166 unsigned int err_mask;
2167 char *aa_desc = "";
2168
2169 if (!ata_id_has_ncq(dev->id)) {
2170 desc[0] = '\0';
2171 return 0;
2172 }
2173 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2174 snprintf(desc, desc_sz, "NCQ (not used)");
2175 return 0;
2176 }
2177 if (ap->flags & ATA_FLAG_NCQ) {
2178 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2179 dev->flags |= ATA_DFLAG_NCQ;
2180 }
2181
2182 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2183 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2184 ata_id_has_fpdma_aa(dev->id)) {
2185 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2186 SATA_FPDMA_AA);
2187 if (err_mask) {
2188 ata_dev_err(dev,
2189 "failed to enable AA (error_mask=0x%x)\n",
2190 err_mask);
2191 if (err_mask != AC_ERR_DEV) {
2192 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2193 return -EIO;
2194 }
2195 } else
2196 aa_desc = ", AA";
2197 }
2198
2199 if (hdepth >= ddepth)
2200 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2201 else
2202 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2203 ddepth, aa_desc);
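/*
 * Example: with ATA_MAX_QUEUE of 32, a drive advertising queue depth
 * 32 is typically reported as "NCQ (depth 31/32)", since the host
 * side is capped at ATA_MAX_QUEUE - 1 tags.
 */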
2204
2205 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2206 if (ata_id_has_ncq_send_and_recv(dev->id))
2207 ata_dev_config_ncq_send_recv(dev);
2208 if (ata_id_has_ncq_non_data(dev->id))
2209 ata_dev_config_ncq_non_data(dev);
2210 }
2211
2212 return 0;
2213 }
2214
2215 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2216 {
2217 unsigned int err_mask;
2218
2219 if (!ata_id_has_sense_reporting(dev->id))
2220 return;
2221
2222 if (ata_id_sense_reporting_enabled(dev->id))
2223 return;
2224
2225 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2226 if (err_mask) {
2227 ata_dev_dbg(dev,
2228 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2229 err_mask);
2230 }
2231 }
2232
2233 static void ata_dev_config_zac(struct ata_device *dev)
2234 {
2235 struct ata_port *ap = dev->link->ap;
2236 unsigned int err_mask;
2237 u8 *identify_buf = ap->sector_buf;
2238 int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
2239 u16 log_pages;
2240
2241 dev->zac_zones_optimal_open = U32_MAX;
2242 dev->zac_zones_optimal_nonseq = U32_MAX;
2243 dev->zac_zones_max_open = U32_MAX;
2244
2245 /*
2246 * Always set the 'ZAC' flag for Host-managed devices.
2247 */
2248 if (dev->class == ATA_DEV_ZAC)
2249 dev->flags |= ATA_DFLAG_ZAC;
2250 else if (ata_id_zoned_cap(dev->id) == 0x01)
2251 /*
2252 * Check for host-aware devices.
2253 */
2254 dev->flags |= ATA_DFLAG_ZAC;
2255
2256 if (!(dev->flags & ATA_DFLAG_ZAC))
2257 return;
2258
2259 /*
2260 * Read Log Directory to figure out if IDENTIFY DEVICE log
2261 * is supported.
2262 */
2263 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2264 0, ap->sector_buf, 1);
2265 if (err_mask) {
2266 ata_dev_info(dev,
2267 "failed to get Log Directory Emask 0x%x\n",
2268 err_mask);
2269 return;
2270 }
2271 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2272 if (log_pages == 0) {
2273 ata_dev_warn(dev,
2274 "ATA Identify Device Log not supported\n");
2275 return;
2276 }
2277 /*
2278 * Read IDENTIFY DEVICE data log, page 0, to figure out
2279 * if page 9 is supported.
2280 */
2281 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0,
2282 identify_buf, 1);
2283 if (err_mask) {
2284 ata_dev_info(dev,
2285 "failed to get Device Identify Log Emask 0x%x\n",
2286 err_mask);
2287 return;
2288 }
2289 log_pages = identify_buf[8];
2290 for (i = 0; i < log_pages; i++) {
2291 if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) {
2292 found++;
2293 break;
2294 }
2295 }
2296 if (!found) {
2297 ata_dev_warn(dev,
2298 "ATA Zoned Information Log not supported\n");
2299 return;
2300 }
2301
2302 /*
2303 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2304 */
2305 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
2306 ATA_LOG_ZONED_INFORMATION,
2307 identify_buf, 1);
2308 if (!err_mask) {
2309 u64 zoned_cap, opt_open, opt_nonseq, max_open;
2310
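/*
 * Each 64-bit field in the Zoned Device Information page carries a
 * "valid" flag in bit 63; only fields with that bit set overwrite
 * the defaults initialized above.
 */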
2311 zoned_cap = get_unaligned_le64(&identify_buf[8]);
2312 if ((zoned_cap >> 63))
2313 dev->zac_zoned_cap = (zoned_cap & 1);
2314 opt_open = get_unaligned_le64(&identify_buf[24]);
2315 if ((opt_open >> 63))
2316 dev->zac_zones_optimal_open = (u32)opt_open;
2317 opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2318 if ((opt_nonseq >> 63))
2319 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2320 max_open = get_unaligned_le64(&identify_buf[40]);
2321 if ((max_open >> 63))
2322 dev->zac_zones_max_open = (u32)max_open;
2323 }
2324 }
2325
2326 /**
2327 * ata_dev_configure - Configure the specified ATA/ATAPI device
2328 * @dev: Target device to configure
2329 *
2330 * Configure @dev according to @dev->id. Generic and low-level
2331 * driver specific fixups are also applied.
2332 *
2333 * LOCKING:
2334 * Kernel thread context (may sleep)
2335 *
2336 * RETURNS:
2337 * 0 on success, -errno otherwise
2338 */
2339 int ata_dev_configure(struct ata_device *dev)
2340 {
2341 struct ata_port *ap = dev->link->ap;
2342 struct ata_eh_context *ehc = &dev->link->eh_context;
2343 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2344 const u16 *id = dev->id;
2345 unsigned long xfer_mask;
2346 unsigned int err_mask;
2347 char revbuf[7]; /* XYZ-99\0 */
2348 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2349 char modelbuf[ATA_ID_PROD_LEN+1];
2350 int rc;
2351
2352 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2353 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2354 return 0;
2355 }
2356
2357 if (ata_msg_probe(ap))
2358 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2359
2360 /* set horkage */
2361 dev->horkage |= ata_dev_blacklisted(dev);
2362 ata_force_horkage(dev);
2363
2364 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2365 ata_dev_info(dev, "unsupported device, disabling\n");
2366 ata_dev_disable(dev);
2367 return 0;
2368 }
2369
2370 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2371 dev->class == ATA_DEV_ATAPI) {
2372 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2373 atapi_enabled ? "not supported with this driver"
2374 : "disabled");
2375 ata_dev_disable(dev);
2376 return 0;
2377 }
2378
2379 rc = ata_do_link_spd_horkage(dev);
2380 if (rc)
2381 return rc;
2382
2383 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2384 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2385 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2386 dev->horkage |= ATA_HORKAGE_NOLPM;
2387
2388 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2389 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2390 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2391 }
2392
2393 /* let ACPI work its magic */
2394 rc = ata_acpi_on_devcfg(dev);
2395 if (rc)
2396 return rc;
2397
2398 /* massage HPA, do it early as it might change IDENTIFY data */
2399 rc = ata_hpa_resize(dev);
2400 if (rc)
2401 return rc;
2402
2403 /* print device capabilities */
2404 if (ata_msg_probe(ap))
2405 ata_dev_dbg(dev,
2406 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2407 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2408 __func__,
2409 id[49], id[82], id[83], id[84],
2410 id[85], id[86], id[87], id[88]);
2411
2412 /* initialize to-be-configured parameters */
2413 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2414 dev->max_sectors = 0;
2415 dev->cdb_len = 0;
2416 dev->n_sectors = 0;
2417 dev->cylinders = 0;
2418 dev->heads = 0;
2419 dev->sectors = 0;
2420 dev->multi_count = 0;
2421
2422 /*
2423 * common ATA, ATAPI feature tests
2424 */
2425
2426 /* find max transfer mode; for printk only */
2427 xfer_mask = ata_id_xfermask(id);
2428
2429 if (ata_msg_probe(ap))
2430 ata_dump_id(id);
2431
2432 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2433 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2434 sizeof(fwrevbuf));
2435
2436 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2437 sizeof(modelbuf));
2438
2439 /* ATA-specific feature tests */
2440 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2441 if (ata_id_is_cfa(id)) {
2442 /* CPRM may make this media unusable */
2443 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2444 ata_dev_warn(dev,
2445 "supports DRM functions and may not be fully accessible\n");
2446 snprintf(revbuf, 7, "CFA");
2447 } else {
2448 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2449 /* Warn the user if the device has TPM extensions */
2450 if (ata_id_has_tpm(id))
2451 ata_dev_warn(dev,
2452 "supports DRM functions and may not be fully accessible\n");
2453 }
2454
2455 dev->n_sectors = ata_id_n_sectors(id);
2456
2457 /* get current R/W Multiple count setting */
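/*
 * IDENTIFY word 47 (low byte) is the maximum sectors per R/W MULTIPLE
 * command; word 59 bit 8 indicates that the current setting in its
 * low byte is valid.
 */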
2458 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2459 unsigned int max = dev->id[47] & 0xff;
2460 unsigned int cnt = dev->id[59] & 0xff;
2461 /* only recognize/allow powers of two here */
2462 if (is_power_of_2(max) && is_power_of_2(cnt))
2463 if (cnt <= max)
2464 dev->multi_count = cnt;
2465 }
2466
2467 if (ata_id_has_lba(id)) {
2468 const char *lba_desc;
2469 char ncq_desc[24];
2470
2471 lba_desc = "LBA";
2472 dev->flags |= ATA_DFLAG_LBA;
2473 if (ata_id_has_lba48(id)) {
2474 dev->flags |= ATA_DFLAG_LBA48;
2475 lba_desc = "LBA48";
2476
2477 if (dev->n_sectors >= (1UL << 28) &&
2478 ata_id_has_flush_ext(id))
2479 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2480 }
2481
2482 /* config NCQ */
2483 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2484 if (rc)
2485 return rc;
2486
2487 /* print device info to dmesg */
2488 if (ata_msg_drv(ap) && print_info) {
2489 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2490 revbuf, modelbuf, fwrevbuf,
2491 ata_mode_string(xfer_mask));
2492 ata_dev_info(dev,
2493 "%llu sectors, multi %u: %s %s\n",
2494 (unsigned long long)dev->n_sectors,
2495 dev->multi_count, lba_desc, ncq_desc);
2496 }
2497 } else {
2498 /* CHS */
2499
2500 /* Default translation */
2501 dev->cylinders = id[1];
2502 dev->heads = id[3];
2503 dev->sectors = id[6];
2504
2505 if (ata_id_current_chs_valid(id)) {
2506 /* Current CHS translation is valid. */
2507 dev->cylinders = id[54];
2508 dev->heads = id[55];
2509 dev->sectors = id[56];
2510 }
2511
2512 /* print device info to dmesg */
2513 if (ata_msg_drv(ap) && print_info) {
2514 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2515 revbuf, modelbuf, fwrevbuf,
2516 ata_mode_string(xfer_mask));
2517 ata_dev_info(dev,
2518 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2519 (unsigned long long)dev->n_sectors,
2520 dev->multi_count, dev->cylinders,
2521 dev->heads, dev->sectors);
2522 }
2523 }
2524
2525 /* Check and mark DevSlp capability. Get DevSlp timing variables
2526 * from SATA Settings page of Identify Device Data Log.
2527 */
2528 if (ata_id_has_devslp(dev->id)) {
2529 u8 *sata_setting = ap->sector_buf;
2530 int i, j;
2531
2532 dev->flags |= ATA_DFLAG_DEVSLP;
2533 err_mask = ata_read_log_page(dev,
2534 ATA_LOG_SATA_ID_DEV_DATA,
2535 ATA_LOG_SATA_SETTINGS,
2536 sata_setting,
2537 1);
2538 if (err_mask)
2539 ata_dev_dbg(dev,
2540 "failed to get Identify Device Data, Emask 0x%x\n",
2541 err_mask);
2542 else
2543 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2544 j = ATA_LOG_DEVSLP_OFFSET + i;
2545 dev->devslp_timing[i] = sata_setting[j];
2546 }
2547 }
2548 ata_dev_config_sense_reporting(dev);
2549 ata_dev_config_zac(dev);
2550 dev->cdb_len = 16;
2551 }
2552
2553 /* ATAPI-specific feature tests */
2554 else if (dev->class == ATA_DEV_ATAPI) {
2555 const char *cdb_intr_string = "";
2556 const char *atapi_an_string = "";
2557 const char *dma_dir_string = "";
2558 u32 sntf;
2559
2560 rc = atapi_cdb_len(id);
2561 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2562 if (ata_msg_warn(ap))
2563 ata_dev_warn(dev, "unsupported CDB len\n");
2564 rc = -EINVAL;
2565 goto err_out_nosup;
2566 }
2567 dev->cdb_len = (unsigned int) rc;
2568
2569 /* Enable ATAPI AN if both the host and device have
2570 * the support. If PMP is attached, SNTF is required
2571 * to enable ATAPI AN to discern between PHY status
2572 * changed notifications and ATAPI ANs.
2573 */
2574 if (atapi_an &&
2575 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2576 (!sata_pmp_attached(ap) ||
2577 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2578 /* issue SET feature command to turn this on */
2579 err_mask = ata_dev_set_feature(dev,
2580 SETFEATURES_SATA_ENABLE, SATA_AN);
2581 if (err_mask)
2582 ata_dev_err(dev,
2583 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2584 err_mask);
2585 else {
2586 dev->flags |= ATA_DFLAG_AN;
2587 atapi_an_string = ", ATAPI AN";
2588 }
2589 }
2590
2591 if (ata_id_cdb_intr(dev->id)) {
2592 dev->flags |= ATA_DFLAG_CDB_INTR;
2593 cdb_intr_string = ", CDB intr";
2594 }
2595
2596 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2597 dev->flags |= ATA_DFLAG_DMADIR;
2598 dma_dir_string = ", DMADIR";
2599 }
2600
2601 if (ata_id_has_da(dev->id)) {
2602 dev->flags |= ATA_DFLAG_DA;
2603 zpodd_init(dev);
2604 }
2605
2606 /* print device info to dmesg */
2607 if (ata_msg_drv(ap) && print_info)
2608 ata_dev_info(dev,
2609 "ATAPI: %s, %s, max %s%s%s%s\n",
2610 modelbuf, fwrevbuf,
2611 ata_mode_string(xfer_mask),
2612 cdb_intr_string, atapi_an_string,
2613 dma_dir_string);
2614 }
2615
2616 /* determine max_sectors */
2617 dev->max_sectors = ATA_MAX_SECTORS;
2618 if (dev->flags & ATA_DFLAG_LBA48)
2619 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2620
2621 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2622 200 sectors */
2623 if (ata_dev_knobble(dev)) {
2624 if (ata_msg_drv(ap) && print_info)
2625 ata_dev_info(dev, "applying bridge limits\n");
2626 dev->udma_mask &= ATA_UDMA5;
2627 dev->max_sectors = ATA_MAX_SECTORS;
2628 }
2629
2630 if ((dev->class == ATA_DEV_ATAPI) &&
2631 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2632 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2633 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2634 }
2635
2636 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2637 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2638 dev->max_sectors);
2639
2640 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2641 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2642 dev->max_sectors);
2643
2644 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2645 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2646
2647 if (ap->ops->dev_config)
2648 ap->ops->dev_config(dev);
2649
2650 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2651 /* Let the user know. We don't want to disallow opens for
2652 rescue purposes, or in case the vendor is just a blithering
2653 idiot. Do this after the dev_config call as some controllers
2654 with buggy firmware may want to avoid reporting false device
2655 bugs */
2656
2657 if (print_info) {
2658 ata_dev_warn(dev,
2659 "Drive reports diagnostics failure. This may indicate a drive\n");
2660 ata_dev_warn(dev,
2661 "fault or invalid emulation. Contact drive vendor for information.\n");
2662 }
2663 }
2664
2665 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2666 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2667 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2668 }
2669
2670 return 0;
2671
2672 err_out_nosup:
2673 if (ata_msg_probe(ap))
2674 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2675 return rc;
2676 }
2677
2678 /**
2679 * ata_cable_40wire - return 40 wire cable type
2680 * @ap: port
2681 *
2682 * Helper method for drivers which want to hardwire 40 wire cable
2683 * detection.
2684 */
2685
2686 int ata_cable_40wire(struct ata_port *ap)
2687 {
2688 return ATA_CBL_PATA40;
2689 }
2690
2691 /**
2692 * ata_cable_80wire - return 80 wire cable type
2693 * @ap: port
2694 *
2695 * Helper method for drivers which want to hardwire 80 wire cable
2696 * detection.
2697 */
2698
2699 int ata_cable_80wire(struct ata_port *ap)
2700 {
2701 return ATA_CBL_PATA80;
2702 }
2703
2704 /**
2705 * ata_cable_unknown - return unknown PATA cable.
2706 * @ap: port
2707 *
2708 * Helper method for drivers which have no PATA cable detection.
2709 */
2710
2711 int ata_cable_unknown(struct ata_port *ap)
2712 {
2713 return ATA_CBL_PATA_UNK;
2714 }
2715
2716 /**
2717 * ata_cable_ignore - return ignored PATA cable.
2718 * @ap: port
2719 *
2720 * Helper method for drivers which don't use cable type to limit
2721 * transfer mode.
2722 */
2723 int ata_cable_ignore(struct ata_port *ap)
2724 {
2725 return ATA_CBL_PATA_IGN;
2726 }
2727
2728 /**
2729 * ata_cable_sata - return SATA cable type
2730 * @ap: port
2731 *
2732 * Helper method for drivers which have SATA cables
2733 */
2734
2735 int ata_cable_sata(struct ata_port *ap)
2736 {
2737 return ATA_CBL_SATA;
2738 }
2739
2740 /**
2741 * ata_bus_probe - Reset and probe ATA bus
2742 * @ap: Bus to probe
2743 *
2744 * Master ATA bus probing function. Initiates a hardware-dependent
2745 * bus reset, then attempts to identify any devices found on
2746 * the bus.
2747 *
2748 * LOCKING:
2749 * PCI/etc. bus probe sem.
2750 *
2751 * RETURNS:
2752 * Zero on success, negative errno otherwise.
2753 */
2754
2755 int ata_bus_probe(struct ata_port *ap)
2756 {
2757 unsigned int classes[ATA_MAX_DEVICES];
2758 int tries[ATA_MAX_DEVICES];
2759 int rc;
2760 struct ata_device *dev;
2761
2762 ata_for_each_dev(dev, &ap->link, ALL)
2763 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2764
2765 retry:
2766 ata_for_each_dev(dev, &ap->link, ALL) {
2767 /* If we issue an SRST then an ATA drive (not ATAPI)
2768 * may change configuration and be in PIO0 timing. If
2769 * we do a hard reset (or are coming from power on)
2770 * this is true for ATA or ATAPI. Until we've set a
2771 * suitable controller mode we should not touch the
2772 * bus as we may be talking too fast.
2773 */
2774 dev->pio_mode = XFER_PIO_0;
2775 dev->dma_mode = 0xff;
2776
2777 /* If the controller has a pio mode setup function
2778 * then use it to set the chipset to rights. Don't
2779 * touch the DMA setup as that will be dealt with when
2780 * configuring devices.
2781 */
2782 if (ap->ops->set_piomode)
2783 ap->ops->set_piomode(ap, dev);
2784 }
2785
2786 /* reset and determine device classes */
2787 ap->ops->phy_reset(ap);
2788
2789 ata_for_each_dev(dev, &ap->link, ALL) {
2790 if (dev->class != ATA_DEV_UNKNOWN)
2791 classes[dev->devno] = dev->class;
2792 else
2793 classes[dev->devno] = ATA_DEV_NONE;
2794
2795 dev->class = ATA_DEV_UNKNOWN;
2796 }
2797
2798 /* read IDENTIFY page and configure devices. We have to do the identify
2799 specific sequence bass-ackwards so that PDIAG- is released by
2800 the slave device */
2801
2802 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2803 if (tries[dev->devno])
2804 dev->class = classes[dev->devno];
2805
2806 if (!ata_dev_enabled(dev))
2807 continue;
2808
2809 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2810 dev->id);
2811 if (rc)
2812 goto fail;
2813 }
2814
2815 /* Now ask for the cable type as PDIAG- should have been released */
2816 if (ap->ops->cable_detect)
2817 ap->cbl = ap->ops->cable_detect(ap);
2818
2819 /* We may have SATA bridge glue hiding here irrespective of
2820 * the reported cable types and sensed types. When SATA
2821 * drives indicate we have a bridge, we don't know which end
2822 * of the link the bridge is on, which is a problem.
2823 */
2824 ata_for_each_dev(dev, &ap->link, ENABLED)
2825 if (ata_id_is_sata(dev->id))
2826 ap->cbl = ATA_CBL_SATA;
2827
2828 /* After the identify sequence we can now set up the devices. We do
2829 this in the normal order so that the user doesn't get confused */
2830
2831 ata_for_each_dev(dev, &ap->link, ENABLED) {
2832 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2833 rc = ata_dev_configure(dev);
2834 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2835 if (rc)
2836 goto fail;
2837 }
2838
2839 /* configure transfer mode */
2840 rc = ata_set_mode(&ap->link, &dev);
2841 if (rc)
2842 goto fail;
2843
2844 ata_for_each_dev(dev, &ap->link, ENABLED)
2845 return 0;
2846
2847 return -ENODEV;
2848
2849 fail:
2850 tries[dev->devno]--;
2851
2852 switch (rc) {
2853 case -EINVAL:
2854 /* eeek, something went very wrong, give up */
2855 tries[dev->devno] = 0;
2856 break;
2857
2858 case -ENODEV:
2859 /* give it just one more chance */
2860 tries[dev->devno] = min(tries[dev->devno], 1);
2861 case -EIO:
2862 if (tries[dev->devno] == 1) {
2863 /* This is the last chance, better to slow
2864 * down than lose it.
2865 */
2866 sata_down_spd_limit(&ap->link, 0);
2867 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2868 }
2869 }
2870
2871 if (!tries[dev->devno])
2872 ata_dev_disable(dev);
2873
2874 goto retry;
2875 }
2876
2877 /**
2878 * sata_print_link_status - Print SATA link status
2879 * @link: SATA link to printk link status about
2880 *
2881 * This function prints link speed and status of a SATA link.
2882 *
2883 * LOCKING:
2884 * None.
2885 */
2886 static void sata_print_link_status(struct ata_link *link)
2887 {
2888 u32 sstatus, scontrol, tmp;
2889
2890 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2891 return;
2892 sata_scr_read(link, SCR_CONTROL, &scontrol);
2893
2894 if (ata_phys_link_online(link)) {
2895 tmp = (sstatus >> 4) & 0xf;
2896 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2897 sata_spd_string(tmp), sstatus, scontrol);
2898 } else {
2899 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2900 sstatus, scontrol);
2901 }
2902 }
2903
2904 /**
2905 * ata_dev_pair - return other device on cable
2906 * @adev: device
2907 *
2908 * Obtain the other device on the same cable, or if none is
2909 * present, NULL is returned.
2910 */
2911
2912 struct ata_device *ata_dev_pair(struct ata_device *adev)
2913 {
2914 struct ata_link *link = adev->link;
2915 struct ata_device *pair = &link->device[1 - adev->devno];
2916 if (!ata_dev_enabled(pair))
2917 return NULL;
2918 return pair;
2919 }
2920
2921 /**
2922 * sata_down_spd_limit - adjust SATA spd limit downward
2923 * @link: Link to adjust SATA spd limit for
2924 * @spd_limit: Additional limit
2925 *
2926 * Adjust SATA spd limit of @link downward. Note that this
2927 * function only adjusts the limit. The change must be applied
2928 * using sata_set_spd().
2929 *
2930 * If @spd_limit is non-zero, the speed is limited to @spd_limit
2931 * or lower, if such a speed is supported. If
2932 * @spd_limit is slower than any supported speed, only the lowest
2933 * supported speed is allowed.
2934 *
2935 * LOCKING:
2936 * Inherited from caller.
2937 *
2938 * RETURNS:
2939 * 0 on success, negative errno on failure
2940 */
2941 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2942 {
2943 u32 sstatus, spd, mask;
2944 int rc, bit;
2945
2946 if (!sata_scr_valid(link))
2947 return -EOPNOTSUPP;
2948
2949 /* If SCR can be read, use it to determine the current SPD.
2950 * If not, use cached value in link->sata_spd.
2951 */
2952 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2953 if (rc == 0 && ata_sstatus_online(sstatus))
2954 spd = (sstatus >> 4) & 0xf;
2955 else
2956 spd = link->sata_spd;
2957
2958 mask = link->sata_spd_limit;
2959 if (mask <= 1)
2960 return -EINVAL;
2961
2962 /* unconditionally mask off the highest bit */
2963 bit = fls(mask) - 1;
2964 mask &= ~(1 << bit);
2965
2966 /* Mask off all speeds higher than or equal to the current
2967 * one. Force 1.5Gbps if current SPD is not available.
2968 */
2969 if (spd > 1)
2970 mask &= (1 << (spd - 1)) - 1;
2971 else
2972 mask &= 1;
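/*
 * Example: a limit mask of 0x7 (up to 6.0 Gbps) with the link
 * currently at spd 3 ends up as 0x3, i.e. a new ceiling of 3.0 Gbps.
 */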
2973
2974 /* were we already at the bottom? */
2975 if (!mask)
2976 return -EINVAL;
2977
2978 if (spd_limit) {
2979 if (mask & ((1 << spd_limit) - 1))
2980 mask &= (1 << spd_limit) - 1;
2981 else {
2982 bit = ffs(mask) - 1;
2983 mask = 1 << bit;
2984 }
2985 }
2986
2987 link->sata_spd_limit = mask;
2988
2989 ata_link_warn(link, "limiting SATA link speed to %s\n",
2990 sata_spd_string(fls(mask)));
2991
2992 return 0;
2993 }
2994
2995 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2996 {
2997 struct ata_link *host_link = &link->ap->link;
2998 u32 limit, target, spd;
2999
3000 limit = link->sata_spd_limit;
3001
3002 /* Don't configure downstream link faster than upstream link.
3003 * It doesn't speed up anything and some PMPs choke on such
3004 * configuration.
3005 */
3006 if (!ata_is_host_link(link) && host_link->sata_spd)
3007 limit &= (1 << host_link->sata_spd) - 1;
3008
3009 if (limit == UINT_MAX)
3010 target = 0;
3011 else
3012 target = fls(limit);
3013
3014 spd = (*scontrol >> 4) & 0xf;
3015 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
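/*
 * SControl bits 7:4 hold the allowed-speed cap: e.g. a limit mask of
 * 0x3 gives target = fls(0x3) = 2, capping the link at 3.0 Gbps.
 */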
3016
3017 return spd != target;
3018 }
3019
3020 /**
3021 * sata_set_spd_needed - is SATA spd configuration needed
3022 * @link: Link in question
3023 *
3024 * Test whether the spd limit in SControl matches
3025 * @link->sata_spd_limit. This function is used to determine
3026 * whether hardreset is necessary to apply SATA spd
3027 * configuration.
3028 *
3029 * LOCKING:
3030 * Inherited from caller.
3031 *
3032 * RETURNS:
3033 * 1 if SATA spd configuration is needed, 0 otherwise.
3034 */
3035 static int sata_set_spd_needed(struct ata_link *link)
3036 {
3037 u32 scontrol;
3038
3039 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3040 return 1;
3041
3042 return __sata_set_spd_needed(link, &scontrol);
3043 }
3044
3045 /**
3046 * sata_set_spd - set SATA spd according to spd limit
3047 * @link: Link to set SATA spd for
3048 *
3049 * Set SATA spd of @link according to sata_spd_limit.
3050 *
3051 * LOCKING:
3052 * Inherited from caller.
3053 *
3054 * RETURNS:
3055 * 0 if spd doesn't need to be changed, 1 if spd has been
3056 * changed. Negative errno if SCR registers are inaccessible.
3057 */
3058 int sata_set_spd(struct ata_link *link)
3059 {
3060 u32 scontrol;
3061 int rc;
3062
3063 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3064 return rc;
3065
3066 if (!__sata_set_spd_needed(link, &scontrol))
3067 return 0;
3068
3069 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3070 return rc;
3071
3072 return 1;
3073 }
3074
3075 /*
3076 * This mode timing computation functionality is ported over from
3077 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3078 */
3079 /*
3080 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3081 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3082 * for UDMA6, which is currently supported only by Maxtor drives.
3083 *
3084 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3085 */
3086
3087 static const struct ata_timing ata_timing[] = {
3088 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
3089 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
3090 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
3091 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
3092 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
3093 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
3094 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
3095 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
3096
3097 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
3098 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
3099 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
3100
3101 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
3102 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
3103 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
3104 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
3105 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
3106
3107 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3108 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
3109 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
3110 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
3111 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
3112 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
3113 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
3114 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
3115
3116 { 0xFF }
3117 };
3118
3119 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3120 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
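/*
 * ENOUGH() rounds a duration up to whole clock periods and EZ() leaves
 * unspecified (zero) values untouched.  With the ~30000 ps clock period
 * the PATA drivers typically pass in as T, a 240 ns cycle quantizes to
 * ENOUGH(240 * 1000, 30000) = 8 clocks.
 */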
3121
3122 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3123 {
3124 q->setup = EZ(t->setup * 1000, T);
3125 q->act8b = EZ(t->act8b * 1000, T);
3126 q->rec8b = EZ(t->rec8b * 1000, T);
3127 q->cyc8b = EZ(t->cyc8b * 1000, T);
3128 q->active = EZ(t->active * 1000, T);
3129 q->recover = EZ(t->recover * 1000, T);
3130 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
3131 q->cycle = EZ(t->cycle * 1000, T);
3132 q->udma = EZ(t->udma * 1000, UT);
3133 }
3134
3135 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3136 struct ata_timing *m, unsigned int what)
3137 {
3138 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3139 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3140 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3141 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3142 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3143 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3144 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3145 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3146 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3147 }
3148
3149 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3150 {
3151 const struct ata_timing *t = ata_timing;
3152
3153 while (xfer_mode > t->mode)
3154 t++;
3155
3156 if (xfer_mode == t->mode)
3157 return t;
3158
3159 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3160 __func__, xfer_mode);
3161
3162 return NULL;
3163 }
3164
3165 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3166 struct ata_timing *t, int T, int UT)
3167 {
3168 const u16 *id = adev->id;
3169 const struct ata_timing *s;
3170 struct ata_timing p;
3171
3172 /*
3173 * Find the mode.
3174 */
3175
3176 if (!(s = ata_timing_find_mode(speed)))
3177 return -EINVAL;
3178
3179 memcpy(t, s, sizeof(*s));
3180
3181 /*
3182 * If the drive is an EIDE drive, it can tell us it needs extended
3183 * PIO/MW_DMA cycle timing.
3184 */
3185
3186 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3187 memset(&p, 0, sizeof(p));
3188
3189 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3190 if (speed <= XFER_PIO_2)
3191 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3192 else if ((speed <= XFER_PIO_4) ||
3193 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3194 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3195 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3196 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3197
3198 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3199 }
3200
3201 /*
3202 * Convert the timing to bus clock counts.
3203 */
3204
3205 ata_timing_quantize(t, t, T, UT);
3206
3207 /*
3208 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3209 * S.M.A.R.T. and some other commands. We have to ensure that the
3210 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3211 */
3212
3213 if (speed > XFER_PIO_6) {
3214 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3215 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3216 }
3217
3218 /*
3219 * Lengthen active & recovery time so that cycle time is correct.
3220 */
3221
3222 if (t->act8b + t->rec8b < t->cyc8b) {
3223 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3224 t->rec8b = t->cyc8b - t->act8b;
3225 }
3226
3227 if (t->active + t->recover < t->cycle) {
3228 t->active += (t->cycle - (t->active + t->recover)) / 2;
3229 t->recover = t->cycle - t->active;
3230 }
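/*
 * Example: active = 70, recover = 25, cycle = 120 becomes
 * active = 82, recover = 38 so that active + recover == cycle.
 */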
3231
3232 /* In a few cases quantisation may produce enough errors to
3233 leave t->cycle too low for the sum of active and recovery;
3234 if so, we must correct this. */
3235 if (t->active + t->recover > t->cycle)
3236 t->cycle = t->active + t->recover;
3237
3238 return 0;
3239 }
3240
3241 /**
3242 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3243 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3244 * @cycle: cycle duration in ns
3245 *
3246 * Return matching xfer mode for @cycle. The returned mode is of
3247 * the transfer type specified by @xfer_shift. If @cycle is too
3248 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3249 * than the fastest known mode, the fastest mode is returned.
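*
* For example, ata_timing_cycle2mode(ATA_SHIFT_PIO, 120) returns
* XFER_PIO_4 (120 ns cycle), while a @cycle slower than PIO0's
* 600 ns returns 0xff.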
3250 *
3251 * LOCKING:
3252 * None.
3253 *
3254 * RETURNS:
3255 * Matching xfer_mode, 0xff if no match found.
3256 */
3257 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3258 {
3259 u8 base_mode = 0xff, last_mode = 0xff;
3260 const struct ata_xfer_ent *ent;
3261 const struct ata_timing *t;
3262
3263 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3264 if (ent->shift == xfer_shift)
3265 base_mode = ent->base;
3266
3267 for (t = ata_timing_find_mode(base_mode);
3268 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3269 unsigned short this_cycle;
3270
3271 switch (xfer_shift) {
3272 case ATA_SHIFT_PIO:
3273 case ATA_SHIFT_MWDMA:
3274 this_cycle = t->cycle;
3275 break;
3276 case ATA_SHIFT_UDMA:
3277 this_cycle = t->udma;
3278 break;
3279 default:
3280 return 0xff;
3281 }
3282
3283 if (cycle > this_cycle)
3284 break;
3285
3286 last_mode = t->mode;
3287 }
3288
3289 return last_mode;
3290 }
3291
3292 /**
3293 * ata_down_xfermask_limit - adjust dev xfer masks downward
3294 * @dev: Device to adjust xfer masks
3295 * @sel: ATA_DNXFER_* selector
3296 *
3297 * Adjust xfer masks of @dev downward. Note that this function
3298 * does not apply the change. Invoking ata_set_mode() afterwards
3299 * will apply the limit.
3300 *
3301 * LOCKING:
3302 * Inherited from caller.
3303 *
3304 * RETURNS:
3305 * 0 on success, negative errno on failure
3306 */
3307 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3308 {
3309 char buf[32];
3310 unsigned long orig_mask, xfer_mask;
3311 unsigned long pio_mask, mwdma_mask, udma_mask;
3312 int quiet, highbit;
3313
3314 quiet = !!(sel & ATA_DNXFER_QUIET);
3315 sel &= ~ATA_DNXFER_QUIET;
3316
3317 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3318 dev->mwdma_mask,
3319 dev->udma_mask);
3320 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3321
3322 switch (sel) {
3323 case ATA_DNXFER_PIO:
3324 highbit = fls(pio_mask) - 1;
3325 pio_mask &= ~(1 << highbit);
3326 break;
3327
3328 case ATA_DNXFER_DMA:
3329 if (udma_mask) {
3330 highbit = fls(udma_mask) - 1;
3331 udma_mask &= ~(1 << highbit);
3332 if (!udma_mask)
3333 return -ENOENT;
3334 } else if (mwdma_mask) {
3335 highbit = fls(mwdma_mask) - 1;
3336 mwdma_mask &= ~(1 << highbit);
3337 if (!mwdma_mask)
3338 return -ENOENT;
3339 }
3340 break;
3341
3342 case ATA_DNXFER_40C:
3343 udma_mask &= ATA_UDMA_MASK_40C;
3344 break;
3345
3346 case ATA_DNXFER_FORCE_PIO0:
3347 pio_mask &= 1;
3348 case ATA_DNXFER_FORCE_PIO:
3349 mwdma_mask = 0;
3350 udma_mask = 0;
3351 break;
3352
3353 default:
3354 BUG();
3355 }
3356
3357 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3358
3359 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3360 return -ENOENT;
3361
3362 if (!quiet) {
3363 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3364 snprintf(buf, sizeof(buf), "%s:%s",
3365 ata_mode_string(xfer_mask),
3366 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3367 else
3368 snprintf(buf, sizeof(buf), "%s",
3369 ata_mode_string(xfer_mask));
3370
3371 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3372 }
3373
3374 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3375 &dev->udma_mask);
3376
3377 return 0;
3378 }
3379
3380 static int ata_dev_set_mode(struct ata_device *dev)
3381 {
3382 struct ata_port *ap = dev->link->ap;
3383 struct ata_eh_context *ehc = &dev->link->eh_context;
3384 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3385 const char *dev_err_whine = "";
3386 int ign_dev_err = 0;
3387 unsigned int err_mask = 0;
3388 int rc;
3389
3390 dev->flags &= ~ATA_DFLAG_PIO;
3391 if (dev->xfer_shift == ATA_SHIFT_PIO)
3392 dev->flags |= ATA_DFLAG_PIO;
3393
3394 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3395 dev_err_whine = " (SET_XFERMODE skipped)";
3396 else {
3397 if (nosetxfer)
3398 ata_dev_warn(dev,
3399 "NOSETXFER but PATA detected - can't "
3400 "skip SETXFER, might malfunction\n");
3401 err_mask = ata_dev_set_xfermode(dev);
3402 }
3403
3404 if (err_mask & ~AC_ERR_DEV)
3405 goto fail;
3406
3407 /* revalidate */
3408 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3409 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3410 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3411 if (rc)
3412 return rc;
3413
3414 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3415 /* Old CFA may refuse this command, which is just fine */
3416 if (ata_id_is_cfa(dev->id))
3417 ign_dev_err = 1;
3418 /* Catch several broken garbage emulations plus some pre
3419 ATA devices */
3420 if (ata_id_major_version(dev->id) == 0 &&
3421 dev->pio_mode <= XFER_PIO_2)
3422 ign_dev_err = 1;
3423 /* Some very old devices and some bad newer ones fail
3424 any kind of SET_XFERMODE request but support PIO0-2
3425 timings and no IORDY */
3426 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3427 ign_dev_err = 1;
3428 }
3429 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3430 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3431 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3432 dev->dma_mode == XFER_MW_DMA_0 &&
3433 (dev->id[63] >> 8) & 1)
3434 ign_dev_err = 1;
3435
3436 /* if the device is actually configured correctly, ignore dev err */
3437 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3438 ign_dev_err = 1;
3439
3440 if (err_mask & AC_ERR_DEV) {
3441 if (!ign_dev_err)
3442 goto fail;
3443 else
3444 dev_err_whine = " (device error ignored)";
3445 }
3446
3447 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3448 dev->xfer_shift, (int)dev->xfer_mode);
3449
3450 ata_dev_info(dev, "configured for %s%s\n",
3451 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3452 dev_err_whine);
3453
3454 return 0;
3455
3456 fail:
3457 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3458 return -EIO;
3459 }
3460
3461 /**
3462 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3463 * @link: link on which timings will be programmed
3464 * @r_failed_dev: out parameter for failed device
3465 *
3466 * Standard implementation of the function used to tune and set
3467 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3468 * ata_dev_set_mode() fails, pointer to the failing device is
3469 * returned in @r_failed_dev.
3470 *
3471 * LOCKING:
3472 * PCI/etc. bus probe sem.
3473 *
3474 * RETURNS:
3475 * 0 on success, negative errno otherwise
3476 */
3477
3478 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3479 {
3480 struct ata_port *ap = link->ap;
3481 struct ata_device *dev;
3482 int rc = 0, used_dma = 0, found = 0;
3483
3484 /* step 1: calculate xfer_mask */
3485 ata_for_each_dev(dev, link, ENABLED) {
3486 unsigned long pio_mask, dma_mask;
3487 unsigned int mode_mask;
3488
3489 mode_mask = ATA_DMA_MASK_ATA;
3490 if (dev->class == ATA_DEV_ATAPI)
3491 mode_mask = ATA_DMA_MASK_ATAPI;
3492 else if (ata_id_is_cfa(dev->id))
3493 mode_mask = ATA_DMA_MASK_CFA;
3494
3495 ata_dev_xfermask(dev);
3496 ata_force_xfermask(dev);
3497
3498 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3499
3500 if (libata_dma_mask & mode_mask)
3501 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3502 dev->udma_mask);
3503 else
3504 dma_mask = 0;
3505
3506 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3507 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3508
3509 found = 1;
3510 if (ata_dma_enabled(dev))
3511 used_dma = 1;
3512 }
3513 if (!found)
3514 goto out;
3515
3516 /* step 2: always set host PIO timings */
3517 ata_for_each_dev(dev, link, ENABLED) {
3518 if (dev->pio_mode == 0xff) {
3519 ata_dev_warn(dev, "no PIO support\n");
3520 rc = -EINVAL;
3521 goto out;
3522 }
3523
3524 dev->xfer_mode = dev->pio_mode;
3525 dev->xfer_shift = ATA_SHIFT_PIO;
3526 if (ap->ops->set_piomode)
3527 ap->ops->set_piomode(ap, dev);
3528 }
3529
3530 /* step 3: set host DMA timings */
3531 ata_for_each_dev(dev, link, ENABLED) {
3532 if (!ata_dma_enabled(dev))
3533 continue;
3534
3535 dev->xfer_mode = dev->dma_mode;
3536 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3537 if (ap->ops->set_dmamode)
3538 ap->ops->set_dmamode(ap, dev);
3539 }
3540
3541 /* step 4: update devices' xfer mode */
3542 ata_for_each_dev(dev, link, ENABLED) {
3543 rc = ata_dev_set_mode(dev);
3544 if (rc)
3545 goto out;
3546 }
3547
3548 /* Record simplex status. If we selected DMA then the other
3549 * host channels are not permitted to do so.
3550 */
3551 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3552 ap->host->simplex_claimed = ap;
3553
3554 out:
3555 if (rc)
3556 *r_failed_dev = dev;
3557 return rc;
3558 }
3559
3560 /**
3561 * ata_wait_ready - wait for link to become ready
3562 * @link: link to be waited on
3563 * @deadline: deadline jiffies for the operation
3564 * @check_ready: callback to check link readiness
3565 *
3566 * Wait for @link to become ready. @check_ready should return
3567 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3568 * link doesn't seem to be occupied, other errno for other error
3569 * conditions.
3570 *
3571 * Transient -ENODEV conditions are allowed for
3572 * ATA_TMOUT_FF_WAIT.
3573 *
3574 * LOCKING:
3575 * EH context.
3576 *
3577 * RETURNS:
3578 * 0 if @link is ready before @deadline; otherwise, -errno.
3579 */
3580 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3581 int (*check_ready)(struct ata_link *link))
3582 {
3583 unsigned long start = jiffies;
3584 unsigned long nodev_deadline;
3585 int warned = 0;
3586
3587 /* choose which 0xff timeout to use, read comment in libata.h */
3588 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3589 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3590 else
3591 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3592
3593 /* Slave readiness can't be tested separately from master. On
3594 * M/S emulation configuration, this function should be called
3595 * only on the master and it will handle both master and slave.
3596 */
3597 WARN_ON(link == link->ap->slave_link);
3598
3599 if (time_after(nodev_deadline, deadline))
3600 nodev_deadline = deadline;
3601
3602 while (1) {
3603 unsigned long now = jiffies;
3604 int ready, tmp;
3605
3606 ready = tmp = check_ready(link);
3607 if (ready > 0)
3608 return 0;
3609
3610 /*
3611 * -ENODEV could be transient. Ignore -ENODEV if link
3612 * is online. Also, some SATA devices take a long
3613 * time to clear 0xff after reset. Wait for
3614 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3615 * offline.
3616 *
3617 * Note that some PATA controllers (pata_ali) explode
3618 * if status register is read more than once when
3619 * there's no device attached.
3620 */
3621 if (ready == -ENODEV) {
3622 if (ata_link_online(link))
3623 ready = 0;
3624 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3625 !ata_link_offline(link) &&
3626 time_before(now, nodev_deadline))
3627 ready = 0;
3628 }
3629
3630 if (ready)
3631 return ready;
3632 if (time_after(now, deadline))
3633 return -EBUSY;
3634
3635 if (!warned && time_after(now, start + 5 * HZ) &&
3636 (deadline - now > 3 * HZ)) {
3637 ata_link_warn(link,
3638 "link is slow to respond, please be patient "
3639 "(ready=%d)\n", tmp);
3640 warned = 1;
3641 }
3642
3643 ata_msleep(link->ap, 50);
3644 }
3645 }
3646
3647 /**
3648 * ata_wait_after_reset - wait for link to become ready after reset
3649 * @link: link to be waited on
3650 * @deadline: deadline jiffies for the operation
3651 * @check_ready: callback to check link readiness
3652 *
3653 * Wait for @link to become ready after reset.
3654 *
3655 * LOCKING:
3656 * EH context.
3657 *
3658 * RETURNS:
3659 * 0 if @link is ready before @deadline; otherwise, -errno.
3660 */
3661 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3662 int (*check_ready)(struct ata_link *link))
3663 {
3664 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3665
3666 return ata_wait_ready(link, deadline, check_ready);
3667 }
3668
3669 /**
3670 * sata_link_debounce - debounce SATA phy status
3671 * @link: ATA link to debounce SATA phy status for
3672 * @params: timing parameters { interval, duration, timeout } in msec
3673 * @deadline: deadline jiffies for the operation
3674 *
3675 * Make sure SStatus of @link reaches stable state, determined by
3676 * holding the same value where DET is not 1 for @duration polled
3677 * every @interval, before @timeout. Timeout constrains the
3678 * beginning of the stable state. Because DET gets stuck at 1 on
3679 * some controllers after hot unplugging, this function waits
3680 * until timeout then returns 0 if DET is stable at 1.
3681 *
3682 * @timeout is further limited by @deadline. The sooner of the
3683 * two is used.
3684 *
3685 * LOCKING:
3686 * Kernel thread context (may sleep)
3687 *
3688 * RETURNS:
3689 * 0 on success, -errno on failure.
3690 */
3691 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3692 unsigned long deadline)
3693 {
3694 unsigned long interval = params[0];
3695 unsigned long duration = params[1];
3696 unsigned long last_jiffies, t;
3697 u32 last, cur;
3698 int rc;
3699
3700 t = ata_deadline(jiffies, params[2]);
3701 if (time_before(t, deadline))
3702 deadline = t;
3703
3704 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3705 return rc;
3706 cur &= 0xf;
3707
3708 last = cur;
3709 last_jiffies = jiffies;
3710
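/*
 * Poll SStatus every @interval ms; DET must hold the same non-1 value
 * for @duration ms (e.g. 20 consecutive polls with a 5 ms interval and
 * 100 ms duration) before the link is considered stable.
 */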
3711 while (1) {
3712 ata_msleep(link->ap, interval);
3713 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3714 return rc;
3715 cur &= 0xf;
3716
3717 /* DET stable? */
3718 if (cur == last) {
3719 if (cur == 1 && time_before(jiffies, deadline))
3720 continue;
3721 if (time_after(jiffies,
3722 ata_deadline(last_jiffies, duration)))
3723 return 0;
3724 continue;
3725 }
3726
3727 /* unstable, start over */
3728 last = cur;
3729 last_jiffies = jiffies;
3730
3731 /* Check deadline. If debouncing failed, return
3732 * -EPIPE to tell upper layer to lower link speed.
3733 */
3734 if (time_after(jiffies, deadline))
3735 return -EPIPE;
3736 }
3737 }
3738
3739 /**
3740 * sata_link_resume - resume SATA link
3741 * @link: ATA link to resume SATA
3742 * @params: timing parameters { interval, duration, timeout } in msec
3743 * @deadline: deadline jiffies for the operation
3744 *
3745 * Resume SATA phy @link and debounce it.
3746 *
3747 * LOCKING:
3748 * Kernel thread context (may sleep)
3749 *
3750 * RETURNS:
3751 * 0 on success, -errno on failure.
3752 */
3753 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3754 unsigned long deadline)
3755 {
3756 int tries = ATA_LINK_RESUME_TRIES;
3757 u32 scontrol, serror;
3758 int rc;
3759
3760 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3761 return rc;
3762
3763 /*
3764 * Writes to SControl sometimes get ignored under certain
3765 * controllers (ata_piix SIDPR). Make sure DET actually is
3766 * cleared.
3767 */
3768 do {
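/*
 * Keep the SPD field, clear DET (no reset request) and set IPM to 3
 * to forbid partial/slumber transitions while the link comes back.
 */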
3769 scontrol = (scontrol & 0x0f0) | 0x300;
3770 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3771 return rc;
3772 /*
3773 * Some PHYs react badly if SStatus is pounded
3774 * immediately after resuming. Delay 200ms before
3775 * debouncing.
3776 */
3777 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3778 ata_msleep(link->ap, 200);
3779
3780 /* is SControl restored correctly? */
3781 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3782 return rc;
3783 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3784
3785 if ((scontrol & 0xf0f) != 0x300) {
3786 ata_link_warn(link, "failed to resume link (SControl %X)\n",
3787 scontrol);
3788 return 0;
3789 }
3790
3791 if (tries < ATA_LINK_RESUME_TRIES)
3792 ata_link_warn(link, "link resume succeeded after %d retries\n",
3793 ATA_LINK_RESUME_TRIES - tries);
3794
3795 if ((rc = sata_link_debounce(link, params, deadline)))
3796 return rc;
3797
3798 /* clear SError, some PHYs require this even for SRST to work */
3799 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3800 rc = sata_scr_write(link, SCR_ERROR, serror);
3801
3802 return rc != -EINVAL ? rc : 0;
3803 }
3804
3805 /**
3806 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3807 * @link: ATA link to manipulate SControl for
3808 * @policy: LPM policy to configure
3809 * @spm_wakeup: initiate LPM transition to active state
3810 *
3811 * Manipulate the IPM field of the SControl register of @link
3812 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3813 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3814 * the link. This function also clears PHYRDY_CHG before
3815 * returning.
3816 *
3817 * LOCKING:
3818 * EH context.
3819 *
3820 * RETURNS:
3821 * 0 on success, -errno otherwise.
3822 */
3823 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3824 bool spm_wakeup)
3825 {
3826 struct ata_eh_context *ehc = &link->eh_context;
3827 bool woken_up = false;
3828 u32 scontrol;
3829 int rc;
3830
3831 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3832 if (rc)
3833 return rc;
3834
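	/*
	 * SControl layout (per the SATA spec): DET [3:0], SPD [7:4], IPM [11:8]
	 * (bit 8 = no Partial, bit 9 = no Slumber, bit 10 = no DevSleep),
	 * SPM [15:12].  The cases below only touch the IPM, SPM and DET fields.
	 */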
3835 switch (policy) {
3836 case ATA_LPM_MAX_POWER:
3837 /* disable all LPM transitions */
3838 scontrol |= (0x7 << 8);
3839 /* initiate transition to active state */
3840 if (spm_wakeup) {
3841 scontrol |= (0x4 << 12);
3842 woken_up = true;
3843 }
3844 break;
3845 case ATA_LPM_MED_POWER:
3846 /* allow LPM to PARTIAL */
3847 scontrol &= ~(0x1 << 8);
3848 scontrol |= (0x6 << 8);
3849 break;
3850 case ATA_LPM_MIN_POWER:
3851 if (ata_link_nr_enabled(link) > 0)
3852 /* no restrictions on LPM transitions */
3853 scontrol &= ~(0x7 << 8);
3854 else {
3855 /* empty port, power off */
3856 scontrol &= ~0xf;
3857 scontrol |= (0x1 << 2);
3858 }
3859 break;
3860 default:
3861 WARN_ON(1);
3862 }
3863
3864 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3865 if (rc)
3866 return rc;
3867
3868 /* give the link time to transit out of LPM state */
3869 if (woken_up)
3870 msleep(10);
3871
3872 /* clear PHYRDY_CHG from SError */
3873 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3874 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3875 }
3876
3877 /**
3878 * ata_std_prereset - prepare for reset
3879 * @link: ATA link to be reset
3880 * @deadline: deadline jiffies for the operation
3881 *
3882 * @link is about to be reset. Initialize it. Failure from
3883 * prereset makes libata abort the whole reset sequence and give up
3884 * that port, so prereset should be best-effort.  It does its
3885 * best to prepare for the reset sequence, but if things go wrong, it
3886 * should just whine, not fail.
3887 *
3888 * LOCKING:
3889 * Kernel thread context (may sleep)
3890 *
3891 * RETURNS:
3892 * 0 on success, -errno otherwise.
3893 */
3894 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3895 {
3896 struct ata_port *ap = link->ap;
3897 struct ata_eh_context *ehc = &link->eh_context;
3898 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3899 int rc;
3900
3901 /* if we're about to do hardreset, nothing more to do */
3902 if (ehc->i.action & ATA_EH_HARDRESET)
3903 return 0;
3904
3905 /* if SATA, resume link */
3906 if (ap->flags & ATA_FLAG_SATA) {
3907 rc = sata_link_resume(link, timing, deadline);
3908 /* whine about phy resume failure but proceed */
3909 if (rc && rc != -EOPNOTSUPP)
3910 ata_link_warn(link,
3911 "failed to resume link for reset (errno=%d)\n",
3912 rc);
3913 }
3914
3915 /* no point in trying softreset on offline link */
3916 if (ata_phys_link_offline(link))
3917 ehc->i.action &= ~ATA_EH_SOFTRESET;
3918
3919 return 0;
3920 }
3921
3922 /**
3923 * sata_link_hardreset - reset link via SATA phy reset
3924 * @link: link to reset
3925 * @timing: timing parameters { interval, duration, timeout } in msec
3926 * @deadline: deadline jiffies for the operation
3927 * @online: optional out parameter indicating link onlineness
3928 * @check_ready: optional callback to check link readiness
3929 *
3930 * SATA phy-reset @link using DET bits of SControl register.
3931 * After hardreset, link readiness is waited upon using
3932 * ata_wait_ready() if @check_ready is specified. LLDs are
3933 * allowed to not specify @check_ready and wait themselves after
3934 * this function returns.  Device classification is the LLD's
3935 * responsibility.
3936 *
3937 * *@online is set to one iff reset succeeded and @link is online
3938 * after reset.
3939 *
3940 * LOCKING:
3941 * Kernel thread context (may sleep)
3942 *
3943 * RETURNS:
3944 * 0 on success, -errno otherwise.
3945 */
3946 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3947 unsigned long deadline,
3948 bool *online, int (*check_ready)(struct ata_link *))
3949 {
3950 u32 scontrol;
3951 int rc;
3952
3953 DPRINTK("ENTER\n");
3954
3955 if (online)
3956 *online = false;
3957
3958 if (sata_set_spd_needed(link)) {
3959 /* SATA spec says nothing about how to reconfigure
3960 * spd. To be on the safe side, turn off phy during
3961 * reconfiguration. This works for at least ICH7 AHCI
3962 * and Sil3124.
3963 */
3964 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3965 goto out;
3966
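		/* DET = 4 takes the PHY offline; IPM = 3 blocks Partial/Slumber */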
3967 scontrol = (scontrol & 0x0f0) | 0x304;
3968
3969 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3970 goto out;
3971
3972 sata_set_spd(link);
3973 }
3974
3975 /* issue phy wake/reset */
3976 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3977 goto out;
3978
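	/* DET = 1 requests COMRESET; IPM = 3 blocks Partial/Slumber meanwhile */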
3979 scontrol = (scontrol & 0x0f0) | 0x301;
3980
3981 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3982 goto out;
3983
3984 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3985 * 10.4.2 says at least 1 ms.
3986 */
3987 ata_msleep(link->ap, 1);
3988
3989 /* bring link back */
3990 rc = sata_link_resume(link, timing, deadline);
3991 if (rc)
3992 goto out;
3993 /* if link is offline nothing more to do */
3994 if (ata_phys_link_offline(link))
3995 goto out;
3996
3997 /* Link is online. From this point, -ENODEV too is an error. */
3998 if (online)
3999 *online = true;
4000
4001 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
4002 /* If PMP is supported, we have to do follow-up SRST.
4003 * Some PMPs don't send D2H Reg FIS after hardreset if
4004 * the first port is empty. Wait only for
4005 * ATA_TMOUT_PMP_SRST_WAIT.
4006 */
4007 if (check_ready) {
4008 unsigned long pmp_deadline;
4009
4010 pmp_deadline = ata_deadline(jiffies,
4011 ATA_TMOUT_PMP_SRST_WAIT);
4012 if (time_after(pmp_deadline, deadline))
4013 pmp_deadline = deadline;
4014 ata_wait_ready(link, pmp_deadline, check_ready);
4015 }
4016 rc = -EAGAIN;
4017 goto out;
4018 }
4019
4020 rc = 0;
4021 if (check_ready)
4022 rc = ata_wait_ready(link, deadline, check_ready);
4023 out:
4024 if (rc && rc != -EAGAIN) {
4025 /* online is set iff link is online && reset succeeded */
4026 if (online)
4027 *online = false;
4028 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
4029 }
4030 DPRINTK("EXIT, rc=%d\n", rc);
4031 return rc;
4032 }
4033
4034 /**
4035 * sata_std_hardreset - COMRESET w/o waiting or classification
4036 * @link: link to reset
4037 * @class: resulting class of attached device
4038 * @deadline: deadline jiffies for the operation
4039 *
4040 * Standard SATA COMRESET w/o waiting or classification.
4041 *
4042 * LOCKING:
4043 * Kernel thread context (may sleep)
4044 *
4045 * RETURNS:
4046 * 0 if link offline, -EAGAIN if link online, -errno on errors.
4047 */
4048 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4049 unsigned long deadline)
4050 {
4051 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4052 bool online;
4053 int rc;
4054
4055 /* do hardreset */
4056 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4057 return online ? -EAGAIN : rc;
4058 }
4059
4060 /**
4061 * ata_std_postreset - standard postreset callback
4062 * @link: the target ata_link
4063 * @classes: classes of attached devices
4064 *
4065 * This function is invoked after a successful reset. Note that
4066 * the device might have been reset more than once using
4067 * different reset methods before postreset is invoked.
4068 *
4069 * LOCKING:
4070 * Kernel thread context (may sleep)
4071 */
4072 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4073 {
4074 u32 serror;
4075
4076 DPRINTK("ENTER\n");
4077
4078 /* reset complete, clear SError */
4079 if (!sata_scr_read(link, SCR_ERROR, &serror))
4080 sata_scr_write(link, SCR_ERROR, serror);
4081
4082 /* print link status */
4083 sata_print_link_status(link);
4084
4085 DPRINTK("EXIT\n");
4086 }
4087
4088 /**
4089 * ata_dev_same_device - Determine whether new ID matches configured device
4090 * @dev: device to compare against
4091 * @new_class: class of the new device
4092 * @new_id: IDENTIFY page of the new device
4093 *
4094 * Compare @new_class and @new_id against @dev and determine
4095 * whether @dev is the device indicated by @new_class and
4096 * @new_id.
4097 *
4098 * LOCKING:
4099 * None.
4100 *
4101 * RETURNS:
4102 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4103 */
4104 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4105 const u16 *new_id)
4106 {
4107 const u16 *old_id = dev->id;
4108 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4109 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4110
4111 if (dev->class != new_class) {
4112 ata_dev_info(dev, "class mismatch %d != %d\n",
4113 dev->class, new_class);
4114 return 0;
4115 }
4116
4117 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4118 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4119 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4120 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4121
4122 if (strcmp(model[0], model[1])) {
4123 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4124 model[0], model[1]);
4125 return 0;
4126 }
4127
4128 if (strcmp(serial[0], serial[1])) {
4129 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4130 serial[0], serial[1]);
4131 return 0;
4132 }
4133
4134 return 1;
4135 }
4136
4137 /**
4138 * ata_dev_reread_id - Re-read IDENTIFY data
4139 * @dev: target ATA device
4140 * @readid_flags: read ID flags
4141 *
4142 * Re-read IDENTIFY page and make sure @dev is still attached to
4143 * the port.
4144 *
4145 * LOCKING:
4146 * Kernel thread context (may sleep)
4147 *
4148 * RETURNS:
4149 * 0 on success, negative errno otherwise
4150 */
4151 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4152 {
4153 unsigned int class = dev->class;
4154 u16 *id = (void *)dev->link->ap->sector_buf;
4155 int rc;
4156
4157 /* read ID data */
4158 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4159 if (rc)
4160 return rc;
4161
4162 /* is the device still there? */
4163 if (!ata_dev_same_device(dev, class, id))
4164 return -ENODEV;
4165
4166 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4167 return 0;
4168 }
4169
4170 /**
4171 * ata_dev_revalidate - Revalidate ATA device
4172 * @dev: device to revalidate
4173 * @new_class: new class code
4174 * @readid_flags: read ID flags
4175 *
4176 * Re-read IDENTIFY page, make sure @dev is still attached to the
4177 * port and reconfigure it according to the new IDENTIFY page.
4178 *
4179 * LOCKING:
4180 * Kernel thread context (may sleep)
4181 *
4182 * RETURNS:
4183 * 0 on success, negative errno otherwise
4184 */
4185 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4186 unsigned int readid_flags)
4187 {
4188 u64 n_sectors = dev->n_sectors;
4189 u64 n_native_sectors = dev->n_native_sectors;
4190 int rc;
4191
4192 if (!ata_dev_enabled(dev))
4193 return -ENODEV;
4194
4195 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4196 if (ata_class_enabled(new_class) &&
4197 new_class != ATA_DEV_ATA &&
4198 new_class != ATA_DEV_ATAPI &&
4199 new_class != ATA_DEV_ZAC &&
4200 new_class != ATA_DEV_SEMB) {
4201 ata_dev_info(dev, "class mismatch %u != %u\n",
4202 dev->class, new_class);
4203 rc = -ENODEV;
4204 goto fail;
4205 }
4206
4207 /* re-read ID */
4208 rc = ata_dev_reread_id(dev, readid_flags);
4209 if (rc)
4210 goto fail;
4211
4212 /* configure device according to the new ID */
4213 rc = ata_dev_configure(dev);
4214 if (rc)
4215 goto fail;
4216
4217 /* verify n_sectors hasn't changed */
4218 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4219 dev->n_sectors == n_sectors)
4220 return 0;
4221
4222 /* n_sectors has changed */
4223 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4224 (unsigned long long)n_sectors,
4225 (unsigned long long)dev->n_sectors);
4226
4227 /*
4228 * Something could have caused HPA to be unlocked
4229 * involuntarily. If n_native_sectors hasn't changed and the
4230 * new size matches it, keep the device.
4231 */
4232 if (dev->n_native_sectors == n_native_sectors &&
4233 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4234 ata_dev_warn(dev,
4235 "new n_sectors matches native, probably "
4236 "late HPA unlock, n_sectors updated\n");
4237 /* use the larger n_sectors */
4238 return 0;
4239 }
4240
4241 /*
4242 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4243 * unlocking HPA in those cases.
4244 *
4245 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4246 */
4247 if (dev->n_native_sectors == n_native_sectors &&
4248 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4249 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4250 ata_dev_warn(dev,
4251 "old n_sectors matches native, probably "
4252 "late HPA lock, will try to unlock HPA\n");
4253 /* try unlocking HPA */
4254 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4255 rc = -EIO;
4256 } else
4257 rc = -ENODEV;
4258
4259 /* restore original n_[native_]sectors and fail */
4260 dev->n_native_sectors = n_native_sectors;
4261 dev->n_sectors = n_sectors;
4262 fail:
4263 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4264 return rc;
4265 }
4266
4267 struct ata_blacklist_entry {
4268 const char *model_num;
4269 const char *model_rev;
4270 unsigned long horkage;
4271 };
4272
4273 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4274 /* Devices with DMA related problems under Linux */
4275 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4276 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4277 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4278 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4279 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4280 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4281 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4282 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4283 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4284 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4285 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4286 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4287 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4288 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4289 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4290 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4291 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4292 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4293 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4294 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4295 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4296 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4297 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4298 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4299 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4300 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4301 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4302 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4303 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4304 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
4305 /* Odd clown on sil3726/4726 PMPs */
4306 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4307
4308 /* Weird ATAPI devices */
4309 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4310 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4311 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4312 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4313
4314 /*
4315 * Causes silent data corruption with higher max sects.
4316 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4317 */
4318 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4319
4320 /*
4321 * These devices time out with higher max sects.
4322 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4323 */
4324 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4325 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4326
4327 /* Devices we expect to fail diagnostics */
4328
4329 /* Devices where NCQ should be avoided */
4330 /* NCQ is slow */
4331 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4332 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4333 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4334 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4335 /* NCQ is broken */
4336 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4337 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4338 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4339 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4340 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4341
4342 /* Seagate NCQ + FLUSH CACHE firmware bug */
4343 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4344 ATA_HORKAGE_FIRMWARE_WARN },
4345
4346 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4347 ATA_HORKAGE_FIRMWARE_WARN },
4348
4349 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4350 ATA_HORKAGE_FIRMWARE_WARN },
4351
4352 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4353 ATA_HORKAGE_FIRMWARE_WARN },
4354
4355 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4356 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4357 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4358 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4359
4360 /* Blacklist entries taken from Silicon Image 3124/3132
4361 Windows driver .inf file - also several Linux problem reports */
4362 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4363 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4364 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4365
4366 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4367 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4368
4369 /* devices which puke on READ_NATIVE_MAX */
4370 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4371 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4372 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4373 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4374
4375 /* this one allows HPA unlocking but fails IOs on the area */
4376 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4377
4378 /* Devices which report 1 sector over size HPA */
4379 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4380 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4381 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4382
4383 /* Devices which get the IVB wrong */
4384 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4385 /* Maybe we should just blacklist TSSTcorp... */
4386 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4387
4388 /* Devices that do not need bridging limits applied */
4389 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4390 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4391
4392 /* Devices which aren't very happy with higher link speeds */
4393 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4394 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4395
4396 /*
4397 * Devices which choke on SETXFER. Applies only if both the
4398 * device and controller are SATA.
4399 */
4400 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4401 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4402 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4403 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4404 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4405
4406 /* Crucial BX100 SSD 500GB has broken LPM support */
4407 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
4408
4409 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4410 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4411 ATA_HORKAGE_ZERO_AFTER_TRIM |
4412 ATA_HORKAGE_NOLPM, },
4413 /* 512GB MX100 with newer firmware has only LPM issues */
4414 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
4415 ATA_HORKAGE_NOLPM, },
4416
4417 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4418 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4419 ATA_HORKAGE_ZERO_AFTER_TRIM |
4420 ATA_HORKAGE_NOLPM, },
4421 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4422 ATA_HORKAGE_ZERO_AFTER_TRIM |
4423 ATA_HORKAGE_NOLPM, },
4424
4425 /* devices that don't properly handle queued TRIM commands */
4426 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4427 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4428 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4429 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4430 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4431 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4432 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4433 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4434 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4435 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4436 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4437 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4438 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4439 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4440 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4441 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4442
4443 /* devices that don't properly handle TRIM commands */
4444 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4445
4446 /*
4447 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4448 * (Return Zero After Trim) flags in the ATA Command Set are
4449 * unreliable in the sense that they only define what happens if
4450 * the device successfully executed the DSM TRIM command. TRIM
4451 * is only advisory, however, and the device is free to silently
4452 * ignore all or parts of the request.
4453 *
4454 * Whitelist drives that are known to reliably return zeroes
4455 * after TRIM.
4456 */
4457
4458 /*
4459 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4460 * that model before whitelisting all other intel SSDs.
4461 */
4462 { "INTEL*SSDSC2MH*", NULL, 0, },
4463
4464 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4465 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4466 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4467 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4468 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4469 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4470 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4471
4472 /*
4473 * Some WD SATA-I drives spin up and down erratically when the link
4474 * is put into slumber mode.  We don't have a full list of the
4475 * affected devices.  Disable LPM if the device matches one of the
4476 * known prefixes and is SATA-1.  As a side effect, LPM partial is
4477 * lost too.
4478 *
4479 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4480 */
4481 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4482 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4483 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4484 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4485 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4486 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4487 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4488
4489 /* End Marker */
4490 { }
4491 };
4492
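/*
 * Match @dev's model number and firmware revision strings against the
 * blacklist table above (entries may use glob patterns) and return the
 * horkage flags of the first matching entry, or 0 if nothing matches.
 */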
4493 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4494 {
4495 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4496 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4497 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4498
4499 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4500 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4501
4502 while (ad->model_num) {
4503 if (glob_match(ad->model_num, model_num)) {
4504 if (ad->model_rev == NULL)
4505 return ad->horkage;
4506 if (glob_match(ad->model_rev, model_rev))
4507 return ad->horkage;
4508 }
4509 ad++;
4510 }
4511 return 0;
4512 }
4513
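/* Return 1 if DMA must not be used with @dev, 0 otherwise. */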
4514 static int ata_dma_blacklisted(const struct ata_device *dev)
4515 {
4516 /* We don't support polling DMA.
4517 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO instead)
4518 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4519 */
4520 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4521 (dev->flags & ATA_DFLAG_CDB_INTR))
4522 return 1;
4523 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4524 }
4525
4526 /**
4527 * ata_is_40wire - check drive side detection
4528 * @dev: device
4529 *
4530 * Perform drive side detection decoding, allowing for device vendors
4531 * who can't follow the documentation.
4532 */
4533
4534 static int ata_is_40wire(struct ata_device *dev)
4535 {
4536 if (dev->horkage & ATA_HORKAGE_IVB)
4537 return ata_drive_40wire_relaxed(dev->id);
4538 return ata_drive_40wire(dev->id);
4539 }
4540
4541 /**
4542 * cable_is_40wire - 40/80/SATA decider
4543 * @ap: port to consider
4544 *
4545 * This function encapsulates the policy for speed management
4546 * in one place. At the moment we don't cache the result but
4547 * there is a good case for setting ap->cbl to the result when
4548 * we are called with unknown cables (and figuring out if it
4549 * impacts hotplug at all).
4550 *
4551 * Return 1 if the cable appears to be 40 wire.
4552 */
4553
4554 static int cable_is_40wire(struct ata_port *ap)
4555 {
4556 struct ata_link *link;
4557 struct ata_device *dev;
4558
4559 /* If the controller thinks we are 40 wire, we are. */
4560 if (ap->cbl == ATA_CBL_PATA40)
4561 return 1;
4562
4563 /* If the controller thinks we are 80 wire, we are. */
4564 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4565 return 0;
4566
4567 /* If the system is known to be 40 wire short cable (eg
4568 * laptop), then we allow 80 wire modes even if the drive
4569 * isn't sure.
4570 */
4571 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4572 return 0;
4573
4574 /* If the controller doesn't know, we scan.
4575 *
4576 * Note: We look for all 40 wire detects at this point. Any
4577 * 80 wire detect is taken to be 80 wire cable because
4578 * - in many setups only the one drive (slave if present) will
4579 * give a valid detect
4580 * - if you have a non detect capable drive you don't want it
4581 * to colour the choice
4582 */
4583 ata_for_each_link(link, ap, EDGE) {
4584 ata_for_each_dev(dev, link, ENABLED) {
4585 if (!ata_is_40wire(dev))
4586 return 0;
4587 }
4588 }
4589 return 1;
4590 }
4591
4592 /**
4593 * ata_dev_xfermask - Compute supported xfermask of the given device
4594 * @dev: Device to compute xfermask for
4595 *
4596 * Compute supported xfermask of @dev and store it in
4597 * dev->*_mask. This function is responsible for applying all
4598 * known limits including host controller limits, device
4599 * blacklist, etc...
4600 *
4601 * LOCKING:
4602 * None.
4603 */
4604 static void ata_dev_xfermask(struct ata_device *dev)
4605 {
4606 struct ata_link *link = dev->link;
4607 struct ata_port *ap = link->ap;
4608 struct ata_host *host = ap->host;
4609 unsigned long xfer_mask;
4610
4611 /* controller modes available */
4612 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4613 ap->mwdma_mask, ap->udma_mask);
4614
4615 /* drive modes available */
4616 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4617 dev->mwdma_mask, dev->udma_mask);
4618 xfer_mask &= ata_id_xfermask(dev->id);
4619
4620 /*
4621 * CFA Advanced TrueIDE timings are not allowed on a shared
4622 * cable
4623 */
4624 if (ata_dev_pair(dev)) {
4625 /* No PIO5 or PIO6 */
4626 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4627 /* No MWDMA3 or MWDMA 4 */
4628 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4629 }
4630
4631 if (ata_dma_blacklisted(dev)) {
4632 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4633 ata_dev_warn(dev,
4634 "device is on DMA blacklist, disabling DMA\n");
4635 }
4636
4637 if ((host->flags & ATA_HOST_SIMPLEX) &&
4638 host->simplex_claimed && host->simplex_claimed != ap) {
4639 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4640 ata_dev_warn(dev,
4641 "simplex DMA is claimed by other device, disabling DMA\n");
4642 }
4643
4644 if (ap->flags & ATA_FLAG_NO_IORDY)
4645 xfer_mask &= ata_pio_mask_no_iordy(dev);
4646
4647 if (ap->ops->mode_filter)
4648 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4649
4650 /* Apply cable rule here. Don't apply it early because when
4651 * we handle hot plug the cable type can itself change.
4652 * Check this last so that we know if the transfer rate was
4653 * solely limited by the cable.
4654 * Unknown or 80 wire cables reported host side are checked
4655 * drive side as well. Cases where we know a 40wire cable
4656 * is used safely for 80 are not checked here.
4657 */
4658 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4659 /* UDMA/44 or higher would be available */
4660 if (cable_is_40wire(ap)) {
4661 ata_dev_warn(dev,
4662 "limited to UDMA/33 due to 40-wire cable\n");
4663 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4664 }
4665
4666 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4667 &dev->mwdma_mask, &dev->udma_mask);
4668 }
4669
4670 /**
4671 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4672 * @dev: Device to which command will be sent
4673 *
4674 * Issue SET FEATURES - XFER MODE command to device @dev
4675 * on its port.
4676 *
4677 * LOCKING:
4678 * PCI/etc. bus probe sem.
4679 *
4680 * RETURNS:
4681 * 0 on success, AC_ERR_* mask otherwise.
4682 */
4683
4684 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4685 {
4686 struct ata_taskfile tf;
4687 unsigned int err_mask;
4688
4689 /* set up set-features taskfile */
4690 DPRINTK("set features - xfer mode\n");
4691
4692 /* Some controllers and ATAPI devices show flaky interrupt
4693 * behavior after setting xfer mode. Use polling instead.
4694 */
4695 ata_tf_init(dev, &tf);
4696 tf.command = ATA_CMD_SET_FEATURES;
4697 tf.feature = SETFEATURES_XFER;
4698 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4699 tf.protocol = ATA_PROT_NODATA;
4700 /* If we are using IORDY we must send the mode setting command */
4701 if (ata_pio_need_iordy(dev))
4702 tf.nsect = dev->xfer_mode;
4703 /* If the device has IORDY and the controller does not - turn it off */
4704 else if (ata_id_has_iordy(dev->id))
4705 tf.nsect = 0x01;
4706 else /* In the ancient relic department - skip all of this */
4707 return 0;
4708
4709 /* On some disks, this command causes spin-up, so we need longer timeout */
4710 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4711
4712 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4713 return err_mask;
4714 }
4715
4716 /**
4717 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4718 * @dev: Device to which command will be sent
4719 * @enable: Whether to enable or disable the feature
4720 * @feature: The feature to set (passed in the sector count field)
4721 *
4722 * Issue SET FEATURES - SATA FEATURES command to device @dev
4723 * on its port, with @feature in the sector count field.
4724 *
4725 * LOCKING:
4726 * PCI/etc. bus probe sem.
4727 *
4728 * RETURNS:
4729 * 0 on success, AC_ERR_* mask otherwise.
4730 */
4731 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4732 {
4733 struct ata_taskfile tf;
4734 unsigned int err_mask;
4735 unsigned long timeout = 0;
4736
4737 /* set up set-features taskfile */
4738 DPRINTK("set features - SATA features\n");
4739
4740 ata_tf_init(dev, &tf);
4741 tf.command = ATA_CMD_SET_FEATURES;
4742 tf.feature = enable;
4743 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4744 tf.protocol = ATA_PROT_NODATA;
4745 tf.nsect = feature;
4746
4747 if (enable == SETFEATURES_SPINUP)
4748 timeout = ata_probe_timeout ?
4749 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4750 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4751
4752 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4753 return err_mask;
4754 }
4755 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4756
4757 /**
4758 * ata_dev_init_params - Issue INIT DEV PARAMS command
4759 * @dev: Device to which command will be sent
4760 * @heads: Number of heads (taskfile parameter)
4761 * @sectors: Number of sectors (taskfile parameter)
4762 *
4763 * LOCKING:
4764 * Kernel thread context (may sleep)
4765 *
4766 * RETURNS:
4767 * 0 on success, AC_ERR_* mask otherwise.
4768 */
4769 static unsigned int ata_dev_init_params(struct ata_device *dev,
4770 u16 heads, u16 sectors)
4771 {
4772 struct ata_taskfile tf;
4773 unsigned int err_mask;
4774
4775 /* Number of sectors per track 1-255. Number of heads 1-16 */
4776 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4777 return AC_ERR_INVALID;
4778
4779 /* set up init dev params taskfile */
4780 DPRINTK("init dev params \n");
4781
4782 ata_tf_init(dev, &tf);
4783 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4784 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4785 tf.protocol = ATA_PROT_NODATA;
4786 tf.nsect = sectors;
4787 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4788
4789 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4790 /* A clean abort indicates an original or just out-of-spec drive,
4791 and we should continue as we issue the setup based on the
4792 drive-reported working geometry */
4793 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4794 err_mask = 0;
4795
4796 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4797 return err_mask;
4798 }
4799
4800 /**
4801 * ata_sg_clean - Unmap DMA memory associated with command
4802 * @qc: Command containing DMA memory to be released
4803 *
4804 * Unmap all mapped DMA memory associated with this command.
4805 *
4806 * LOCKING:
4807 * spin_lock_irqsave(host lock)
4808 */
4809 void ata_sg_clean(struct ata_queued_cmd *qc)
4810 {
4811 struct ata_port *ap = qc->ap;
4812 struct scatterlist *sg = qc->sg;
4813 int dir = qc->dma_dir;
4814
4815 WARN_ON_ONCE(sg == NULL);
4816
4817 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4818
4819 if (qc->n_elem)
4820 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4821
4822 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4823 qc->sg = NULL;
4824 }
4825
4826 /**
4827 * atapi_check_dma - Check whether ATAPI DMA can be supported
4828 * @qc: Metadata associated with taskfile to check
4829 *
4830 * Allow low-level driver to filter ATA PACKET commands, returning
4831 * a status indicating whether or not it is OK to use DMA for the
4832 * supplied PACKET command.
4833 *
4834 * LOCKING:
4835 * spin_lock_irqsave(host lock)
4836 *
4837 * RETURNS: 0 when ATAPI DMA can be used
4838 * nonzero otherwise
4839 */
4840 int atapi_check_dma(struct ata_queued_cmd *qc)
4841 {
4842 struct ata_port *ap = qc->ap;
4843
4844 /* Don't allow DMA if the transfer isn't a multiple of 16 bytes.  Quite a
4845 * few ATAPI devices choke on such DMA requests.
4846 */
4847 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4848 unlikely(qc->nbytes & 15))
4849 return 1;
4850
4851 if (ap->ops->check_atapi_dma)
4852 return ap->ops->check_atapi_dma(qc);
4853
4854 return 0;
4855 }
4856
4857 /**
4858 * ata_std_qc_defer - Check whether a qc needs to be deferred
4859 * @qc: ATA command in question
4860 *
4861 * Non-NCQ commands cannot run with any other command, NCQ or
4862 * not.  As the upper layer only knows the queue depth, we are
4863 * responsible for maintaining exclusion. This function checks
4864 * whether a new command @qc can be issued.
4865 *
4866 * LOCKING:
4867 * spin_lock_irqsave(host lock)
4868 *
4869 * RETURNS:
4870 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4871 */
4872 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4873 {
4874 struct ata_link *link = qc->dev->link;
4875
4876 if (ata_is_ncq(qc->tf.protocol)) {
4877 if (!ata_tag_valid(link->active_tag))
4878 return 0;
4879 } else {
4880 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4881 return 0;
4882 }
4883
4884 return ATA_DEFER_LINK;
4885 }
4886
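/* ->qc_prep stub for controllers that need no per-command preparation */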
4887 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4888
4889 /**
4890 * ata_sg_init - Associate command with scatter-gather table.
4891 * @qc: Command to be associated
4892 * @sg: Scatter-gather table.
4893 * @n_elem: Number of elements in s/g table.
4894 *
4895 * Initialize the data-related elements of queued_cmd @qc
4896 * to point to a scatter-gather table @sg, containing @n_elem
4897 * elements.
4898 *
4899 * LOCKING:
4900 * spin_lock_irqsave(host lock)
4901 */
4902 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4903 unsigned int n_elem)
4904 {
4905 qc->sg = sg;
4906 qc->n_elem = n_elem;
4907 qc->cursg = qc->sg;
4908 }
4909
4910 /**
4911 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4912 * @qc: Command with scatter-gather table to be mapped.
4913 *
4914 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4915 *
4916 * LOCKING:
4917 * spin_lock_irqsave(host lock)
4918 *
4919 * RETURNS:
4920 * Zero on success, negative on error.
4921 *
4922 */
4923 static int ata_sg_setup(struct ata_queued_cmd *qc)
4924 {
4925 struct ata_port *ap = qc->ap;
4926 unsigned int n_elem;
4927
4928 VPRINTK("ENTER, ata%u\n", ap->print_id);
4929
4930 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4931 if (n_elem < 1)
4932 return -1;
4933
4934 DPRINTK("%d sg elements mapped\n", n_elem);
4935 qc->orig_n_elem = qc->n_elem;
4936 qc->n_elem = n_elem;
4937 qc->flags |= ATA_QCFLAG_DMAMAP;
4938
4939 return 0;
4940 }
4941
4942 /**
4943 * swap_buf_le16 - swap halves of 16-bit words in place
4944 * @buf: Buffer to swap
4945 * @buf_words: Number of 16-bit words in buffer.
4946 *
4947 * Swap halves of 16-bit words if needed to convert from
4948 * little-endian byte order to native cpu byte order, or
4949 * vice-versa.
4950 *
4951 * LOCKING:
4952 * Inherited from caller.
4953 */
4954 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4955 {
4956 #ifdef __BIG_ENDIAN
4957 unsigned int i;
4958
4959 for (i = 0; i < buf_words; i++)
4960 buf[i] = le16_to_cpu(buf[i]);
4961 #endif /* __BIG_ENDIAN */
4962 }
4963
4964 /**
4965 * ata_qc_new_init - Request an available ATA command, and initialize it
4966 * @dev: Device from whom we request an available command structure
4967 * @tag: tag
4968 *
4969 * LOCKING:
4970 * None.
4971 */
4972
4973 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4974 {
4975 struct ata_port *ap = dev->link->ap;
4976 struct ata_queued_cmd *qc;
4977
4978 /* no command while frozen */
4979 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4980 return NULL;
4981
4982 /* libsas case */
4983 if (ap->flags & ATA_FLAG_SAS_HOST) {
4984 tag = ata_sas_allocate_tag(ap);
4985 if (tag < 0)
4986 return NULL;
4987 }
4988
4989 qc = __ata_qc_from_tag(ap, tag);
4990 qc->tag = tag;
4991 qc->scsicmd = NULL;
4992 qc->ap = ap;
4993 qc->dev = dev;
4994
4995 ata_qc_reinit(qc);
4996
4997 return qc;
4998 }
4999
5000 /**
5001 * ata_qc_free - free unused ata_queued_cmd
5002 * @qc: Command to complete
5003 *
5004 * Designed to free unused ata_queued_cmd object
5005 * in case something prevents using it.
5006 *
5007 * LOCKING:
5008 * spin_lock_irqsave(host lock)
5009 */
5010 void ata_qc_free(struct ata_queued_cmd *qc)
5011 {
5012 struct ata_port *ap;
5013 unsigned int tag;
5014
5015 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5016 ap = qc->ap;
5017
5018 qc->flags = 0;
5019 tag = qc->tag;
5020 if (likely(ata_tag_valid(tag))) {
5021 qc->tag = ATA_TAG_POISON;
5022 if (ap->flags & ATA_FLAG_SAS_HOST)
5023 ata_sas_free_tag(tag, ap);
5024 }
5025 }
5026
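/*
 * Low-level completion: unmap DMA if needed, drop the command from the
 * active tag / sactive bookkeeping, clear exclusive status and finally
 * invoke the command's completion callback.  Called with host lock held.
 */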
5027 void __ata_qc_complete(struct ata_queued_cmd *qc)
5028 {
5029 struct ata_port *ap;
5030 struct ata_link *link;
5031
5032 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5033 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
5034 ap = qc->ap;
5035 link = qc->dev->link;
5036
5037 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5038 ata_sg_clean(qc);
5039
5040 /* command should be marked inactive atomically with qc completion */
5041 if (ata_is_ncq(qc->tf.protocol)) {
5042 link->sactive &= ~(1 << qc->tag);
5043 if (!link->sactive)
5044 ap->nr_active_links--;
5045 } else {
5046 link->active_tag = ATA_TAG_POISON;
5047 ap->nr_active_links--;
5048 }
5049
5050 /* clear exclusive status */
5051 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5052 ap->excl_link == link))
5053 ap->excl_link = NULL;
5054
5055 /* atapi: mark qc as inactive to prevent the interrupt handler
5056 * from completing the command twice later, before the error handler
5057 * is called. (when rc != 0 and atapi request sense is needed)
5058 */
5059 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5060 ap->qc_active &= ~(1 << qc->tag);
5061
5062 /* call completion callback */
5063 qc->complete_fn(qc);
5064 }
5065
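/* Have the LLD fill in the result taskfile for @qc, preserving tf flags. */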
5066 static void fill_result_tf(struct ata_queued_cmd *qc)
5067 {
5068 struct ata_port *ap = qc->ap;
5069
5070 qc->result_tf.flags = qc->tf.flags;
5071 ap->ops->qc_fill_rtf(qc);
5072 }
5073
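/*
 * A successful data transfer in the configured DMA mode proves the mode
 * works; clear ATA_DFLAG_DUBIOUS_XFER.  PIO transfers on a DMA-capable
 * device prove nothing and leave the flag alone.
 */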
5074 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5075 {
5076 struct ata_device *dev = qc->dev;
5077
5078 if (!ata_is_data(qc->tf.protocol))
5079 return;
5080
5081 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5082 return;
5083
5084 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5085 }
5086
5087 /**
5088 * ata_qc_complete - Complete an active ATA command
5089 * @qc: Command to complete
5090 *
5091 * Indicate to the mid and upper layers that an ATA command has
5092 * completed, with either an ok or not-ok status.
5093 *
5094 * Refrain from calling this function multiple times when
5095 * successfully completing multiple NCQ commands.
5096 * ata_qc_complete_multiple() should be used instead, which will
5097 * properly update IRQ expect state.
5098 *
5099 * LOCKING:
5100 * spin_lock_irqsave(host lock)
5101 */
5102 void ata_qc_complete(struct ata_queued_cmd *qc)
5103 {
5104 struct ata_port *ap = qc->ap;
5105
5106 /* Trigger the LED (if available) */
5107 ledtrig_disk_activity();
5108
5109 /* XXX: New EH and old EH use different mechanisms to
5110 * synchronize EH with regular execution path.
5111 *
5112 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5113 * Normal execution path is responsible for not accessing a
5114 * failed qc. libata core enforces the rule by returning NULL
5115 * from ata_qc_from_tag() for failed qcs.
5116 *
5117 * Old EH depends on ata_qc_complete() nullifying completion
5118 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5119 * not synchronize with interrupt handler. Only PIO task is
5120 * taken care of.
5121 */
5122 if (ap->ops->error_handler) {
5123 struct ata_device *dev = qc->dev;
5124 struct ata_eh_info *ehi = &dev->link->eh_info;
5125
5126 if (unlikely(qc->err_mask))
5127 qc->flags |= ATA_QCFLAG_FAILED;
5128
5129 /*
5130 * Finish internal commands without any further processing
5131 * and always with the result TF filled.
5132 */
5133 if (unlikely(ata_tag_internal(qc->tag))) {
5134 fill_result_tf(qc);
5135 trace_ata_qc_complete_internal(qc);
5136 __ata_qc_complete(qc);
5137 return;
5138 }
5139
5140 /*
5141 * Non-internal qc has failed. Fill the result TF and
5142 * summon EH.
5143 */
5144 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5145 fill_result_tf(qc);
5146 trace_ata_qc_complete_failed(qc);
5147 ata_qc_schedule_eh(qc);
5148 return;
5149 }
5150
5151 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5152
5153 /* read result TF if requested */
5154 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5155 fill_result_tf(qc);
5156
5157 trace_ata_qc_complete_done(qc);
5158 /* Some commands need post-processing after successful
5159 * completion.
5160 */
5161 switch (qc->tf.command) {
5162 case ATA_CMD_SET_FEATURES:
5163 if (qc->tf.feature != SETFEATURES_WC_ON &&
5164 qc->tf.feature != SETFEATURES_WC_OFF &&
5165 qc->tf.feature != SETFEATURES_RA_ON &&
5166 qc->tf.feature != SETFEATURES_RA_OFF)
5167 break;
5168 /* fall through */
5169 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5170 case ATA_CMD_SET_MULTI: /* multi_count changed */
5171 /* revalidate device */
5172 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5173 ata_port_schedule_eh(ap);
5174 break;
5175
5176 case ATA_CMD_SLEEP:
5177 dev->flags |= ATA_DFLAG_SLEEPING;
5178 break;
5179 }
5180
5181 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5182 ata_verify_xfer(qc);
5183
5184 __ata_qc_complete(qc);
5185 } else {
5186 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5187 return;
5188
5189 /* read result TF if failed or requested */
5190 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5191 fill_result_tf(qc);
5192
5193 __ata_qc_complete(qc);
5194 }
5195 }
5196
5197 /**
5198 * ata_qc_complete_multiple - Complete multiple qcs successfully
5199 * @ap: port in question
5200 * @qc_active: new qc_active mask
5201 *
5202 * Complete in-flight commands.  This function is meant to be
5203 * called from the low-level driver's interrupt routine to complete
5204 * requests normally.  ap->qc_active and @qc_active are compared
5205 * and commands are completed accordingly.
5206 *
5207 * Always use this function when completing multiple NCQ commands
5208 * from IRQ handlers instead of calling ata_qc_complete()
5209 * multiple times to keep IRQ expect status properly in sync.
5210 *
5211 * LOCKING:
5212 * spin_lock_irqsave(host lock)
5213 *
5214 * RETURNS:
5215 * Number of completed commands on success, -errno otherwise.
5216 */
5217 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5218 {
5219 int nr_done = 0;
5220 u32 done_mask;
5221
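	/* tags set in ->qc_active but cleared in @qc_active have just completed */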
5222 done_mask = ap->qc_active ^ qc_active;
5223
5224 if (unlikely(done_mask & qc_active)) {
5225 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5226 ap->qc_active, qc_active);
5227 return -EINVAL;
5228 }
5229
5230 while (done_mask) {
5231 struct ata_queued_cmd *qc;
5232 unsigned int tag = __ffs(done_mask);
5233
5234 qc = ata_qc_from_tag(ap, tag);
5235 if (qc) {
5236 ata_qc_complete(qc);
5237 nr_done++;
5238 }
5239 done_mask &= ~(1 << tag);
5240 }
5241
5242 return nr_done;
5243 }
5244
5245 /**
5246 * ata_qc_issue - issue taskfile to device
5247 * @qc: command to issue to device
5248 *
5249 * Prepare an ATA command for submission to the device.
5250 * This includes mapping the data into a DMA-able
5251 * area, filling in the S/G table, and finally
5252 * writing the taskfile to hardware, starting the command.
5253 *
5254 * LOCKING:
5255 * spin_lock_irqsave(host lock)
5256 */
5257 void ata_qc_issue(struct ata_queued_cmd *qc)
5258 {
5259 struct ata_port *ap = qc->ap;
5260 struct ata_link *link = qc->dev->link;
5261 u8 prot = qc->tf.protocol;
5262
5263 /* Make sure only one non-NCQ command is outstanding. The
5264 * check is skipped for old EH because it reuses active qc to
5265 * request ATAPI sense.
5266 */
5267 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5268
5269 if (ata_is_ncq(prot)) {
5270 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5271
5272 if (!link->sactive)
5273 ap->nr_active_links++;
5274 link->sactive |= 1 << qc->tag;
5275 } else {
5276 WARN_ON_ONCE(link->sactive);
5277
5278 ap->nr_active_links++;
5279 link->active_tag = qc->tag;
5280 }
5281
5282 qc->flags |= ATA_QCFLAG_ACTIVE;
5283 ap->qc_active |= 1 << qc->tag;
5284
5285 /*
5286 * We guarantee to LLDs that they will have at least one
5287 * non-zero sg if the command is a data command.
5288 */
5289 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5290 goto sys_err;
5291
5292 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5293 (ap->flags & ATA_FLAG_PIO_DMA)))
5294 if (ata_sg_setup(qc))
5295 goto sys_err;
5296
5297 /* if device is sleeping, schedule reset and abort the link */
5298 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5299 link->eh_info.action |= ATA_EH_RESET;
5300 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5301 ata_link_abort(link);
5302 return;
5303 }
5304
5305 ap->ops->qc_prep(qc);
5306 trace_ata_qc_issue(qc);
5307 qc->err_mask |= ap->ops->qc_issue(qc);
5308 if (unlikely(qc->err_mask))
5309 goto err;
5310 return;
5311
5312 sys_err:
5313 qc->err_mask |= AC_ERR_SYSTEM;
5314 err:
5315 ata_qc_complete(qc);
5316 }
5317
5318 /**
5319 * sata_scr_valid - test whether SCRs are accessible
5320 * @link: ATA link to test SCR accessibility for
5321 *
5322 * Test whether SCRs are accessible for @link.
5323 *
5324 * LOCKING:
5325 * None.
5326 *
5327 * RETURNS:
5328 * 1 if SCRs are accessible, 0 otherwise.
5329 */
5330 int sata_scr_valid(struct ata_link *link)
5331 {
5332 struct ata_port *ap = link->ap;
5333
5334 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5335 }
5336
5337 /**
5338 * sata_scr_read - read SCR register of the specified port
5339 * @link: ATA link to read SCR for
5340 * @reg: SCR to read
5341 * @val: Place to store read value
5342 *
5343 * Read SCR register @reg of @link into *@val. This function is
5344 * guaranteed to succeed if @link is ap->link, the cable type of
5345 * the port is SATA and the port implements ->scr_read.
5346 *
5347 * LOCKING:
5348 * None if @link is ap->link. Kernel thread context otherwise.
5349 *
5350 * RETURNS:
5351 * 0 on success, negative errno on failure.
5352 */
5353 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5354 {
5355 if (ata_is_host_link(link)) {
5356 if (sata_scr_valid(link))
5357 return link->ap->ops->scr_read(link, reg, val);
5358 return -EOPNOTSUPP;
5359 }
5360
5361 return sata_pmp_scr_read(link, reg, val);
5362 }
5363
5364 /**
5365 * sata_scr_write - write SCR register of the specified port
5366 * @link: ATA link to write SCR for
5367 * @reg: SCR to write
5368 * @val: value to write
5369 *
5370 * Write @val to SCR register @reg of @link. This function is
5371 * guaranteed to succeed if @link is ap->link, the cable type of
5372 * the port is SATA and the port implements ->scr_read.
5373 *
5374 * LOCKING:
5375 * None if @link is ap->link. Kernel thread context otherwise.
5376 *
5377 * RETURNS:
5378 * 0 on success, negative errno on failure.
5379 */
5380 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5381 {
5382 if (ata_is_host_link(link)) {
5383 if (sata_scr_valid(link))
5384 return link->ap->ops->scr_write(link, reg, val);
5385 return -EOPNOTSUPP;
5386 }
5387
5388 return sata_pmp_scr_write(link, reg, val);
5389 }
5390
5391 /**
5392 * sata_scr_write_flush - write SCR register of the specified port and flush
5393 * @link: ATA link to write SCR for
5394 * @reg: SCR to write
5395 * @val: value to write
5396 *
5397 * This function is identical to sata_scr_write() except that this
5398 * function performs flush after writing to the register.
5399 *
5400 * LOCKING:
5401 * None if @link is ap->link. Kernel thread context otherwise.
5402 *
5403 * RETURNS:
5404 * 0 on success, negative errno on failure.
5405 */
5406 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5407 {
5408 if (ata_is_host_link(link)) {
5409 int rc;
5410
5411 if (sata_scr_valid(link)) {
5412 rc = link->ap->ops->scr_write(link, reg, val);
5413 if (rc == 0)
5414 rc = link->ap->ops->scr_read(link, reg, &val);
5415 return rc;
5416 }
5417 return -EOPNOTSUPP;
5418 }
5419
5420 return sata_pmp_scr_write(link, reg, val);
5421 }
5422
5423 /**
5424 * ata_phys_link_online - test whether the given link is online
5425 * @link: ATA link to test
5426 *
5427 * Test whether @link is online. Note that this function returns
5428 * 0 if online status of @link cannot be obtained, so
5429 * ata_link_online(link) != !ata_link_offline(link).
5430 *
5431 * LOCKING:
5432 * None.
5433 *
5434 * RETURNS:
5435 * True if the port online status is available and online.
5436 */
5437 bool ata_phys_link_online(struct ata_link *link)
5438 {
5439 u32 sstatus;
5440
5441 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5442 ata_sstatus_online(sstatus))
5443 return true;
5444 return false;
5445 }
5446
5447 /**
5448 * ata_phys_link_offline - test whether the given link is offline
5449 * @link: ATA link to test
5450 *
5451 * Test whether @link is offline. Note that this function
5452 * returns 0 if offline status of @link cannot be obtained, so
5453 * ata_link_online(link) != !ata_link_offline(link).
5454 *
5455 * LOCKING:
5456 * None.
5457 *
5458 * RETURNS:
5459 * True if the port offline status is available and offline.
5460 */
5461 bool ata_phys_link_offline(struct ata_link *link)
5462 {
5463 u32 sstatus;
5464
5465 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5466 !ata_sstatus_online(sstatus))
5467 return true;
5468 return false;
5469 }
5470
5471 /**
5472 * ata_link_online - test whether the given link is online
5473 * @link: ATA link to test
5474 *
5475 * Test whether @link is online. This is identical to
5476 * ata_phys_link_online() when there's no slave link. When
5477 * there's a slave link, this function should only be called on
5478 * the master link and will return true if any of M/S links is
5479 * online.
5480 *
5481 * LOCKING:
5482 * None.
5483 *
5484 * RETURNS:
5485 * True if the port online status is available and online.
5486 */
5487 bool ata_link_online(struct ata_link *link)
5488 {
5489 struct ata_link *slave = link->ap->slave_link;
5490
5491 WARN_ON(link == slave); /* shouldn't be called on slave link */
5492
5493 return ata_phys_link_online(link) ||
5494 (slave && ata_phys_link_online(slave));
5495 }
5496
5497 /**
5498 * ata_link_offline - test whether the given link is offline
5499 * @link: ATA link to test
5500 *
5501 * Test whether @link is offline. This is identical to
5502 * ata_phys_link_offline() when there's no slave link. When
5503 * there's a slave link, this function should only be called on
5504 * the master link and will return true if both M/S links are
5505 * offline.
5506 *
5507 * LOCKING:
5508 * None.
5509 *
5510 * RETURNS:
5511 * True if the port offline status is available and offline.
5512 */
5513 bool ata_link_offline(struct ata_link *link)
5514 {
5515 struct ata_link *slave = link->ap->slave_link;
5516
5517 WARN_ON(link == slave); /* shouldn't be called on slave link */
5518
5519 return ata_phys_link_offline(link) &&
5520 (!slave || ata_phys_link_offline(slave));
5521 }
5522
5523 #ifdef CONFIG_PM
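/*
 * Ask EH to perform a PM operation (@mesg) on every link of @ap.  If
 * @async is false, wait until EH has finished processing the request.
 */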
5524 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5525 unsigned int action, unsigned int ehi_flags,
5526 bool async)
5527 {
5528 struct ata_link *link;
5529 unsigned long flags;
5530
5531 /* Previous resume operation might still be in
5532 * progress. Wait for PM_PENDING to clear.
5533 */
5534 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5535 ata_port_wait_eh(ap);
5536 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5537 }
5538
5539 /* request PM ops to EH */
5540 spin_lock_irqsave(ap->lock, flags);
5541
5542 ap->pm_mesg = mesg;
5543 ap->pflags |= ATA_PFLAG_PM_PENDING;
5544 ata_for_each_link(link, ap, HOST_FIRST) {
5545 link->eh_info.action |= action;
5546 link->eh_info.flags |= ehi_flags;
5547 }
5548
5549 ata_port_schedule_eh(ap);
5550
5551 spin_unlock_irqrestore(ap->lock, flags);
5552
5553 if (!async) {
5554 ata_port_wait_eh(ap);
5555 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5556 }
5557 }
5558
5559 /*
5560 * On some hardware, the device fails to respond after being spun down for suspend. As
5561 * the device won't be used before being resumed, we don't need to touch the
5562 * device. Ask EH to skip the usual stuff and proceed directly to suspend.
5563 *
5564 * http://thread.gmane.org/gmane.linux.ide/46764
5565 */
5566 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5567 | ATA_EHI_NO_AUTOPSY
5568 | ATA_EHI_NO_RECOVERY;
5569
5570 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5571 {
5572 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5573 }
5574
5575 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5576 {
5577 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5578 }
5579
5580 static int ata_port_pm_suspend(struct device *dev)
5581 {
5582 struct ata_port *ap = to_ata_port(dev);
5583
5584 if (pm_runtime_suspended(dev))
5585 return 0;
5586
5587 ata_port_suspend(ap, PMSG_SUSPEND);
5588 return 0;
5589 }
5590
5591 static int ata_port_pm_freeze(struct device *dev)
5592 {
5593 struct ata_port *ap = to_ata_port(dev);
5594
5595 if (pm_runtime_suspended(dev))
5596 return 0;
5597
5598 ata_port_suspend(ap, PMSG_FREEZE);
5599 return 0;
5600 }
5601
5602 static int ata_port_pm_poweroff(struct device *dev)
5603 {
5604 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5605 return 0;
5606 }
5607
5608 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5609 | ATA_EHI_QUIET;
5610
5611 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5612 {
5613 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5614 }
5615
5616 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5617 {
5618 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5619 }
5620
5621 static int ata_port_pm_resume(struct device *dev)
5622 {
5623 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5624 pm_runtime_disable(dev);
5625 pm_runtime_set_active(dev);
5626 pm_runtime_enable(dev);
5627 return 0;
5628 }
5629
5630 /*
5631 * For ODDs, the upper layer will poll for media change every few seconds,
5632 * which will make it enter and leave suspend state every few seconds. And
5633 * as each suspend will cause a hard/soft reset, the gain from runtime suspend
5634 * is very small and the ODD may malfunction after being constantly reset.
5635 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5636 * ODD is attached to the port.
5637 */
5638 static int ata_port_runtime_idle(struct device *dev)
5639 {
5640 struct ata_port *ap = to_ata_port(dev);
5641 struct ata_link *link;
5642 struct ata_device *adev;
5643
5644 ata_for_each_link(link, ap, HOST_FIRST) {
5645 ata_for_each_dev(adev, link, ENABLED)
5646 if (adev->class == ATA_DEV_ATAPI &&
5647 !zpodd_dev_enabled(adev))
5648 return -EBUSY;
5649 }
5650
5651 return 0;
5652 }
5653
5654 static int ata_port_runtime_suspend(struct device *dev)
5655 {
5656 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5657 return 0;
5658 }
5659
5660 static int ata_port_runtime_resume(struct device *dev)
5661 {
5662 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5663 return 0;
5664 }
5665
5666 static const struct dev_pm_ops ata_port_pm_ops = {
5667 .suspend = ata_port_pm_suspend,
5668 .resume = ata_port_pm_resume,
5669 .freeze = ata_port_pm_freeze,
5670 .thaw = ata_port_pm_resume,
5671 .poweroff = ata_port_pm_poweroff,
5672 .restore = ata_port_pm_resume,
5673
5674 .runtime_suspend = ata_port_runtime_suspend,
5675 .runtime_resume = ata_port_runtime_resume,
5676 .runtime_idle = ata_port_runtime_idle,
5677 };
5678
5679 /* sas ports don't participate in pm runtime management of ata_ports,
5680 * and need to resume ata devices at the domain level, not the per-port
5681 * level. sas suspend/resume is async to allow parallel port recovery
5682 * since sas has multiple ata_port instances per Scsi_Host.
5683 */
5684 void ata_sas_port_suspend(struct ata_port *ap)
5685 {
5686 ata_port_suspend_async(ap, PMSG_SUSPEND);
5687 }
5688 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5689
5690 void ata_sas_port_resume(struct ata_port *ap)
5691 {
5692 ata_port_resume_async(ap, PMSG_RESUME);
5693 }
5694 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5695
5696 /**
5697 * ata_host_suspend - suspend host
5698 * @host: host to suspend
5699 * @mesg: PM message
5700 *
5701 * Suspend @host. Actual operation is performed by port suspend.
5702 */
5703 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5704 {
5705 host->dev->power.power_state = mesg;
5706 return 0;
5707 }
5708
5709 /**
5710 * ata_host_resume - resume host
5711 * @host: host to resume
5712 *
5713 * Resume @host. Actual operation is performed by port resume.
5714 */
5715 void ata_host_resume(struct ata_host *host)
5716 {
5717 host->dev->power.power_state = PMSG_ON;
5718 }
5719 #endif
5720
5721 struct device_type ata_port_type = {
5722 .name = "ata_port",
5723 #ifdef CONFIG_PM
5724 .pm = &ata_port_pm_ops,
5725 #endif
5726 };
5727
5728 /**
5729 * ata_dev_init - Initialize an ata_device structure
5730 * @dev: Device structure to initialize
5731 *
5732 * Initialize @dev in preparation for probing.
5733 *
5734 * LOCKING:
5735 * Inherited from caller.
5736 */
5737 void ata_dev_init(struct ata_device *dev)
5738 {
5739 struct ata_link *link = ata_dev_phys_link(dev);
5740 struct ata_port *ap = link->ap;
5741 unsigned long flags;
5742
5743 /* SATA spd limit is bound to the attached device, reset together */
5744 link->sata_spd_limit = link->hw_sata_spd_limit;
5745 link->sata_spd = 0;
5746
5747 /* High bits of dev->flags are used to record warm plug
5748 * requests which occur asynchronously. Synchronize using
5749 * host lock.
5750 */
5751 spin_lock_irqsave(ap->lock, flags);
5752 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5753 dev->horkage = 0;
5754 spin_unlock_irqrestore(ap->lock, flags);
5755
5756 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5757 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5758 dev->pio_mask = UINT_MAX;
5759 dev->mwdma_mask = UINT_MAX;
5760 dev->udma_mask = UINT_MAX;
5761 }
5762
5763 /**
5764 * ata_link_init - Initialize an ata_link structure
5765 * @ap: ATA port link is attached to
5766 * @link: Link structure to initialize
5767 * @pmp: Port multiplier port number
5768 *
5769 * Initialize @link.
5770 *
5771 * LOCKING:
5772 * Kernel thread context (may sleep)
5773 */
5774 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5775 {
5776 int i;
5777
5778 /* clear everything except for devices */
5779 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5780 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5781
5782 link->ap = ap;
5783 link->pmp = pmp;
5784 link->active_tag = ATA_TAG_POISON;
5785 link->hw_sata_spd_limit = UINT_MAX;
5786
5787 /* can't use iterator, ap isn't initialized yet */
5788 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5789 struct ata_device *dev = &link->device[i];
5790
5791 dev->link = link;
5792 dev->devno = dev - link->device;
5793 #ifdef CONFIG_ATA_ACPI
5794 dev->gtf_filter = ata_acpi_gtf_filter;
5795 #endif
5796 ata_dev_init(dev);
5797 }
5798 }
5799
5800 /**
5801 * sata_link_init_spd - Initialize link->sata_spd_limit
5802 * @link: Link to configure sata_spd_limit for
5803 *
5804 * Initialize @link->[hw_]sata_spd_limit to the currently
5805 * configured value.
5806 *
5807 * LOCKING:
5808 * Kernel thread context (may sleep).
5809 *
5810 * RETURNS:
5811 * 0 on success, -errno on failure.
5812 */
5813 int sata_link_init_spd(struct ata_link *link)
5814 {
5815 u8 spd;
5816 int rc;
5817
5818 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5819 if (rc)
5820 return rc;
5821
5822 spd = (link->saved_scontrol >> 4) & 0xf;
5823 if (spd)
5824 link->hw_sata_spd_limit &= (1 << spd) - 1;
5825
5826 ata_force_link_limits(link);
5827
5828 link->sata_spd_limit = link->hw_sata_spd_limit;
5829
5830 return 0;
5831 }
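
/*
 * Worked example (illustrative): if the SPD field of the saved SControl
 * reads 2 (link administratively limited to 3.0Gbps), then
 * (1 << 2) - 1 == 0x3 and hw_sata_spd_limit keeps only bit 0 (1.5Gbps)
 * and bit 1 (3.0Gbps); higher generations are masked off.  An SPD field
 * of 0 means "no limit" and leaves the mask untouched.
 */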
5832
5833 /**
5834 * ata_port_alloc - allocate and initialize basic ATA port resources
5835 * @host: ATA host this allocated port belongs to
5836 *
5837 * Allocate and initialize basic ATA port resources.
5838 *
5839 * RETURNS:
5840 * Allocate ATA port on success, NULL on failure.
5841 *
5842 * LOCKING:
5843 * Inherited from calling layer (may sleep).
5844 */
5845 struct ata_port *ata_port_alloc(struct ata_host *host)
5846 {
5847 struct ata_port *ap;
5848
5849 DPRINTK("ENTER\n");
5850
5851 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5852 if (!ap)
5853 return NULL;
5854
5855 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5856 ap->lock = &host->lock;
5857 ap->print_id = -1;
5858 ap->local_port_no = -1;
5859 ap->host = host;
5860 ap->dev = host->dev;
5861
5862 #if defined(ATA_VERBOSE_DEBUG)
5863 /* turn on all debugging levels */
5864 ap->msg_enable = 0x00FF;
5865 #elif defined(ATA_DEBUG)
5866 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5867 #else
5868 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5869 #endif
5870
5871 mutex_init(&ap->scsi_scan_mutex);
5872 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5873 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5874 INIT_LIST_HEAD(&ap->eh_done_q);
5875 init_waitqueue_head(&ap->eh_wait_q);
5876 init_completion(&ap->park_req_pending);
5877 init_timer_deferrable(&ap->fastdrain_timer);
5878 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5879 ap->fastdrain_timer.data = (unsigned long)ap;
5880
5881 ap->cbl = ATA_CBL_NONE;
5882
5883 ata_link_init(ap, &ap->link, 0);
5884
5885 #ifdef ATA_IRQ_TRAP
5886 ap->stats.unhandled_irq = 1;
5887 ap->stats.idle_irq = 1;
5888 #endif
5889 ata_sff_port_init(ap);
5890
5891 return ap;
5892 }
5893
5894 static void ata_host_release(struct device *gendev, void *res)
5895 {
5896 struct ata_host *host = dev_get_drvdata(gendev);
5897 int i;
5898
5899 for (i = 0; i < host->n_ports; i++) {
5900 struct ata_port *ap = host->ports[i];
5901
5902 if (!ap)
5903 continue;
5904
5905 if (ap->scsi_host)
5906 scsi_host_put(ap->scsi_host);
5907
5908 kfree(ap->pmp_link);
5909 kfree(ap->slave_link);
5910 kfree(ap);
5911 host->ports[i] = NULL;
5912 }
5913
5914 dev_set_drvdata(gendev, NULL);
5915 }
5916
5917 /**
5918 * ata_host_alloc - allocate and init basic ATA host resources
5919 * @dev: generic device this host is associated with
5920 * @max_ports: maximum number of ATA ports associated with this host
5921 *
5922 * Allocate and initialize basic ATA host resources. LLD calls
5923 * this function to allocate a host, initializes it fully and
5924 * attaches it using ata_host_register().
5925 *
5926 * @max_ports ports are allocated and host->n_ports is
5927 * initialized to @max_ports. The caller is allowed to decrease
5928 * host->n_ports before calling ata_host_register(). The unused
5929 * ports will be automatically freed on registration.
5930 *
5931 * RETURNS:
5932 * Allocate ATA host on success, NULL on failure.
5933 *
5934 * LOCKING:
5935 * Inherited from calling layer (may sleep).
5936 */
5937 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5938 {
5939 struct ata_host *host;
5940 size_t sz;
5941 int i;
5942
5943 DPRINTK("ENTER\n");
5944
5945 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5946 return NULL;
5947
5948 /* alloc a container for our list of ATA ports (buses) */
5949 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5951 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5952 if (!host)
5953 goto err_out;
5954
5955 devres_add(dev, host);
5956 dev_set_drvdata(dev, host);
5957
5958 spin_lock_init(&host->lock);
5959 mutex_init(&host->eh_mutex);
5960 host->dev = dev;
5961 host->n_ports = max_ports;
5962
5963 /* allocate ports bound to this host */
5964 for (i = 0; i < max_ports; i++) {
5965 struct ata_port *ap;
5966
5967 ap = ata_port_alloc(host);
5968 if (!ap)
5969 goto err_out;
5970
5971 ap->port_no = i;
5972 host->ports[i] = ap;
5973 }
5974
5975 devres_remove_group(dev, NULL);
5976 return host;
5977
5978 err_out:
5979 devres_release_group(dev, NULL);
5980 return NULL;
5981 }
5982
5983 /**
5984 * ata_host_alloc_pinfo - alloc host and init with port_info array
5985 * @dev: generic device this host is associated with
5986 * @ppi: array of ATA port_info to initialize host with
5987 * @n_ports: number of ATA ports attached to this host
5988 *
5989 * Allocate ATA host and initialize with info from @ppi. If NULL
5990 * terminated, @ppi may contain fewer entries than @n_ports. The
5991 * last entry will be used for the remaining ports.
5992 *
5993 * RETURNS:
5994 * Allocate ATA host on success, NULL on failure.
5995 *
5996 * LOCKING:
5997 * Inherited from calling layer (may sleep).
5998 */
5999 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6000 const struct ata_port_info * const * ppi,
6001 int n_ports)
6002 {
6003 const struct ata_port_info *pi;
6004 struct ata_host *host;
6005 int i, j;
6006
6007 host = ata_host_alloc(dev, n_ports);
6008 if (!host)
6009 return NULL;
6010
6011 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6012 struct ata_port *ap = host->ports[i];
6013
6014 if (ppi[j])
6015 pi = ppi[j++];
6016
6017 ap->pio_mask = pi->pio_mask;
6018 ap->mwdma_mask = pi->mwdma_mask;
6019 ap->udma_mask = pi->udma_mask;
6020 ap->flags |= pi->flags;
6021 ap->link.flags |= pi->link_flags;
6022 ap->ops = pi->port_ops;
6023
6024 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6025 host->ops = pi->port_ops;
6026 }
6027
6028 return host;
6029 }
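
/*
 * Usage sketch (hypothetical LLD; "my_port_info" and "my_port_ops" are
 * invented names): because @ppi may be NULL terminated, a driver whose
 * ports are identical only needs a single ata_port_info entry:
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, n_ports);
 *
 * The last non-NULL entry is reused for every remaining port.
 */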
6030
6031 /**
6032 * ata_slave_link_init - initialize slave link
6033 * @ap: port to initialize slave link for
6034 *
6035 * Create and initialize slave link for @ap. This enables slave
6036 * link handling on the port.
6037 *
6038 * In libata, a port contains links and a link contains devices.
6039 * There is a single host link but if a PMP is attached to it,
6040 * there can be multiple fan-out links. On SATA, there's usually
6041 * a single device connected to a link but PATA and SATA
6042 * controllers emulating TF based interface can have two - master
6043 * and slave.
6044 *
6045 * However, there are a few controllers which don't fit into this
6046 * abstraction too well - SATA controllers which emulate TF
6047 * interface with both master and slave devices but also have
6048 * separate SCR register sets for each device. These controllers
6049 * need separate links for physical link handling
6050 * (e.g. onlineness, link speed) but should be treated like a
6051 * traditional M/S controller for everything else (e.g. command
6052 * issue, softreset).
6053 *
6054 * slave_link is libata's way of handling this class of
6055 * controllers without impacting core layer too much. For
6056 * anything other than physical link handling, the default host
6057 * link is used for both master and slave. For physical link
6058 * handling, separate @ap->slave_link is used. All dirty details
6059 * are implemented inside libata core layer. From LLD's POV, the
6060 * only difference is that prereset, hardreset and postreset are
6061 * called once more for the slave link, so the reset sequence
6062 * looks like the following.
6063 *
6064 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6065 * softreset(M) -> postreset(M) -> postreset(S)
6066 *
6067 * Note that softreset is called only for the master. Softreset
6068 * resets both M/S by definition, so SRST on master should handle
6069 * both (the standard method will work just fine).
6070 *
6071 * LOCKING:
6072 * Should be called before host is registered.
6073 *
6074 * RETURNS:
6075 * 0 on success, -errno on failure.
6076 */
6077 int ata_slave_link_init(struct ata_port *ap)
6078 {
6079 struct ata_link *link;
6080
6081 WARN_ON(ap->slave_link);
6082 WARN_ON(ap->flags & ATA_FLAG_PMP);
6083
6084 link = kzalloc(sizeof(*link), GFP_KERNEL);
6085 if (!link)
6086 return -ENOMEM;
6087
6088 ata_link_init(ap, link, 1);
6089 ap->slave_link = link;
6090 return 0;
6091 }
6092
6093 static void ata_host_stop(struct device *gendev, void *res)
6094 {
6095 struct ata_host *host = dev_get_drvdata(gendev);
6096 int i;
6097
6098 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6099
6100 for (i = 0; i < host->n_ports; i++) {
6101 struct ata_port *ap = host->ports[i];
6102
6103 if (ap->ops->port_stop)
6104 ap->ops->port_stop(ap);
6105 }
6106
6107 if (host->ops->host_stop)
6108 host->ops->host_stop(host);
6109 }
6110
6111 /**
6112 * ata_finalize_port_ops - finalize ata_port_operations
6113 * @ops: ata_port_operations to finalize
6114 *
6115 * An ata_port_operations can inherit from another ops and that
6116 * ops can again inherit from another. This can go on as many
6117 * times as necessary as long as there is no loop in the
6118 * inheritance chain.
6119 *
6120 * Ops tables are finalized when the host is started. NULL or
6121 * unspecified entries are inherited from the closest ancestor
6122 * which has the method and the entry is populated with it.
6123 * After finalization, the ops table directly points to all the
6124 * methods and ->inherits is no longer necessary and cleared.
6125 *
6126 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6127 *
6128 * LOCKING:
6129 * None.
6130 */
6131 static void ata_finalize_port_ops(struct ata_port_operations *ops)
6132 {
6133 static DEFINE_SPINLOCK(lock);
6134 const struct ata_port_operations *cur;
6135 void **begin = (void **)ops;
6136 void **end = (void **)&ops->inherits;
6137 void **pp;
6138
6139 if (!ops || !ops->inherits)
6140 return;
6141
6142 spin_lock(&lock);
6143
6144 for (cur = ops->inherits; cur; cur = cur->inherits) {
6145 void **inherit = (void **)cur;
6146
6147 for (pp = begin; pp < end; pp++, inherit++)
6148 if (!*pp)
6149 *pp = *inherit;
6150 }
6151
6152 for (pp = begin; pp < end; pp++)
6153 if (IS_ERR(*pp))
6154 *pp = NULL;
6155
6156 ops->inherits = NULL;
6157
6158 spin_unlock(&lock);
6159 }
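
/*
 * Illustrative sketch of the inheritance described above ("my_port_ops"
 * and "my_hardreset" are hypothetical driver-local names): an ops table
 * only fills in what differs from its ancestor and may force a slot to
 * NULL with ATA_OP_NULL:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 *
 * After finalization every unspecified method points at the nearest
 * ancestor's implementation, ATA_OP_NULL slots become NULL and
 * ->inherits is cleared.
 */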
6160
6161 /**
6162 * ata_host_start - start and freeze ports of an ATA host
6163 * @host: ATA host to start ports for
6164 *
6165 * Start and then freeze ports of @host. Started status is
6166 * recorded in host->flags, so this function can be called
6167 * multiple times. Ports are guaranteed to get started only
6168 * once. If host->ops isn't initialized yet, it's set to the
6169 * first non-dummy port ops.
6170 *
6171 * LOCKING:
6172 * Inherited from calling layer (may sleep).
6173 *
6174 * RETURNS:
6175 * 0 if all ports are started successfully, -errno otherwise.
6176 */
6177 int ata_host_start(struct ata_host *host)
6178 {
6179 int have_stop = 0;
6180 void *start_dr = NULL;
6181 int i, rc;
6182
6183 if (host->flags & ATA_HOST_STARTED)
6184 return 0;
6185
6186 ata_finalize_port_ops(host->ops);
6187
6188 for (i = 0; i < host->n_ports; i++) {
6189 struct ata_port *ap = host->ports[i];
6190
6191 ata_finalize_port_ops(ap->ops);
6192
6193 if (!host->ops && !ata_port_is_dummy(ap))
6194 host->ops = ap->ops;
6195
6196 if (ap->ops->port_stop)
6197 have_stop = 1;
6198 }
6199
6200 if (host->ops->host_stop)
6201 have_stop = 1;
6202
6203 if (have_stop) {
6204 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6205 if (!start_dr)
6206 return -ENOMEM;
6207 }
6208
6209 for (i = 0; i < host->n_ports; i++) {
6210 struct ata_port *ap = host->ports[i];
6211
6212 if (ap->ops->port_start) {
6213 rc = ap->ops->port_start(ap);
6214 if (rc) {
6215 if (rc != -ENODEV)
6216 dev_err(host->dev,
6217 "failed to start port %d (errno=%d)\n",
6218 i, rc);
6219 goto err_out;
6220 }
6221 }
6222 ata_eh_freeze_port(ap);
6223 }
6224
6225 if (start_dr)
6226 devres_add(host->dev, start_dr);
6227 host->flags |= ATA_HOST_STARTED;
6228 return 0;
6229
6230 err_out:
6231 while (--i >= 0) {
6232 struct ata_port *ap = host->ports[i];
6233
6234 if (ap->ops->port_stop)
6235 ap->ops->port_stop(ap);
6236 }
6237 devres_free(start_dr);
6238 return rc;
6239 }
6240
6241 /**
6242 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
6243 * @host: host to initialize
6244 * @dev: device host is attached to
6245 * @ops: port_ops
6246 *
6247 */
6248 void ata_host_init(struct ata_host *host, struct device *dev,
6249 struct ata_port_operations *ops)
6250 {
6251 spin_lock_init(&host->lock);
6252 mutex_init(&host->eh_mutex);
6253 host->n_tags = ATA_MAX_QUEUE - 1;
6254 host->dev = dev;
6255 host->ops = ops;
6256 }
6257
6258 void __ata_port_probe(struct ata_port *ap)
6259 {
6260 struct ata_eh_info *ehi = &ap->link.eh_info;
6261 unsigned long flags;
6262
6263 /* kick EH for boot probing */
6264 spin_lock_irqsave(ap->lock, flags);
6265
6266 ehi->probe_mask |= ATA_ALL_DEVICES;
6267 ehi->action |= ATA_EH_RESET;
6268 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6269
6270 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6271 ap->pflags |= ATA_PFLAG_LOADING;
6272 ata_port_schedule_eh(ap);
6273
6274 spin_unlock_irqrestore(ap->lock, flags);
6275 }
6276
6277 int ata_port_probe(struct ata_port *ap)
6278 {
6279 int rc = 0;
6280
6281 if (ap->ops->error_handler) {
6282 __ata_port_probe(ap);
6283 ata_port_wait_eh(ap);
6284 } else {
6285 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6286 rc = ata_bus_probe(ap);
6287 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6288 }
6289 return rc;
6290 }
6291
6292
6293 static void async_port_probe(void *data, async_cookie_t cookie)
6294 {
6295 struct ata_port *ap = data;
6296
6297 /*
6298 * If we're not allowed to scan this host in parallel,
6299 * we need to wait until all previous scans have completed
6300 * before going further.
6301 * Jeff Garzik says this is only within a controller, so we
6302 * don't need to wait for port 0, only for later ports.
6303 */
6304 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6305 async_synchronize_cookie(cookie);
6306
6307 (void)ata_port_probe(ap);
6308
6309 /* in order to keep device order, we need to synchronize at this point */
6310 async_synchronize_cookie(cookie);
6311
6312 ata_scsi_scan_host(ap, 1);
6313 }
6314
6315 /**
6316 * ata_host_register - register initialized ATA host
6317 * @host: ATA host to register
6318 * @sht: template for SCSI host
6319 *
6320 * Register initialized ATA host. @host is allocated using
6321 * ata_host_alloc() and fully initialized by LLD. This function
6322 * starts ports, registers @host with ATA and SCSI layers and
6323 * probes registered devices.
6324 *
6325 * LOCKING:
6326 * Inherited from calling layer (may sleep).
6327 *
6328 * RETURNS:
6329 * 0 on success, -errno otherwise.
6330 */
6331 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6332 {
6333 int i, rc;
6334
6335 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6336
6337 /* host must have been started */
6338 if (!(host->flags & ATA_HOST_STARTED)) {
6339 dev_err(host->dev, "BUG: trying to register unstarted host\n");
6340 WARN_ON(1);
6341 return -EINVAL;
6342 }
6343
6344 /* Blow away unused ports. This happens when LLD can't
6345 * determine the exact number of ports to allocate at
6346 * allocation time.
6347 */
6348 for (i = host->n_ports; host->ports[i]; i++)
6349 kfree(host->ports[i]);
6350
6351 /* give ports names and add SCSI hosts */
6352 for (i = 0; i < host->n_ports; i++) {
6353 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6354 host->ports[i]->local_port_no = i + 1;
6355 }
6356
6357 /* Create associated sysfs transport objects */
6358 for (i = 0; i < host->n_ports; i++) {
6359 rc = ata_tport_add(host->dev,host->ports[i]);
6360 if (rc) {
6361 goto err_tadd;
6362 }
6363 }
6364
6365 rc = ata_scsi_add_hosts(host, sht);
6366 if (rc)
6367 goto err_tadd;
6368
6369 /* set cable, sata_spd_limit and report */
6370 for (i = 0; i < host->n_ports; i++) {
6371 struct ata_port *ap = host->ports[i];
6372 unsigned long xfer_mask;
6373
6374 /* set SATA cable type if still unset */
6375 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6376 ap->cbl = ATA_CBL_SATA;
6377
6378 /* init sata_spd_limit to the current value */
6379 sata_link_init_spd(&ap->link);
6380 if (ap->slave_link)
6381 sata_link_init_spd(ap->slave_link);
6382
6383 /* print per-port info to dmesg */
6384 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6385 ap->udma_mask);
6386
6387 if (!ata_port_is_dummy(ap)) {
6388 ata_port_info(ap, "%cATA max %s %s\n",
6389 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6390 ata_mode_string(xfer_mask),
6391 ap->link.eh_info.desc);
6392 ata_ehi_clear_desc(&ap->link.eh_info);
6393 } else
6394 ata_port_info(ap, "DUMMY\n");
6395 }
6396
6397 /* perform each probe asynchronously */
6398 for (i = 0; i < host->n_ports; i++) {
6399 struct ata_port *ap = host->ports[i];
6400 async_schedule(async_port_probe, ap);
6401 }
6402
6403 return 0;
6404
6405 err_tadd:
6406 while (--i >= 0) {
6407 ata_tport_delete(host->ports[i]);
6408 }
6409 return rc;
6410
6411 }
6412
6413 /**
6414 * ata_host_activate - start host, request IRQ and register it
6415 * @host: target ATA host
6416 * @irq: IRQ to request
6417 * @irq_handler: irq_handler used when requesting IRQ
6418 * @irq_flags: irq_flags used when requesting IRQ
6419 * @sht: scsi_host_template to use when registering the host
6420 *
6421 * After allocating an ATA host and initializing it, most libata
6422 * LLDs perform three steps to activate the host - start host,
6423 * request IRQ and register it. This helper takes necessary
6424 * arguments and performs the three steps in one go.
6425 *
6426 * An invalid IRQ skips the IRQ registration and expects the host to
6427 * have set polling mode on the port. In this case, @irq_handler
6428 * should be NULL.
6429 *
6430 * LOCKING:
6431 * Inherited from calling layer (may sleep).
6432 *
6433 * RETURNS:
6434 * 0 on success, -errno otherwise.
6435 */
6436 int ata_host_activate(struct ata_host *host, int irq,
6437 irq_handler_t irq_handler, unsigned long irq_flags,
6438 struct scsi_host_template *sht)
6439 {
6440 int i, rc;
6441 char *irq_desc;
6442
6443 rc = ata_host_start(host);
6444 if (rc)
6445 return rc;
6446
6447 /* Special case for polling mode */
6448 if (!irq) {
6449 WARN_ON(irq_handler);
6450 return ata_host_register(host, sht);
6451 }
6452
6453 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6454 dev_driver_string(host->dev),
6455 dev_name(host->dev));
6456 if (!irq_desc)
6457 return -ENOMEM;
6458
6459 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6460 irq_desc, host);
6461 if (rc)
6462 return rc;
6463
6464 for (i = 0; i < host->n_ports; i++)
6465 ata_port_desc(host->ports[i], "irq %d", irq);
6466
6467 rc = ata_host_register(host, sht);
6468 /* if failed, just free the IRQ and leave ports alone */
6469 if (rc)
6470 devm_free_irq(host->dev, irq, host);
6471
6472 return rc;
6473 }
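
/*
 * Typical activation flow (hypothetical platform LLD; every "my_*" name
 * is invented for illustration).  ata_host_activate() folds the last
 * three steps - ata_host_start(), IRQ request and ata_host_register() -
 * into a single call:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		// map registers and set up host->ports[0]->ioaddr here
 *
 *		return ata_host_activate(host, my_irq, my_interrupt,
 *					 IRQF_SHARED, &my_sht);
 *	}
 */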
6474
6475 /**
6476 * ata_port_detach - Detach ATA port in preparation of device removal
6477 * @ap: ATA port to be detached
6478 *
6479 * Detach all ATA devices and the associated SCSI devices of @ap;
6480 * then, remove the associated SCSI host. @ap is guaranteed to
6481 * be quiescent on return from this function.
6482 *
6483 * LOCKING:
6484 * Kernel thread context (may sleep).
6485 */
6486 static void ata_port_detach(struct ata_port *ap)
6487 {
6488 unsigned long flags;
6489 struct ata_link *link;
6490 struct ata_device *dev;
6491
6492 if (!ap->ops->error_handler)
6493 goto skip_eh;
6494
6495 /* tell EH we're leaving & flush EH */
6496 spin_lock_irqsave(ap->lock, flags);
6497 ap->pflags |= ATA_PFLAG_UNLOADING;
6498 ata_port_schedule_eh(ap);
6499 spin_unlock_irqrestore(ap->lock, flags);
6500
6501 /* wait till EH commits suicide */
6502 ata_port_wait_eh(ap);
6503
6504 /* it better be dead now */
6505 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6506
6507 cancel_delayed_work_sync(&ap->hotplug_task);
6508
6509 skip_eh:
6510 /* clean up zpodd on port removal */
6511 ata_for_each_link(link, ap, HOST_FIRST) {
6512 ata_for_each_dev(dev, link, ALL) {
6513 if (zpodd_dev_enabled(dev))
6514 zpodd_exit(dev);
6515 }
6516 }
6517 if (ap->pmp_link) {
6518 int i;
6519 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6520 ata_tlink_delete(&ap->pmp_link[i]);
6521 }
6522 /* remove the associated SCSI host */
6523 scsi_remove_host(ap->scsi_host);
6524 ata_tport_delete(ap);
6525 }
6526
6527 /**
6528 * ata_host_detach - Detach all ports of an ATA host
6529 * @host: Host to detach
6530 *
6531 * Detach all ports of @host.
6532 *
6533 * LOCKING:
6534 * Kernel thread context (may sleep).
6535 */
6536 void ata_host_detach(struct ata_host *host)
6537 {
6538 int i;
6539
6540 for (i = 0; i < host->n_ports; i++)
6541 ata_port_detach(host->ports[i]);
6542
6543 /* the host is dead now, dissociate ACPI */
6544 ata_acpi_dissociate(host);
6545 }
6546
6547 #ifdef CONFIG_PCI
6548
6549 /**
6550 * ata_pci_remove_one - PCI layer callback for device removal
6551 * @pdev: PCI device that was removed
6552 *
6553 * PCI layer indicates to libata via this hook that hot-unplug or
6554 * module unload event has occurred. Detach all ports. Resource
6555 * release is handled via devres.
6556 *
6557 * LOCKING:
6558 * Inherited from PCI layer (may sleep).
6559 */
6560 void ata_pci_remove_one(struct pci_dev *pdev)
6561 {
6562 struct ata_host *host = pci_get_drvdata(pdev);
6563
6564 ata_host_detach(host);
6565 }
6566
6567 /* move to PCI subsystem */
6568 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6569 {
6570 unsigned long tmp = 0;
6571
6572 switch (bits->width) {
6573 case 1: {
6574 u8 tmp8 = 0;
6575 pci_read_config_byte(pdev, bits->reg, &tmp8);
6576 tmp = tmp8;
6577 break;
6578 }
6579 case 2: {
6580 u16 tmp16 = 0;
6581 pci_read_config_word(pdev, bits->reg, &tmp16);
6582 tmp = tmp16;
6583 break;
6584 }
6585 case 4: {
6586 u32 tmp32 = 0;
6587 pci_read_config_dword(pdev, bits->reg, &tmp32);
6588 tmp = tmp32;
6589 break;
6590 }
6591
6592 default:
6593 return -EINVAL;
6594 }
6595
6596 tmp &= bits->mask;
6597
6598 return (tmp == bits->val) ? 1 : 0;
6599 }
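
/*
 * Illustrative use (register offset and bits are made up): struct
 * pci_bits describes a config-space field to test.  A PATA driver might
 * check a port-enable bit like this:
 *
 *	static const struct pci_bits my_enable_bit = {
 *		.reg = 0x41, .width = 1, .mask = 0x80, .val = 0x80,
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bit))
 *		return -ENODEV;
 *
 * The helper returns 1 when the masked value matches, 0 when it doesn't
 * and -EINVAL for an unsupported width.
 */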
6600
6601 #ifdef CONFIG_PM
6602 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6603 {
6604 pci_save_state(pdev);
6605 pci_disable_device(pdev);
6606
6607 if (mesg.event & PM_EVENT_SLEEP)
6608 pci_set_power_state(pdev, PCI_D3hot);
6609 }
6610
6611 int ata_pci_device_do_resume(struct pci_dev *pdev)
6612 {
6613 int rc;
6614
6615 pci_set_power_state(pdev, PCI_D0);
6616 pci_restore_state(pdev);
6617
6618 rc = pcim_enable_device(pdev);
6619 if (rc) {
6620 dev_err(&pdev->dev,
6621 "failed to enable device after resume (%d)\n", rc);
6622 return rc;
6623 }
6624
6625 pci_set_master(pdev);
6626 return 0;
6627 }
6628
6629 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6630 {
6631 struct ata_host *host = pci_get_drvdata(pdev);
6632 int rc = 0;
6633
6634 rc = ata_host_suspend(host, mesg);
6635 if (rc)
6636 return rc;
6637
6638 ata_pci_device_do_suspend(pdev, mesg);
6639
6640 return 0;
6641 }
6642
6643 int ata_pci_device_resume(struct pci_dev *pdev)
6644 {
6645 struct ata_host *host = pci_get_drvdata(pdev);
6646 int rc;
6647
6648 rc = ata_pci_device_do_resume(pdev);
6649 if (rc == 0)
6650 ata_host_resume(host);
6651 return rc;
6652 }
6653 #endif /* CONFIG_PM */
6654
6655 #endif /* CONFIG_PCI */
6656
6657 /**
6658 * ata_platform_remove_one - Platform layer callback for device removal
6659 * @pdev: Platform device that was removed
6660 *
6661 * Platform layer indicates to libata via this hook that hot-unplug or
6662 * module unload event has occurred. Detach all ports. Resource
6663 * release is handled via devres.
6664 *
6665 * LOCKING:
6666 * Inherited from platform layer (may sleep).
6667 */
6668 int ata_platform_remove_one(struct platform_device *pdev)
6669 {
6670 struct ata_host *host = platform_get_drvdata(pdev);
6671
6672 ata_host_detach(host);
6673
6674 return 0;
6675 }
6676
6677 static int __init ata_parse_force_one(char **cur,
6678 struct ata_force_ent *force_ent,
6679 const char **reason)
6680 {
6681 static const struct ata_force_param force_tbl[] __initconst = {
6682 { "40c", .cbl = ATA_CBL_PATA40 },
6683 { "80c", .cbl = ATA_CBL_PATA80 },
6684 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6685 { "unk", .cbl = ATA_CBL_PATA_UNK },
6686 { "ign", .cbl = ATA_CBL_PATA_IGN },
6687 { "sata", .cbl = ATA_CBL_SATA },
6688 { "1.5Gbps", .spd_limit = 1 },
6689 { "3.0Gbps", .spd_limit = 2 },
6690 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6691 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6692 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6693 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
6694 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6695 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6696 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6697 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6698 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6699 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6700 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6701 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6702 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6703 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6704 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6705 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6706 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6707 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6708 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6709 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6710 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6711 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6712 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6713 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6714 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6715 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6716 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6717 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6718 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6719 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6720 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6721 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6722 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6723 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6724 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6725 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6726 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6727 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6728 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6729 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6730 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6731 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6732 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6733 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6734 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6735 };
6736 char *start = *cur, *p = *cur;
6737 char *id, *val, *endp;
6738 const struct ata_force_param *match_fp = NULL;
6739 int nr_matches = 0, i;
6740
6741 /* find where this param ends and update *cur */
6742 while (*p != '\0' && *p != ',')
6743 p++;
6744
6745 if (*p == '\0')
6746 *cur = p;
6747 else
6748 *cur = p + 1;
6749
6750 *p = '\0';
6751
6752 /* parse */
6753 p = strchr(start, ':');
6754 if (!p) {
6755 val = strstrip(start);
6756 goto parse_val;
6757 }
6758 *p = '\0';
6759
6760 id = strstrip(start);
6761 val = strstrip(p + 1);
6762
6763 /* parse id */
6764 p = strchr(id, '.');
6765 if (p) {
6766 *p++ = '\0';
6767 force_ent->device = simple_strtoul(p, &endp, 10);
6768 if (p == endp || *endp != '\0') {
6769 *reason = "invalid device";
6770 return -EINVAL;
6771 }
6772 }
6773
6774 force_ent->port = simple_strtoul(id, &endp, 10);
6775 if (id == endp || *endp != '\0') {
6776 *reason = "invalid port/link";
6777 return -EINVAL;
6778 }
6779
6780 parse_val:
6781 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6782 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6783 const struct ata_force_param *fp = &force_tbl[i];
6784
6785 if (strncasecmp(val, fp->name, strlen(val)))
6786 continue;
6787
6788 nr_matches++;
6789 match_fp = fp;
6790
6791 if (strcasecmp(val, fp->name) == 0) {
6792 nr_matches = 1;
6793 break;
6794 }
6795 }
6796
6797 if (!nr_matches) {
6798 *reason = "unknown value";
6799 return -EINVAL;
6800 }
6801 if (nr_matches > 1) {
6802 *reason = "ambigious value";
6803 return -EINVAL;
6804 }
6805
6806 force_ent->param = *match_fp;
6807
6808 return 0;
6809 }
6810
6811 static void __init ata_parse_force_param(void)
6812 {
6813 int idx = 0, size = 1;
6814 int last_port = -1, last_device = -1;
6815 char *p, *cur, *next;
6816
6817 /* calculate maximum number of params and allocate force_tbl */
6818 for (p = ata_force_param_buf; *p; p++)
6819 if (*p == ',')
6820 size++;
6821
6822 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6823 if (!ata_force_tbl) {
6824 printk(KERN_WARNING "ata: failed to extend force table, "
6825 "libata.force ignored\n");
6826 return;
6827 }
6828
6829 /* parse and populate the table */
6830 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6831 const char *reason = "";
6832 struct ata_force_ent te = { .port = -1, .device = -1 };
6833
6834 next = cur;
6835 if (ata_parse_force_one(&next, &te, &reason)) {
6836 printk(KERN_WARNING "ata: failed to parse force "
6837 "parameter \"%s\" (%s)\n",
6838 cur, reason);
6839 continue;
6840 }
6841
6842 if (te.port == -1) {
6843 te.port = last_port;
6844 te.device = last_device;
6845 }
6846
6847 ata_force_tbl[idx++] = te;
6848
6849 last_port = te.port;
6850 last_device = te.device;
6851 }
6852
6853 ata_force_tbl_size = idx;
6854 }
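
/*
 * Illustrative examples of the libata.force grammar handled above, a
 * comma separated list of "[PORT[.DEVICE]:]VALUE" entries:
 *
 *	libata.force=3.0Gbps		  limit all ports to 3.0Gbps
 *	libata.force=2:noncq		  disable NCQ on port 2
 *	libata.force=1.00:disable	  disable device 00 on port 1
 *	libata.force=4:1.5Gbps,4.01:noncq
 *
 * An entry without an ID reuses the previous entry's PORT/DEVICE, or
 * applies to all ports and devices if no ID has been given yet.  Value
 * matching is case-insensitive and accepts unambiguous prefixes, so
 * "1.5" works for "1.5Gbps".
 */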
6855
6856 static int __init ata_init(void)
6857 {
6858 int rc;
6859
6860 ata_parse_force_param();
6861
6862 rc = ata_sff_init();
6863 if (rc) {
6864 kfree(ata_force_tbl);
6865 return rc;
6866 }
6867
6868 libata_transport_init();
6869 ata_scsi_transport_template = ata_attach_transport();
6870 if (!ata_scsi_transport_template) {
6871 ata_sff_exit();
6872 rc = -ENOMEM;
6873 goto err_out;
6874 }
6875
6876 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6877 return 0;
6878
6879 err_out:
6880 return rc;
6881 }
6882
6883 static void __exit ata_exit(void)
6884 {
6885 ata_release_transport(ata_scsi_transport_template);
6886 libata_transport_exit();
6887 ata_sff_exit();
6888 kfree(ata_force_tbl);
6889 }
6890
6891 subsys_initcall(ata_init);
6892 module_exit(ata_exit);
6893
6894 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6895
6896 int ata_ratelimit(void)
6897 {
6898 return __ratelimit(&ratelimit);
6899 }
6900
6901 /**
6902 * ata_msleep - ATA EH owner aware msleep
6903 * @ap: ATA port to attribute the sleep to
6904 * @msecs: duration to sleep in milliseconds
6905 *
6906 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6907 * ownership is released before going to sleep and reacquired
6908 * after the sleep is complete. IOW, other ports sharing the
6909 * @ap->host will be allowed to own the EH while this task is
6910 * sleeping.
6911 *
6912 * LOCKING:
6913 * Might sleep.
6914 */
6915 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6916 {
6917 bool owns_eh = ap && ap->host->eh_owner == current;
6918
6919 if (owns_eh)
6920 ata_eh_release(ap);
6921
6922 if (msecs < 20) {
6923 unsigned long usecs = msecs * USEC_PER_MSEC;
6924 usleep_range(usecs, usecs + 50);
6925 } else {
6926 msleep(msecs);
6927 }
6928
6929 if (owns_eh)
6930 ata_eh_acquire(ap);
6931 }
6932
6933 /**
6934 * ata_wait_register - wait until register value changes
6935 * @ap: ATA port to wait register for, can be NULL
6936 * @reg: IO-mapped register
6937 * @mask: Mask to apply to read register value
6938 * @val: Wait condition
6939 * @interval: polling interval in milliseconds
6940 * @timeout: timeout in milliseconds
6941 *
6942 * Waiting for some bits of register to change is a common
6943 * operation for ATA controllers. This function reads 32bit LE
6944 * IO-mapped register @reg and tests for the following condition.
6945 *
6946 * (*@reg & mask) != val
6947 *
6948 * If the condition is met, it returns; otherwise, the process is
6949 * repeated after @interval until timeout.
6950 *
6951 * LOCKING:
6952 * Kernel thread context (may sleep)
6953 *
6954 * RETURNS:
6955 * The final register value.
6956 */
6957 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6958 unsigned long interval, unsigned long timeout)
6959 {
6960 unsigned long deadline;
6961 u32 tmp;
6962
6963 tmp = ioread32(reg);
6964
6965 /* Calculate timeout _after_ the first read to make sure
6966 * preceding writes reach the controller before starting to
6967 * eat away the timeout.
6968 */
6969 deadline = ata_deadline(jiffies, timeout);
6970
6971 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6972 ata_msleep(ap, interval);
6973 tmp = ioread32(reg);
6974 }
6975
6976 return tmp;
6977 }
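
/*
 * Usage sketch (illustrative; "mmio", MY_STATUS and MY_BUSY are
 * hypothetical): poll a 32bit status register until its busy bit clears,
 * checking every 10ms for up to 1000ms, then inspect the final value:
 *
 *	u32 status = ata_wait_register(ap, mmio + MY_STATUS,
 *				       MY_BUSY, MY_BUSY, 10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;	// timed out, bit never cleared
 *
 * The loop keeps polling while (read value & mask) == val, so it returns
 * as soon as the masked value differs from @val or the timeout expires.
 */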
6978
6979 /**
6980 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
6981 * @link: Link receiving the event
6982 *
6983 * Test whether the received PHY event has to be ignored or not.
6984 *
6985 * LOCKING:
6986 * None.
6987 *
6988 * RETURNS:
6989 * True if the event has to be ignored.
6990 */
6991 bool sata_lpm_ignore_phy_events(struct ata_link *link)
6992 {
6993 unsigned long lpm_timeout = link->last_lpm_change +
6994 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6995
6996 /* if LPM is enabled, PHYRDY doesn't mean anything */
6997 if (link->lpm_policy > ATA_LPM_MAX_POWER)
6998 return true;
6999
7000 /* ignore the first PHY event after the LPM policy changed
7001 * as it might be spurious
7002 */
7003 if ((link->flags & ATA_LFLAG_CHANGED) &&
7004 time_before(jiffies, lpm_timeout))
7005 return true;
7006
7007 return false;
7008 }
7009 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7010
7011 /*
7012 * Dummy port_ops
7013 */
7014 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7015 {
7016 return AC_ERR_SYSTEM;
7017 }
7018
7019 static void ata_dummy_error_handler(struct ata_port *ap)
7020 {
7021 /* truly dummy */
7022 }
7023
7024 struct ata_port_operations ata_dummy_port_ops = {
7025 .qc_prep = ata_noop_qc_prep,
7026 .qc_issue = ata_dummy_qc_issue,
7027 .error_handler = ata_dummy_error_handler,
7028 .sched_eh = ata_std_sched_eh,
7029 .end_eh = ata_std_end_eh,
7030 };
7031
7032 const struct ata_port_info ata_dummy_port_info = {
7033 .port_ops = &ata_dummy_port_ops,
7034 };
7035
7036 /*
7037 * Utility print functions
7038 */
7039 void ata_port_printk(const struct ata_port *ap, const char *level,
7040 const char *fmt, ...)
7041 {
7042 struct va_format vaf;
7043 va_list args;
7044
7045 va_start(args, fmt);
7046
7047 vaf.fmt = fmt;
7048 vaf.va = &args;
7049
7050 printk("%sata%u: %pV", level, ap->print_id, &vaf);
7051
7052 va_end(args);
7053 }
7054 EXPORT_SYMBOL(ata_port_printk);
7055
7056 void ata_link_printk(const struct ata_link *link, const char *level,
7057 const char *fmt, ...)
7058 {
7059 struct va_format vaf;
7060 va_list args;
7061
7062 va_start(args, fmt);
7063
7064 vaf.fmt = fmt;
7065 vaf.va = &args;
7066
7067 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7068 printk("%sata%u.%02u: %pV",
7069 level, link->ap->print_id, link->pmp, &vaf);
7070 else
7071 printk("%sata%u: %pV",
7072 level, link->ap->print_id, &vaf);
7073
7074 va_end(args);
7075 }
7076 EXPORT_SYMBOL(ata_link_printk);
7077
7078 void ata_dev_printk(const struct ata_device *dev, const char *level,
7079 const char *fmt, ...)
7080 {
7081 struct va_format vaf;
7082 va_list args;
7083
7084 va_start(args, fmt);
7085
7086 vaf.fmt = fmt;
7087 vaf.va = &args;
7088
7089 printk("%sata%u.%02u: %pV",
7090 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7091 &vaf);
7092
7093 va_end(args);
7094 }
7095 EXPORT_SYMBOL(ata_dev_printk);
7096
7097 void ata_print_version(const struct device *dev, const char *version)
7098 {
7099 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7100 }
7101 EXPORT_SYMBOL(ata_print_version);
7102
7103 /*
7104 * libata is essentially a library of internal helper functions for
7105 * low-level ATA host controller drivers. As such, the API/ABI is
7106 * likely to change as new drivers are added and updated.
7107 * Do not depend on ABI/API stability.
7108 */
7109 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7110 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7111 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7112 EXPORT_SYMBOL_GPL(ata_base_port_ops);
7113 EXPORT_SYMBOL_GPL(sata_port_ops);
7114 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7115 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7116 EXPORT_SYMBOL_GPL(ata_link_next);
7117 EXPORT_SYMBOL_GPL(ata_dev_next);
7118 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7119 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
7120 EXPORT_SYMBOL_GPL(ata_host_init);
7121 EXPORT_SYMBOL_GPL(ata_host_alloc);
7122 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7123 EXPORT_SYMBOL_GPL(ata_slave_link_init);
7124 EXPORT_SYMBOL_GPL(ata_host_start);
7125 EXPORT_SYMBOL_GPL(ata_host_register);
7126 EXPORT_SYMBOL_GPL(ata_host_activate);
7127 EXPORT_SYMBOL_GPL(ata_host_detach);
7128 EXPORT_SYMBOL_GPL(ata_sg_init);
7129 EXPORT_SYMBOL_GPL(ata_qc_complete);
7130 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7131 EXPORT_SYMBOL_GPL(atapi_cmd_type);
7132 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7133 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7134 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7135 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7136 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7137 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7138 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7139 EXPORT_SYMBOL_GPL(ata_mode_string);
7140 EXPORT_SYMBOL_GPL(ata_id_xfermask);
7141 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7142 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7143 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7144 EXPORT_SYMBOL_GPL(ata_dev_disable);
7145 EXPORT_SYMBOL_GPL(sata_set_spd);
7146 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7147 EXPORT_SYMBOL_GPL(sata_link_debounce);
7148 EXPORT_SYMBOL_GPL(sata_link_resume);
7149 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7150 EXPORT_SYMBOL_GPL(ata_std_prereset);
7151 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7152 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7153 EXPORT_SYMBOL_GPL(ata_std_postreset);
7154 EXPORT_SYMBOL_GPL(ata_dev_classify);
7155 EXPORT_SYMBOL_GPL(ata_dev_pair);
7156 EXPORT_SYMBOL_GPL(ata_ratelimit);
7157 EXPORT_SYMBOL_GPL(ata_msleep);
7158 EXPORT_SYMBOL_GPL(ata_wait_register);
7159 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7160 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7161 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7162 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7163 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7164 EXPORT_SYMBOL_GPL(sata_scr_valid);
7165 EXPORT_SYMBOL_GPL(sata_scr_read);
7166 EXPORT_SYMBOL_GPL(sata_scr_write);
7167 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7168 EXPORT_SYMBOL_GPL(ata_link_online);
7169 EXPORT_SYMBOL_GPL(ata_link_offline);
7170 #ifdef CONFIG_PM
7171 EXPORT_SYMBOL_GPL(ata_host_suspend);
7172 EXPORT_SYMBOL_GPL(ata_host_resume);
7173 #endif /* CONFIG_PM */
7174 EXPORT_SYMBOL_GPL(ata_id_string);
7175 EXPORT_SYMBOL_GPL(ata_id_c_string);
7176 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7177 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7178
7179 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7180 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7181 EXPORT_SYMBOL_GPL(ata_timing_compute);
7182 EXPORT_SYMBOL_GPL(ata_timing_merge);
7183 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7184
7185 #ifdef CONFIG_PCI
7186 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7187 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7188 #ifdef CONFIG_PM
7189 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7190 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7191 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7192 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7193 #endif /* CONFIG_PM */
7194 #endif /* CONFIG_PCI */
7195
7196 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7197
7198 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7199 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7200 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7201 EXPORT_SYMBOL_GPL(ata_port_desc);
7202 #ifdef CONFIG_PCI
7203 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7204 #endif /* CONFIG_PCI */
7205 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7206 EXPORT_SYMBOL_GPL(ata_link_abort);
7207 EXPORT_SYMBOL_GPL(ata_port_abort);
7208 EXPORT_SYMBOL_GPL(ata_port_freeze);
7209 EXPORT_SYMBOL_GPL(sata_async_notification);
7210 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7211 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7212 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7213 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7214 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7215 EXPORT_SYMBOL_GPL(ata_do_eh);
7216 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7217
7218 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7219 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7220 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7221 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7222 EXPORT_SYMBOL_GPL(ata_cable_sata);
7223