1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * SATA specific part of ATA helper library
4 *
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
7 * Copyright 2006 Tejun Heo <htejun@gmail.com>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <scsi/scsi_cmnd.h>
13 #include <scsi/scsi_device.h>
14 #include <linux/libata.h>
15
16 #include "libata.h"
17 #include "libata-transport.h"
18
/* debounce timing parameters in msecs { interval, duration, timeout } */
/* normal debounce: poll every 5ms, stable for 100ms, give up after 2s */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
/* hotplug debounce: slower polling and longer stability window */
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
/* long debounce: most conservative timing, 5s overall timeout */
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
26
27 /**
28 * sata_scr_valid - test whether SCRs are accessible
29 * @link: ATA link to test SCR accessibility for
30 *
31 * Test whether SCRs are accessible for @link.
32 *
33 * LOCKING:
34 * None.
35 *
36 * RETURNS:
37 * 1 if SCRs are accessible, 0 otherwise.
38 */
sata_scr_valid(struct ata_link * link)39 int sata_scr_valid(struct ata_link *link)
40 {
41 struct ata_port *ap = link->ap;
42
43 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
44 }
45 EXPORT_SYMBOL_GPL(sata_scr_valid);
46
47 /**
48 * sata_scr_read - read SCR register of the specified port
49 * @link: ATA link to read SCR for
50 * @reg: SCR to read
51 * @val: Place to store read value
52 *
53 * Read SCR register @reg of @link into *@val. This function is
54 * guaranteed to succeed if @link is ap->link, the cable type of
55 * the port is SATA and the port implements ->scr_read.
56 *
57 * LOCKING:
58 * None if @link is ap->link. Kernel thread context otherwise.
59 *
60 * RETURNS:
61 * 0 on success, negative errno on failure.
62 */
sata_scr_read(struct ata_link * link,int reg,u32 * val)63 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
64 {
65 if (ata_is_host_link(link)) {
66 if (sata_scr_valid(link))
67 return link->ap->ops->scr_read(link, reg, val);
68 return -EOPNOTSUPP;
69 }
70
71 return sata_pmp_scr_read(link, reg, val);
72 }
73 EXPORT_SYMBOL_GPL(sata_scr_read);
74
75 /**
76 * sata_scr_write - write SCR register of the specified port
77 * @link: ATA link to write SCR for
78 * @reg: SCR to write
79 * @val: value to write
80 *
81 * Write @val to SCR register @reg of @link. This function is
82 * guaranteed to succeed if @link is ap->link, the cable type of
83 * the port is SATA and the port implements ->scr_read.
84 *
85 * LOCKING:
86 * None if @link is ap->link. Kernel thread context otherwise.
87 *
88 * RETURNS:
89 * 0 on success, negative errno on failure.
90 */
sata_scr_write(struct ata_link * link,int reg,u32 val)91 int sata_scr_write(struct ata_link *link, int reg, u32 val)
92 {
93 if (ata_is_host_link(link)) {
94 if (sata_scr_valid(link))
95 return link->ap->ops->scr_write(link, reg, val);
96 return -EOPNOTSUPP;
97 }
98
99 return sata_pmp_scr_write(link, reg, val);
100 }
101 EXPORT_SYMBOL_GPL(sata_scr_write);
102
103 /**
104 * sata_scr_write_flush - write SCR register of the specified port and flush
105 * @link: ATA link to write SCR for
106 * @reg: SCR to write
107 * @val: value to write
108 *
109 * This function is identical to sata_scr_write() except that this
110 * function performs flush after writing to the register.
111 *
112 * LOCKING:
113 * None if @link is ap->link. Kernel thread context otherwise.
114 *
115 * RETURNS:
116 * 0 on success, negative errno on failure.
117 */
sata_scr_write_flush(struct ata_link * link,int reg,u32 val)118 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
119 {
120 if (ata_is_host_link(link)) {
121 int rc;
122
123 if (sata_scr_valid(link)) {
124 rc = link->ap->ops->scr_write(link, reg, val);
125 if (rc == 0)
126 rc = link->ap->ops->scr_read(link, reg, &val);
127 return rc;
128 }
129 return -EOPNOTSUPP;
130 }
131
132 return sata_pmp_scr_write(link, reg, val);
133 }
134 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
135
136 /**
137 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
138 * @tf: Taskfile to convert
139 * @pmp: Port multiplier port
140 * @is_cmd: This FIS is for command
141 * @fis: Buffer into which data will output
142 *
143 * Converts a standard ATA taskfile to a Serial ATA
144 * FIS structure (Register - Host to Device).
145 *
146 * LOCKING:
147 * Inherited from caller.
148 */
ata_tf_to_fis(const struct ata_taskfile * tf,u8 pmp,int is_cmd,u8 * fis)149 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
150 {
151 fis[0] = 0x27; /* Register - Host to Device FIS */
152 fis[1] = pmp & 0xf; /* Port multiplier number*/
153 if (is_cmd)
154 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
155
156 fis[2] = tf->command;
157 fis[3] = tf->feature;
158
159 fis[4] = tf->lbal;
160 fis[5] = tf->lbam;
161 fis[6] = tf->lbah;
162 fis[7] = tf->device;
163
164 fis[8] = tf->hob_lbal;
165 fis[9] = tf->hob_lbam;
166 fis[10] = tf->hob_lbah;
167 fis[11] = tf->hob_feature;
168
169 fis[12] = tf->nsect;
170 fis[13] = tf->hob_nsect;
171 fis[14] = 0;
172 fis[15] = tf->ctl;
173
174 fis[16] = tf->auxiliary & 0xff;
175 fis[17] = (tf->auxiliary >> 8) & 0xff;
176 fis[18] = (tf->auxiliary >> 16) & 0xff;
177 fis[19] = (tf->auxiliary >> 24) & 0xff;
178 }
179 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
180
181 /**
182 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
183 * @fis: Buffer from which data will be input
184 * @tf: Taskfile to output
185 *
186 * Converts a serial ATA FIS structure to a standard ATA taskfile.
187 *
188 * LOCKING:
189 * Inherited from caller.
190 */
191
ata_tf_from_fis(const u8 * fis,struct ata_taskfile * tf)192 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
193 {
194 tf->status = fis[2];
195 tf->error = fis[3];
196
197 tf->lbal = fis[4];
198 tf->lbam = fis[5];
199 tf->lbah = fis[6];
200 tf->device = fis[7];
201
202 tf->hob_lbal = fis[8];
203 tf->hob_lbam = fis[9];
204 tf->hob_lbah = fis[10];
205
206 tf->nsect = fis[12];
207 tf->hob_nsect = fis[13];
208 }
209 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
210
/**
 * sata_link_debounce - debounce SATA phy status
 * @link: ATA link to debounce SATA phy status for
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Make sure SStatus of @link reaches stable state, determined by
 * holding the same value where DET is not 1 for @duration polled
 * every @interval, before @timeout.  Timeout constraints the
 * beginning of the stable state.  Because DET gets stuck at 1 on
 * some controllers after hot unplugging, this functions waits
 * until timeout then returns 0 if DET is stable at 1.
 *
 * @timeout is further limited by @deadline.  The sooner of the
 * two is used.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];	/* poll interval, msec */
	unsigned long duration = params[1];	/* required stable time, msec */
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of params[2] and @deadline */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* isolate the DET field of SStatus */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/*
			 * DET == 1 may be stuck (see function comment);
			 * keep waiting until the deadline before accepting
			 * it as stable.
			 */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			/* held the same value for @duration -> stable */
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
EXPORT_SYMBOL_GPL(sata_link_debounce);
281
/**
 * sata_link_resume - resume SATA link
 * @link: ATA link to resume SATA
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Resume SATA phy @link and debounce it.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared.
	 */
	do {
		/* keep SPD (bits 7:4), DET = 0 (no action), IPM = 3
		 * (disable partial/slumber transitions)
		 */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;
		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
			ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	/* out of retries and DET/IPM still not as programmed */
	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			     scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL from the SError access is tolerated, anything else isn't */
	return rc != -EINVAL ? rc : 0;
}
EXPORT_SYMBOL_GPL(sata_link_resume);
348
/**
 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 * @link: ATA link to manipulate SControl for
 * @policy: LPM policy to configure
 * @spm_wakeup: initiate LPM transition to active state
 *
 * Manipulate the IPM field of the SControl register of @link
 * according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 * @spm_wakeup is %true, the SPM field is manipulated to wake up
 * the link.  This function also clears PHYRDY_CHG before
 * returning.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MED_POWER_WITH_DIPM:
	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0) {
			/* assume no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);

			/*
			 * If the controller does not support partial, slumber,
			 * or devsleep, then disallow these transitions.
			 */
			if (link->ap->host->flags & ATA_HOST_NO_PART)
				scontrol |= (0x1 << 8);

			if (link->ap->host->flags & ATA_HOST_NO_SSC)
				scontrol |= (0x2 << 8);

			if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
				scontrol |= (0x4 << 8);
		} else {
			/* empty port, power off: DET field (bits 3:0) = 4 */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		/* all ata_lpm_policy values are handled above */
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
436
/*
 * __sata_set_spd_needed - compute SPD field and report whether it changed
 * @link: link being configured
 * @scontrol: in/out SControl value; SPD field (bits 7:4) is rewritten
 *
 * Returns nonzero when the SPD field actually needs a new value.
 */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit = link->sata_spd_limit;
	u32 target, cur_spd;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	/* UINT_MAX means "no limit" -> SPD 0 (no speed restriction) */
	target = (limit == UINT_MAX) ? 0 : fls(limit);

	cur_spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return cur_spd != target;
}
461
462 /**
463 * sata_set_spd_needed - is SATA spd configuration needed
464 * @link: Link in question
465 *
466 * Test whether the spd limit in SControl matches
467 * @link->sata_spd_limit. This function is used to determine
468 * whether hardreset is necessary to apply SATA spd
469 * configuration.
470 *
471 * LOCKING:
472 * Inherited from caller.
473 *
474 * RETURNS:
475 * 1 if SATA spd configuration is needed, 0 otherwise.
476 */
sata_set_spd_needed(struct ata_link * link)477 static int sata_set_spd_needed(struct ata_link *link)
478 {
479 u32 scontrol;
480
481 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
482 return 1;
483
484 return __sata_set_spd_needed(link, &scontrol);
485 }
486
487 /**
488 * sata_set_spd - set SATA spd according to spd limit
489 * @link: Link to set SATA spd for
490 *
491 * Set SATA spd of @link according to sata_spd_limit.
492 *
493 * LOCKING:
494 * Inherited from caller.
495 *
496 * RETURNS:
497 * 0 if spd doesn't need to be changed, 1 if spd has been
498 * changed. Negative errno if SCR registers are inaccessible.
499 */
sata_set_spd(struct ata_link * link)500 int sata_set_spd(struct ata_link *link)
501 {
502 u32 scontrol;
503 int rc;
504
505 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
506 return rc;
507
508 if (!__sata_set_spd_needed(link, &scontrol))
509 return 0;
510
511 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
512 return rc;
513
514 return 1;
515 }
516 EXPORT_SYMBOL_GPL(sata_set_spd);
517
/**
 * sata_link_hardreset - reset link via SATA phy reset
 * @link: link to reset
 * @timing: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 * @online: optional out parameter indicating link onlineness
 * @check_ready: optional callback to check link readiness
 *
 * SATA phy-reset @link using DET bits of SControl register.
 * After hardreset, link readiness is waited upon using
 * ata_wait_ready() if @check_ready is specified.  LLDs are
 * allowed to not specify @check_ready and wait itself after this
 * function returns.  Device classification is LLD's
 * responsibility.
 *
 * *@online is set to one iff reset succeeded and @link is online
 * after reset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* keep SPD, DET = 4 (disable phy), IPM = 3 */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* keep SPD, DET = 1 (perform COMRESET), IPM = 3 */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		/* -EAGAIN asks EH to perform the follow-up softreset */
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(sata_link_hardreset);
630
/**
 * ata_qc_complete_multiple - Complete multiple qcs successfully
 * @ap: port in question
 * @qc_active: new qc_active mask
 *
 * Complete in-flight commands.  This functions is meant to be
 * called from low-level driver's interrupt routine to complete
 * requests normally.  ap->qc_active and @qc_active is compared
 * and commands are completed accordingly.
 *
 * Always use this function when completing multiple NCQ commands
 * from IRQ handlers instead of calling ata_qc_complete()
 * multiple times to keep IRQ expect status properly in sync.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
	u64 done_mask, ap_qc_active = ap->qc_active;
	int nr_done = 0;

	/*
	 * If the internal tag is set on ap->qc_active, then we care about
	 * bit0 on the passed in qc_active mask.  Move that bit up to match
	 * the internal tag.
	 */
	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
		qc_active ^= qc_active & 0x01;
	}

	/* bits that differ are commands the hardware has finished */
	done_mask = ap_qc_active ^ qc_active;

	/* a bit set in @qc_active but clear in ap->qc_active is bogus:
	 * the LLD claims a command active that we never issued
	 */
	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	/* complete each finished tag, lowest set bit first */
	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs64(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1ULL << tag);
	}

	return nr_done;
}
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
689
690 /**
691 * ata_slave_link_init - initialize slave link
692 * @ap: port to initialize slave link for
693 *
694 * Create and initialize slave link for @ap. This enables slave
695 * link handling on the port.
696 *
697 * In libata, a port contains links and a link contains devices.
698 * There is single host link but if a PMP is attached to it,
699 * there can be multiple fan-out links. On SATA, there's usually
700 * a single device connected to a link but PATA and SATA
701 * controllers emulating TF based interface can have two - master
702 * and slave.
703 *
704 * However, there are a few controllers which don't fit into this
705 * abstraction too well - SATA controllers which emulate TF
706 * interface with both master and slave devices but also have
707 * separate SCR register sets for each device. These controllers
708 * need separate links for physical link handling
709 * (e.g. onlineness, link speed) but should be treated like a
710 * traditional M/S controller for everything else (e.g. command
711 * issue, softreset).
712 *
713 * slave_link is libata's way of handling this class of
714 * controllers without impacting core layer too much. For
715 * anything other than physical link handling, the default host
716 * link is used for both master and slave. For physical link
717 * handling, separate @ap->slave_link is used. All dirty details
718 * are implemented inside libata core layer. From LLD's POV, the
719 * only difference is that prereset, hardreset and postreset are
720 * called once more for the slave link, so the reset sequence
721 * looks like the following.
722 *
723 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
724 * softreset(M) -> postreset(M) -> postreset(S)
725 *
726 * Note that softreset is called only for the master. Softreset
727 * resets both M/S by definition, so SRST on master should handle
728 * both (the standard method will work just fine).
729 *
730 * LOCKING:
731 * Should be called before host is registered.
732 *
733 * RETURNS:
734 * 0 on success, -errno on failure.
735 */
ata_slave_link_init(struct ata_port * ap)736 int ata_slave_link_init(struct ata_port *ap)
737 {
738 struct ata_link *link;
739
740 WARN_ON(ap->slave_link);
741 WARN_ON(ap->flags & ATA_FLAG_PMP);
742
743 link = kzalloc(sizeof(*link), GFP_KERNEL);
744 if (!link)
745 return -ENOMEM;
746
747 ata_link_init(ap, link, 1);
748 ap->slave_link = link;
749 return 0;
750 }
751 EXPORT_SYMBOL_GPL(ata_slave_link_init);
752
753 /**
754 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
755 * @link: Link receiving the event
756 *
757 * Test whether the received PHY event has to be ignored or not.
758 *
759 * LOCKING:
760 * None:
761 *
762 * RETURNS:
763 * True if the event has to be ignored.
764 */
sata_lpm_ignore_phy_events(struct ata_link * link)765 bool sata_lpm_ignore_phy_events(struct ata_link *link)
766 {
767 unsigned long lpm_timeout = link->last_lpm_change +
768 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
769
770 /* if LPM is enabled, PHYRDY doesn't mean anything */
771 if (link->lpm_policy > ATA_LPM_MAX_POWER)
772 return true;
773
774 /* ignore the first PHY event after the LPM policy changed
775 * as it is might be spurious
776 */
777 if ((link->flags & ATA_LFLAG_CHANGED) &&
778 time_before(jiffies, lpm_timeout))
779 return true;
780
781 return false;
782 }
783 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
784
/*
 * Sysfs names for each ata_lpm_policy value.  ATA_LPM_UNKNOWN shares
 * "max_performance" with ATA_LPM_MAX_POWER; the store handler starts
 * matching at ATA_LPM_MAX_POWER, so UNKNOWN can never be selected from
 * userspace.
 */
static const char *ata_lpm_policy_names[] = {
	[ATA_LPM_UNKNOWN] = "max_performance",
	[ATA_LPM_MAX_POWER] = "max_performance",
	[ATA_LPM_MED_POWER] = "medium_power",
	[ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
	[ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
	[ATA_LPM_MIN_POWER] = "min_power",
};
793
ata_scsi_lpm_store(struct device * device,struct device_attribute * attr,const char * buf,size_t count)794 static ssize_t ata_scsi_lpm_store(struct device *device,
795 struct device_attribute *attr,
796 const char *buf, size_t count)
797 {
798 struct Scsi_Host *shost = class_to_shost(device);
799 struct ata_port *ap = ata_shost_to_port(shost);
800 struct ata_link *link;
801 struct ata_device *dev;
802 enum ata_lpm_policy policy;
803 unsigned long flags;
804
805 /* UNKNOWN is internal state, iterate from MAX_POWER */
806 for (policy = ATA_LPM_MAX_POWER;
807 policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
808 const char *name = ata_lpm_policy_names[policy];
809
810 if (strncmp(name, buf, strlen(name)) == 0)
811 break;
812 }
813 if (policy == ARRAY_SIZE(ata_lpm_policy_names))
814 return -EINVAL;
815
816 spin_lock_irqsave(ap->lock, flags);
817
818 ata_for_each_link(link, ap, EDGE) {
819 ata_for_each_dev(dev, &ap->link, ENABLED) {
820 if (dev->horkage & ATA_HORKAGE_NOLPM) {
821 count = -EOPNOTSUPP;
822 goto out_unlock;
823 }
824 }
825 }
826
827 ap->target_lpm_policy = policy;
828 ata_port_schedule_eh(ap);
829 out_unlock:
830 spin_unlock_irqrestore(ap->lock, flags);
831 return count;
832 }
833
ata_scsi_lpm_show(struct device * dev,struct device_attribute * attr,char * buf)834 static ssize_t ata_scsi_lpm_show(struct device *dev,
835 struct device_attribute *attr, char *buf)
836 {
837 struct Scsi_Host *shost = class_to_shost(dev);
838 struct ata_port *ap = ata_shost_to_port(shost);
839
840 if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
841 return -EINVAL;
842
843 return snprintf(buf, PAGE_SIZE, "%s\n",
844 ata_lpm_policy_names[ap->target_lpm_policy]);
845 }
846 DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
847 ata_scsi_lpm_show, ata_scsi_lpm_store);
848 EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
849
ata_ncq_prio_supported_show(struct device * device,struct device_attribute * attr,char * buf)850 static ssize_t ata_ncq_prio_supported_show(struct device *device,
851 struct device_attribute *attr,
852 char *buf)
853 {
854 struct scsi_device *sdev = to_scsi_device(device);
855 struct ata_port *ap = ata_shost_to_port(sdev->host);
856 struct ata_device *dev;
857 bool ncq_prio_supported;
858 int rc = 0;
859
860 spin_lock_irq(ap->lock);
861 dev = ata_scsi_find_dev(ap, sdev);
862 if (!dev)
863 rc = -ENODEV;
864 else
865 ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
866 spin_unlock_irq(ap->lock);
867
868 return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
869 }
870
871 DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
872 EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
873
ata_ncq_prio_enable_show(struct device * device,struct device_attribute * attr,char * buf)874 static ssize_t ata_ncq_prio_enable_show(struct device *device,
875 struct device_attribute *attr,
876 char *buf)
877 {
878 struct scsi_device *sdev = to_scsi_device(device);
879 struct ata_port *ap = ata_shost_to_port(sdev->host);
880 struct ata_device *dev;
881 bool ncq_prio_enable;
882 int rc = 0;
883
884 spin_lock_irq(ap->lock);
885 dev = ata_scsi_find_dev(ap, sdev);
886 if (!dev)
887 rc = -ENODEV;
888 else
889 ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
890 spin_unlock_irq(ap->lock);
891
892 return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
893 }
894
/*
 * ata_ncq_prio_enable_store - sysfs store for ncq_prio_enable
 *
 * Parse a 0/1 value and set or clear ATA_DFLAG_NCQ_PRIO_ENABLE on the
 * ATA device behind @device.  Fails with -EINVAL if the device does
 * not support NCQ priority (ATA_DFLAG_NCQ_PRIO unset) or the input is
 * not 0 or 1, and with -ENODEV if no ATA device matches @device.
 *
 * NOTE(review): the device lookup happens before taking ap->lock while
 * the flag update happens under it — presumably the sdev<->ata_device
 * binding is stable here, but confirm against EH hotplug paths.
 */
static ssize_t ata_ncq_prio_enable_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_device *dev;
	long int input;
	int rc = 0;

	rc = kstrtol(buf, 10, &input);
	if (rc)
		return rc;
	/* only boolean 0/1 accepted */
	if ((input < 0) || (input > 1))
		return -EINVAL;

	ap = ata_shost_to_port(sdev->host);
	dev = ata_scsi_find_dev(ap, sdev);
	if (unlikely(!dev))
		return -ENODEV;

	spin_lock_irq(ap->lock);

	/* enabling priority requires hardware support */
	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (input)
		dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
	else
		dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;

unlock:
	spin_unlock_irq(ap->lock);

	return rc ? rc : len;
}

DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
	    ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
937
/*
 * Default per-sdev sysfs attributes for NCQ-capable hosts.
 * NULL-terminated, as the SCSI midlayer expects.
 */
struct device_attribute *ata_ncq_sdev_attrs[] = {
	&dev_attr_unload_heads,
	&dev_attr_ncq_prio_enable,
	&dev_attr_ncq_prio_supported,
	NULL
};
EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs);
945
946 static ssize_t
ata_scsi_em_message_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)947 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
948 const char *buf, size_t count)
949 {
950 struct Scsi_Host *shost = class_to_shost(dev);
951 struct ata_port *ap = ata_shost_to_port(shost);
952 if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
953 return ap->ops->em_store(ap, buf, count);
954 return -EINVAL;
955 }
956
957 static ssize_t
ata_scsi_em_message_show(struct device * dev,struct device_attribute * attr,char * buf)958 ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
959 char *buf)
960 {
961 struct Scsi_Host *shost = class_to_shost(dev);
962 struct ata_port *ap = ata_shost_to_port(shost);
963
964 if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
965 return ap->ops->em_show(ap, buf);
966 return -EINVAL;
967 }
968 DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
969 ata_scsi_em_message_show, ata_scsi_em_message_store);
970 EXPORT_SYMBOL_GPL(dev_attr_em_message);
971
972 static ssize_t
ata_scsi_em_message_type_show(struct device * dev,struct device_attribute * attr,char * buf)973 ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
974 char *buf)
975 {
976 struct Scsi_Host *shost = class_to_shost(dev);
977 struct ata_port *ap = ata_shost_to_port(shost);
978
979 return snprintf(buf, 23, "%d\n", ap->em_message_type);
980 }
981 DEVICE_ATTR(em_message_type, S_IRUGO,
982 ata_scsi_em_message_type_show, NULL);
983 EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
984
985 static ssize_t
ata_scsi_activity_show(struct device * dev,struct device_attribute * attr,char * buf)986 ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
987 char *buf)
988 {
989 struct scsi_device *sdev = to_scsi_device(dev);
990 struct ata_port *ap = ata_shost_to_port(sdev->host);
991 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
992
993 if (atadev && ap->ops->sw_activity_show &&
994 (ap->flags & ATA_FLAG_SW_ACTIVITY))
995 return ap->ops->sw_activity_show(atadev, buf);
996 return -EINVAL;
997 }
998
999 static ssize_t
ata_scsi_activity_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1000 ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
1001 const char *buf, size_t count)
1002 {
1003 struct scsi_device *sdev = to_scsi_device(dev);
1004 struct ata_port *ap = ata_shost_to_port(sdev->host);
1005 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
1006 enum sw_activity val;
1007 int rc;
1008
1009 if (atadev && ap->ops->sw_activity_store &&
1010 (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
1011 val = simple_strtoul(buf, NULL, 0);
1012 switch (val) {
1013 case OFF: case BLINK_ON: case BLINK_OFF:
1014 rc = ap->ops->sw_activity_store(atadev, val);
1015 if (!rc)
1016 return count;
1017 else
1018 return rc;
1019 }
1020 }
1021 return -EINVAL;
1022 }
1023 DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
1024 ata_scsi_activity_store);
1025 EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
1026
/**
 * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
 * @ap: ATA port to which the device change the queue depth
 * @sdev: SCSI device to configure queue depth for
 * @queue_depth: new queue depth
 *
 * libsas and libata have different approaches for associating a sdev to
 * its ata_port.
 *
 * RETURNS:
 * The newly configured queue depth; the current depth if the request
 * is invalid or a no-op; -EINVAL if the depth is unchanged after
 * clamping (NOTE(review): returning -EINVAL here rather than the
 * current depth looks inconsistent with the early-return paths —
 * confirm callers expect it).
 */
int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
			     int queue_depth)
{
	struct ata_device *dev;
	unsigned long flags;

	/* nonsensical or no-op request: keep the current depth */
	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
		return sdev->queue_depth;

	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev || !ata_dev_enabled(dev))
		return sdev->queue_depth;

	/* NCQ enabled? */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
		/* depth 1 or no NCQ: mark NCQ off and force depth 1 */
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		queue_depth = 1;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* limit and apply queue depth: host, device ID, and libata caps */
	queue_depth = min(queue_depth, sdev->host->can_queue);
	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
	queue_depth = min(queue_depth, ATA_MAX_QUEUE);

	if (sdev->queue_depth == queue_depth)
		return -EINVAL;

	return scsi_change_queue_depth(sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
1070
1071 /**
1072 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
1073 * @sdev: SCSI device to configure queue depth for
1074 * @queue_depth: new queue depth
1075 *
1076 * This is libata standard hostt->change_queue_depth callback.
1077 * SCSI will call into this callback when user tries to set queue
1078 * depth via sysfs.
1079 *
1080 * LOCKING:
1081 * SCSI layer (we don't care)
1082 *
1083 * RETURNS:
1084 * Newly configured queue depth.
1085 */
ata_scsi_change_queue_depth(struct scsi_device * sdev,int queue_depth)1086 int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
1087 {
1088 struct ata_port *ap = ata_shost_to_port(sdev->host);
1089
1090 return __ata_change_queue_depth(ap, sdev, queue_depth);
1091 }
1092 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1093
1094 /**
1095 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
1096 * @host: ATA host container for all SAS ports
1097 * @port_info: Information from low-level host driver
1098 * @shost: SCSI host that the scsi device is attached to
1099 *
1100 * LOCKING:
1101 * PCI/etc. bus probe sem.
1102 *
1103 * RETURNS:
1104 * ata_port pointer on success / NULL on failure.
1105 */
1106
ata_sas_port_alloc(struct ata_host * host,struct ata_port_info * port_info,struct Scsi_Host * shost)1107 struct ata_port *ata_sas_port_alloc(struct ata_host *host,
1108 struct ata_port_info *port_info,
1109 struct Scsi_Host *shost)
1110 {
1111 struct ata_port *ap;
1112
1113 ap = ata_port_alloc(host);
1114 if (!ap)
1115 return NULL;
1116
1117 ap->port_no = 0;
1118 ap->lock = &host->lock;
1119 ap->pio_mask = port_info->pio_mask;
1120 ap->mwdma_mask = port_info->mwdma_mask;
1121 ap->udma_mask = port_info->udma_mask;
1122 ap->flags |= port_info->flags;
1123 ap->ops = port_info->port_ops;
1124 ap->cbl = ATA_CBL_SATA;
1125
1126 return ap;
1127 }
1128 EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
1129
/**
 * ata_sas_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 *	Zero (always succeeds).
 */
int ata_sas_port_start(struct ata_port *ap)
{
	/*
	 * the port is marked as frozen at allocation time, but if we don't
	 * have new eh, we won't thaw it
	 */
	if (!ap->ops->error_handler)
		ap->pflags &= ~ATA_PFLAG_FROZEN;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
1153
/**
 * ata_sas_port_stop - Undo ata_sas_port_start()
 * @ap: Port to shut down
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_sas_port_stop(struct ata_port *ap)
{
	/* intentionally empty: ata_sas_port_start() acquires no resources */
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
1168
/**
 * ata_sas_async_probe - simply schedule probing and return
 * @ap: Port to probe
 *
 * For batch scheduling of probe for sas attached ata devices, assumes
 * the port has already been through ata_sas_port_init()
 */
void ata_sas_async_probe(struct ata_port *ap)
{
	/* probing completes asynchronously; see ata_sas_sync_probe() */
	__ata_port_probe(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_async_probe);
1181
/**
 * ata_sas_sync_probe - synchronously probe a SAS attached SATA port
 * @ap: Port to probe
 *
 * RETURNS:
 *	Return value of ata_port_probe().
 */
int ata_sas_sync_probe(struct ata_port *ap)
{
	return ata_port_probe(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
1187
1188
1189 /**
1190 * ata_sas_port_init - Initialize a SATA device
1191 * @ap: SATA port to initialize
1192 *
1193 * LOCKING:
1194 * PCI/etc. bus probe sem.
1195 *
1196 * RETURNS:
1197 * Zero on success, non-zero on error.
1198 */
1199
ata_sas_port_init(struct ata_port * ap)1200 int ata_sas_port_init(struct ata_port *ap)
1201 {
1202 int rc = ap->ops->port_start(ap);
1203
1204 if (rc)
1205 return rc;
1206 ap->print_id = atomic_inc_return(&ata_print_id);
1207 return 0;
1208 }
1209 EXPORT_SYMBOL_GPL(ata_sas_port_init);
1210
/* Register the libata transport port object for a SAS attached port. */
int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
{
	return ata_tport_add(parent, ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_add);
1216
/* Unregister the transport port object added by ata_sas_tport_add(). */
void ata_sas_tport_delete(struct ata_port *ap)
{
	ata_tport_delete(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
1222
/**
 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
 * @ap: SATA port to destroy
 *
 */

void ata_sas_port_destroy(struct ata_port *ap)
{
	/* give the LLD a chance to tear down; port_stop is optional */
	if (ap->ops->port_stop)
		ap->ops->port_stop(ap);
	kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
1236
/**
 * ata_sas_slave_configure - Default slave_config routine for libata devices
 * @sdev: SCSI device to configure
 * @ap: ATA port to which SCSI device is attached
 *
 * RETURNS:
 *	Zero.
 */

int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
	/* generic sdev setup first, then per-device ATA configuration */
	ata_scsi_sdev_config(sdev);
	ata_scsi_dev_config(sdev, ap->link.device);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
1253
1254 /**
1255 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
1256 * @cmd: SCSI command to be sent
1257 * @ap: ATA port to which the command is being sent
1258 *
1259 * RETURNS:
1260 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
1261 * 0 otherwise.
1262 */
1263
ata_sas_queuecmd(struct scsi_cmnd * cmd,struct ata_port * ap)1264 int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
1265 {
1266 int rc = 0;
1267
1268 ata_scsi_dump_cdb(ap, cmd);
1269
1270 if (likely(ata_dev_enabled(ap->link.device)))
1271 rc = __ata_scsi_queuecmd(cmd, ap->link.device);
1272 else {
1273 cmd->result = (DID_BAD_TARGET << 16);
1274 cmd->scsi_done(cmd);
1275 }
1276 return rc;
1277 }
1278 EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
1279
ata_sas_allocate_tag(struct ata_port * ap)1280 int ata_sas_allocate_tag(struct ata_port *ap)
1281 {
1282 unsigned int max_queue = ap->host->n_tags;
1283 unsigned int i, tag;
1284
1285 for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
1286 tag = tag < max_queue ? tag : 0;
1287
1288 /* the last tag is reserved for internal command. */
1289 if (ata_tag_internal(tag))
1290 continue;
1291
1292 if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
1293 ap->sas_last_tag = tag;
1294 return tag;
1295 }
1296 }
1297 return -1;
1298 }
1299
/* Release a tag previously handed out by ata_sas_allocate_tag(). */
void ata_sas_free_tag(unsigned int tag, struct ata_port *ap)
{
	clear_bit(tag, &ap->sas_tag_allocated);
}
1304
1305 /**
1306 * sata_async_notification - SATA async notification handler
1307 * @ap: ATA port where async notification is received
1308 *
1309 * Handler to be called when async notification via SDB FIS is
1310 * received. This function schedules EH if necessary.
1311 *
1312 * LOCKING:
1313 * spin_lock_irqsave(host lock)
1314 *
1315 * RETURNS:
1316 * 1 if EH is scheduled, 0 otherwise.
1317 */
sata_async_notification(struct ata_port * ap)1318 int sata_async_notification(struct ata_port *ap)
1319 {
1320 u32 sntf;
1321 int rc;
1322
1323 if (!(ap->flags & ATA_FLAG_AN))
1324 return 0;
1325
1326 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1327 if (rc == 0)
1328 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1329
1330 if (!sata_pmp_attached(ap) || rc) {
1331 /* PMP is not attached or SNTF is not available */
1332 if (!sata_pmp_attached(ap)) {
1333 /* PMP is not attached. Check whether ATAPI
1334 * AN is configured. If so, notify media
1335 * change.
1336 */
1337 struct ata_device *dev = ap->link.device;
1338
1339 if ((dev->class == ATA_DEV_ATAPI) &&
1340 (dev->flags & ATA_DFLAG_AN))
1341 ata_scsi_media_change_notify(dev);
1342 return 0;
1343 } else {
1344 /* PMP is attached but SNTF is not available.
1345 * ATAPI async media change notification is
1346 * not used. The PMP must be reporting PHY
1347 * status change, schedule EH.
1348 */
1349 ata_port_schedule_eh(ap);
1350 return 1;
1351 }
1352 } else {
1353 /* PMP is attached and SNTF is available */
1354 struct ata_link *link;
1355
1356 /* check and notify ATAPI AN */
1357 ata_for_each_link(link, ap, EDGE) {
1358 if (!(sntf & (1 << link->pmp)))
1359 continue;
1360
1361 if ((link->device->class == ATA_DEV_ATAPI) &&
1362 (link->device->flags & ATA_DFLAG_AN))
1363 ata_scsi_media_change_notify(link->device);
1364 }
1365
1366 /* If PMP is reporting that PHY status of some
1367 * downstream ports has changed, schedule EH.
1368 */
1369 if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1370 ata_port_schedule_eh(ap);
1371 return 1;
1372 }
1373
1374 return 0;
1375 }
1376 }
1377 EXPORT_SYMBOL_GPL(sata_async_notification);
1378
/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask)
		return -EIO;

	/*
	 * All bytes of the page should sum to zero (mod 256); a bad
	 * checksum is only worth a warning, the data is still used.
	 */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	/* bit 7 of byte 0 set: no queued command has errored */
	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;	/* failed command's NCQ tag, bits 4:0 */

	/* bytes 2..13 carry the failed command's shadow registers */
	tf->status = buf[2];
	tf->error = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];
	/* bytes 14..16: NCQ autosense sk/asc/ascq for ZAC devices */
	if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id) &&
	    (tf->status & ATA_SENSE))
		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

	return 0;
}
1435
/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF. For NCQ device errors, all LLDDs have to do
 * is setting AC_ERR_DEV in ehi->err_mask. This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		/* a failed qc with err_mask set means the LLDD did it */
		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	/* the reported tag must belong to a command still in flight */
	if (!(link->sactive & (1 << tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;

	/*
	 * If the device supports NCQ autosense, ata_eh_read_log_10h() will have
	 * stored the sense data in qc->result_tf.auxiliary.
	 */
	if (qc->result_tf.auxiliary) {
		char sense_key, asc, ascq;

		/* auxiliary packs sk/asc/ascq as bytes 2..0 */
		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
		ascq = qc->result_tf.auxiliary & 0xff;
		ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
		ata_scsi_set_sense_information(dev, qc->scsicmd,
					       &qc->result_tf);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	}

	/* device error is now attributed to the qc; clear it on the link */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
1514