// SPDX-License-Identifier: GPL-2.0-only

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ide.h>
#include <linux/delay.h>

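/*
 * ide_ata_error() - decode status/error from a failed ATA (disk) command
 *
 * Decides whether the request should be retried, the drive recalibrated,
 * the interface reset, or the request killed outright.
 */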
static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
				     u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		scsi_req(rq)->result |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* err has different meaning on cdrom and tape */
		if (err == ATA_ABORTED) {
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* retries won't help these */
			scsi_req(rq)->result = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* help it find track zero */
			scsi_req(rq)->result |= ERROR_RECAL;
		}
	}

	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	if (scsi_req(rq)->result >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		scsi_req(rq)->result |= ERROR_RESET;

	if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
		++scsi_req(rq)->result;
		return ide_do_reset(drive);
	}

	if ((scsi_req(rq)->result & ERROR_RECAL) == ERROR_RECAL)
		drive->special_flags |= IDE_SFLAG_RECALIBRATE;

	++scsi_req(rq)->result;

	return ide_stopped;
}

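/*
 * ide_atapi_error() - handle a failed ATAPI (cdrom/tape/floppy) command
 *
 * Only the controller-side state is dealt with here: a busy interface is
 * scheduled for reset and a stuck DRQ phase is aborted with IDLE IMMEDIATE.
 */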
static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq,
				       u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		scsi_req(rq)->result |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		/* force an abort */
		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

	if (scsi_req(rq)->result >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
			++scsi_req(rq)->result;
			return ide_do_reset(drive);
		}
		++scsi_req(rq)->result;
	}

	return ide_stopped;
}

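/* Dispatch to the ATA or ATAPI error handler based on the media type. */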
static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq,
				   u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

/**
 * ide_error - handle an error on the IDE
 * @drive: drive the error occurred on
 * @msg: message to report
 * @stat: status bits
 *
 * ide_error() takes action based on the error returned by the drive.
 * For normal I/O that may well include retries. We deal with
 * both new-style (taskfile) and old style command handling here.
 * In the case of taskfile command handling there is work left to do.
 */

ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	rq = drive->hwif->rq;
	if (rq == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (blk_rq_is_passthrough(rq)) {
		if (ata_taskfile_request(rq)) {
			struct ide_cmd *cmd = ide_req(rq)->special;

			if (cmd)
				ide_complete_cmd(drive, cmd, stat, err);
		} else if (ata_pm_request(rq)) {
			scsi_req(rq)->result = 1;
			ide_complete_pm_rq(drive, rq);
			return ide_stopped;
		}
		scsi_req(rq)->result = err;
		ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
		return ide_stopped;
	}

	return __ide_error(drive, rq, stat, err);
}
EXPORT_SYMBOL_GPL(ide_error);

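/*
 * If a REQ_DRIVE_RESET misc request is pending on this port, complete it
 * with the outcome of the reset.
 */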
static inline void ide_complete_drive_reset(ide_drive_t *drive, blk_status_t err)
{
	struct request *rq = drive->hwif->rq;

	if (rq && ata_misc_request(rq) &&
	    scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
		if (err <= 0 && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
		ide_complete_rq(drive, err, blk_rq_bytes(rq));
	}
}

/* needed below */
static ide_startstop_t do_reset1(ide_drive_t *, int);

/*
 * atapi_reset_pollfunc() gets invoked to poll the interface for completion
 * every 50ms during an atapi drive reset operation. If the drive has not yet
 * responded, and we have not yet hit our maximum waiting time, then the timer
 * is restarted for another 50ms.
 */
static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	u8 stat;

	tp_ops->dev_select(drive);
	udelay(10);
	stat = tp_ops->read_status(hwif);

	if (OK_STAT(stat, 0, ATA_BUSY))
		printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name);
	else {
		if (time_before(jiffies, hwif->poll_timeout)) {
			ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
			/* continue polling */
			return ide_started;
		}
		/* end of polling */
		hwif->polling = 0;
		printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n",
			drive->name, stat);
		/* do it the old fashioned way */
		return do_reset1(drive, 1);
	}
	/* done polling */
	hwif->polling = 0;
	ide_complete_drive_reset(drive, BLK_STS_OK);
	return ide_stopped;
}

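/*
 * Decode the diagnostic code left in the error register after a bus reset
 * and report the result for the master (and, if flagged, the slave) device.
 */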
static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
{
	static const char *err_master_vals[] =
		{ NULL, "passed", "formatter device error",
		  "sector buffer error", "ECC circuitry error",
		  "controlling MPU error" };

	u8 err_master = err & 0x7f;

	printk(KERN_ERR "%s: reset: master: ", hwif->name);
	if (err_master && err_master < 6)
		printk(KERN_CONT "%s", err_master_vals[err_master]);
	else
		printk(KERN_CONT "error (0x%02x?)", err);
	if (err & 0x80)
		printk(KERN_CONT "; slave: failed");
	printk(KERN_CONT "\n");
}

/*
 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
 * during an ide reset operation. If the drives have not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is restarted
 * for another 50ms.
 */
static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	u8 tmp;
	blk_status_t err = BLK_STS_OK;

	if (port_ops && port_ops->reset_poll) {
		err = port_ops->reset_poll(drive);
		if (err) {
			printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
				hwif->name, drive->name);
			goto out;
		}
	}

	tmp = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(tmp, 0, ATA_BUSY)) {
		if (time_before(jiffies, hwif->poll_timeout)) {
			ide_set_handler(drive, &reset_pollfunc, HZ/20);
			/* continue polling */
			return ide_started;
		}
		printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
			hwif->name, tmp);
		drive->failures++;
		err = BLK_STS_IOERR;
	} else {
		tmp = ide_read_error(drive);

		if (tmp == 1) {
			printk(KERN_INFO "%s: reset: success\n", hwif->name);
			drive->failures = 0;
		} else {
			ide_reset_report_error(hwif, tmp);
			drive->failures++;
			err = BLK_STS_IOERR;
		}
	}
out:
	hwif->polling = 0;	/* done polling */
	ide_complete_drive_reset(drive, err);
	return ide_stopped;
}

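/*
 * Reset the per-drive state of an ATA disk before the port is reset:
 * geometry, recalibration and multmode have to be set up again afterwards.
 */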
static void ide_disk_pre_reset(ide_drive_t *drive)
{
	int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;

	drive->special_flags =
		legacy ? (IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0;

	drive->mult_count = 0;
	drive->dev_flags &= ~IDE_DFLAG_PARKED;

	if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
	    (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
		drive->mult_req = 0;

	if (drive->mult_req != drive->mult_count)
		drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
}

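/*
 * Prepare a drive for an interface reset: flag the settings that must be
 * restored afterwards and retune (or switch off) DMA as appropriate.
 */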
static void pre_reset(ide_drive_t *drive)
{
	const struct ide_port_ops *port_ops = drive->hwif->port_ops;

	if (drive->media == ide_disk)
		ide_disk_pre_reset(drive);
	else
		drive->dev_flags |= IDE_DFLAG_POST_RESET;

	if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
		if (drive->crc_count)
			ide_check_dma_crc(drive);
		else
			ide_dma_off(drive);
	}

	if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
			drive->dev_flags &= ~IDE_DFLAG_UNMASK;
			drive->io_32bit = 0;
		}
		return;
	}

	if (port_ops && port_ops->pre_reset)
		port_ops->pre_reset(drive);

	if (drive->current_speed != 0xff)
		drive->desired_speed = drive->current_speed;
	drive->current_speed = 0xff;
}

/*
 * do_reset1() attempts to recover a confused drive by resetting it.
 * Unfortunately, resetting a disk drive actually resets all devices on
 * the same interface, so it can really be thought of as resetting the
 * interface rather than resetting the drive.
 *
 * ATAPI devices have their own reset mechanism which allows them to be
 * individually reset without clobbering other devices on the same interface.
 *
 * Unfortunately, the IDE interface does not generate an interrupt to let
 * us know when the reset operation has finished, so we must poll for this.
 * Equally poor, though, is the fact that this may take a very long time
 * to complete (up to 30 seconds worst case). So, instead of busy-waiting
 * here for it, we set a timer to poll at 50ms intervals.
 */
static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_port_ops *port_ops;
	ide_drive_t *tdrive;
	unsigned long flags, timeout;
	int i;
	DEFINE_WAIT(wait);

	spin_lock_irqsave(&hwif->lock, flags);

	/* We must not reset with running handlers */
	BUG_ON(hwif->handler != NULL);

	/* For an ATAPI device, first try an ATAPI SRST. */
	if (drive->media != ide_disk && !do_not_try_atapi) {
		pre_reset(drive);
		tp_ops->dev_select(drive);
		udelay(20);
		tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
		ndelay(400);
		hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
		hwif->polling = 1;
		__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
		spin_unlock_irqrestore(&hwif->lock, flags);
		return ide_started;
	}

	/* We must not disturb devices in the IDE_DFLAG_PARKED state. */
	do {
		unsigned long now;

		prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
		timeout = jiffies;
		ide_port_for_each_present_dev(i, tdrive, hwif) {
			if ((tdrive->dev_flags & IDE_DFLAG_PARKED) &&
			    time_after(tdrive->sleep, timeout))
				timeout = tdrive->sleep;
		}

		now = jiffies;
		if (time_before_eq(timeout, now))
			break;

		spin_unlock_irqrestore(&hwif->lock, flags);
		timeout = schedule_timeout_uninterruptible(timeout - now);
		spin_lock_irqsave(&hwif->lock, flags);
	} while (timeout);
	finish_wait(&ide_park_wq, &wait);

	/*
	 * First, reset any device state data we were maintaining
	 * for any of the drives on this interface.
	 */
	ide_port_for_each_dev(i, tdrive, hwif)
		pre_reset(tdrive);

	if (io_ports->ctl_addr == 0) {
		spin_unlock_irqrestore(&hwif->lock, flags);
		ide_complete_drive_reset(drive, BLK_STS_IOERR);
		return ide_stopped;
	}

	/*
	 * Note that we also set nIEN while resetting the device,
	 * to mask unwanted interrupts from the interface during the reset.
	 * However, due to the design of PC hardware, this will cause an
	 * immediate interrupt due to the edge transition it produces.
	 * This single interrupt gives us a "fast poll" for drives that
	 * recover from reset very quickly, saving us the first 50ms wait time.
	 */
	/* set SRST and nIEN */
	tp_ops->write_devctl(hwif, ATA_SRST | ATA_NIEN | ATA_DEVCTL_OBS);
	/* more than enough time */
	udelay(10);
	/* clear SRST, leave nIEN (unless device is on the quirk list) */
	tp_ops->write_devctl(hwif,
		((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) |
		ATA_DEVCTL_OBS);
	/* more than enough time */
	udelay(10);
	hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
	hwif->polling = 1;
	__ide_set_handler(drive, &reset_pollfunc, HZ/20);

	/*
	 * Some weird controllers like to reset themselves to a strange
	 * state when the disks are reset this way. At least, the Winbond
	 * 553 documentation says so.
	 */
	port_ops = hwif->port_ops;
	if (port_ops && port_ops->resetproc)
		port_ops->resetproc(drive);

	spin_unlock_irqrestore(&hwif->lock, flags);
	return ide_started;
}

/*
 * ide_do_reset() is the entry point to the drive/interface reset code.
 */

ide_startstop_t ide_do_reset(ide_drive_t *drive)
{
	return do_reset1(drive, 0);
}
EXPORT_SYMBOL(ide_do_reset);