#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ide.h>
#include <linux/delay.h>

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
				     u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* err has different meaning on cdrom and tape */
		if (err == ATA_ABORTED) {
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special_flags |= IDE_SFLAG_RECALIBRATE;

	++rq->errors;

	return ide_stopped;
}
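
/*
 * Note on the rq->errors bookkeeping above: for "normal" I/O it acts as a
 * per-request retry counter, and the ERROR_* constants from <linux/ide.h>
 * (roughly ERROR_MAX == 8, ERROR_RESET == 3, ERROR_RECAL == 1) double as
 * masks on that counter, so a recalibrate is scheduled every 2nd retry and
 * a full interface reset every 4th retry until ERROR_MAX kills the request.
 * The values are quoted here for orientation only; the header is
 * authoritative.
 */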

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq,
				       u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		/* force an abort */
		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq,
				   u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

/**
 * ide_error - handle an error on the IDE
 * @drive: drive the error occurred on
 * @msg: message to report
 * @stat: status bits
 *
 * ide_error() takes action based on the error returned by the drive.
 * For normal I/O that may well include retries.  We deal with both
 * new-style (taskfile) and old-style command handling here.  In the
 * case of taskfile command handling there is work left to do.
 */

ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	rq = drive->hwif->rq;
	if (rq == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (rq->cmd_type != REQ_TYPE_FS) {
		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
			struct ide_cmd *cmd = rq->special;

			if (cmd)
				ide_complete_cmd(drive, cmd, stat, err);
		} else if (blk_pm_request(rq)) {
			rq->errors = 1;
			ide_complete_pm_rq(drive, rq);
			return ide_stopped;
		}
		rq->errors = err;
		ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
		return ide_stopped;
	}

	return __ide_error(drive, rq, stat, err);
}
EXPORT_SYMBOL_GPL(ide_error);
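
/*
 * Illustrative caller sketch (not part of this file): a completion handler
 * that sees a bad status typically bails out through ide_error(), along the
 * lines of
 *
 *	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT))
 *		return ide_error(drive, "hypothetical_intr", stat);
 *
 * where "hypothetical_intr" is only a label that ide_dump_status() will
 * print; real callers pass their own handler name.
 */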

static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{
	struct request *rq = drive->hwif->rq;

	if (rq && rq->cmd_type == REQ_TYPE_SPECIAL &&
	    rq->cmd[0] == REQ_DRIVE_RESET) {
		if (err <= 0 && rq->errors == 0)
			rq->errors = -EIO;
		ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
	}
}
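
/*
 * A request with cmd[0] == REQ_DRIVE_RESET is how a reset requested from
 * outside the error path (e.g. via the HDIO_DRIVE_RESET ioctl) travels
 * through the queue; the helper above makes sure such a request gets
 * completed once the reset itself has finished.
 */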

/* needed below */
static ide_startstop_t do_reset1(ide_drive_t *, int);

/*
 * atapi_reset_pollfunc() gets invoked to poll the interface for completion
 * every 50ms during an atapi drive reset operation. If the drive has not yet
 * responded, and we have not yet hit our maximum waiting time, then the timer
 * is restarted for another 50ms.
 */
static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	u8 stat;

	tp_ops->dev_select(drive);
	udelay(10);
	stat = tp_ops->read_status(hwif);

	if (OK_STAT(stat, 0, ATA_BUSY))
		printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name);
	else {
		if (time_before(jiffies, hwif->poll_timeout)) {
			ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
			/* continue polling */
			return ide_started;
		}
		/* end of polling */
		hwif->polling = 0;
		printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n",
			drive->name, stat);
		/* do it the old fashioned way */
		return do_reset1(drive, 1);
	}
	/* done polling */
	hwif->polling = 0;
	ide_complete_drive_reset(drive, 0);
	return ide_stopped;
}
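
/*
 * Note: the reset polled for above is the ATA_CMD_DEV_RESET issued by
 * do_reset1() below for ATAPI devices; if it times out we fall back to
 * resetting the whole interface the old-fashioned SRST way.
 */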

static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
{
	static const char *err_master_vals[] =
		{ NULL, "passed", "formatter device error",
		  "sector buffer error", "ECC circuitry error",
		  "controlling MPU error" };

	u8 err_master = err & 0x7f;

	printk(KERN_ERR "%s: reset: master: ", hwif->name);
	if (err_master && err_master < 6)
		printk(KERN_CONT "%s", err_master_vals[err_master]);
	else
		printk(KERN_CONT "error (0x%02x?)", err);
	if (err & 0x80)
		printk(KERN_CONT "; slave: failed");
	printk(KERN_CONT "\n");
}
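
/*
 * The value decoded above is the diagnostic code left in the error register
 * after a reset: per the ATA spec, 0x01 means the master passed its
 * diagnostics, and bit 7 being set indicates that the slave device failed.
 */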

/*
 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
 * during an ide reset operation. If the drives have not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is restarted
 * for another 50ms.
 */
static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	u8 tmp;
	int err = 0;

	if (port_ops && port_ops->reset_poll) {
		err = port_ops->reset_poll(drive);
		if (err) {
			printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
				hwif->name, drive->name);
			goto out;
		}
	}

	tmp = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(tmp, 0, ATA_BUSY)) {
		if (time_before(jiffies, hwif->poll_timeout)) {
			ide_set_handler(drive, &reset_pollfunc, HZ/20);
			/* continue polling */
			return ide_started;
		}
		printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
			hwif->name, tmp);
		drive->failures++;
		err = -EIO;
	} else {
		tmp = ide_read_error(drive);

		if (tmp == 1) {
			printk(KERN_INFO "%s: reset: success\n", hwif->name);
			drive->failures = 0;
		} else {
			ide_reset_report_error(hwif, tmp);
			drive->failures++;
			err = -EIO;
		}
	}
out:
	hwif->polling = 0;	/* done polling */
	ide_complete_drive_reset(drive, err);
	return ide_stopped;
}

static void ide_disk_pre_reset(ide_drive_t *drive)
{
	int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;

	drive->special_flags =
		legacy ? (IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0;

	drive->mult_count = 0;
	drive->dev_flags &= ~IDE_DFLAG_PARKED;

	if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
	    (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
		drive->mult_req = 0;

	if (drive->mult_req != drive->mult_count)
		drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
}
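
/*
 * The IDE_SFLAG_* bits set above are picked up by the IDE core's
 * special-command handling once the queue runs again, which (for legacy
 * CHS-only drives) re-issues INITIALIZE DEVICE PARAMETERS and RECALIBRATE,
 * and re-issues SET MULTIPLE MODE whenever mult_req and mult_count disagree.
 */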

static void pre_reset(ide_drive_t *drive)
{
	const struct ide_port_ops *port_ops = drive->hwif->port_ops;

	if (drive->media == ide_disk)
		ide_disk_pre_reset(drive);
	else
		drive->dev_flags |= IDE_DFLAG_POST_RESET;

	if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
		if (drive->crc_count)
			ide_check_dma_crc(drive);
		else
			ide_dma_off(drive);
	}

	if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
			drive->dev_flags &= ~IDE_DFLAG_UNMASK;
			drive->io_32bit = 0;
		}
		return;
	}

	if (port_ops && port_ops->pre_reset)
		port_ops->pre_reset(drive);

	if (drive->current_speed != 0xff)
		drive->desired_speed = drive->current_speed;
	drive->current_speed = 0xff;
}
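
/*
 * IDE_DFLAG_KEEP_SETTINGS corresponds to the user-visible
 * "keep_settings_over_reset" setting (hdparm -k); when it is clear, PIO-only
 * drives have their unmask-irq and 32-bit I/O tweaks dropped above instead
 * of being carried across the reset.
 */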

/*
 * do_reset1() attempts to recover a confused drive by resetting it.
 * Unfortunately, resetting a disk drive actually resets all devices on
 * the same interface, so it can really be thought of as resetting the
 * interface rather than resetting the drive.
 *
 * ATAPI devices have their own reset mechanism which allows them to be
 * individually reset without clobbering other devices on the same interface.
 *
 * Unfortunately, the IDE interface does not generate an interrupt to let
 * us know when the reset operation has finished, so we must poll for this.
 * Equally poor, though, is the fact that this may take a very long time
 * to complete (up to 30 seconds worst case).  So, instead of busy-waiting
 * here for it, we set a timer to poll at 50ms intervals.
 */
static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_port_ops *port_ops;
	ide_drive_t *tdrive;
	unsigned long flags, timeout;
	int i;
	DEFINE_WAIT(wait);

	spin_lock_irqsave(&hwif->lock, flags);

	/* We must not reset with running handlers */
	BUG_ON(hwif->handler != NULL);

	/* For an ATAPI device, first try an ATAPI SRST. */
	if (drive->media != ide_disk && !do_not_try_atapi) {
		pre_reset(drive);
		tp_ops->dev_select(drive);
		udelay(20);
		tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
		ndelay(400);
		hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
		hwif->polling = 1;
		__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
		spin_unlock_irqrestore(&hwif->lock, flags);
		return ide_started;
	}

	/* We must not disturb devices in the IDE_DFLAG_PARKED state. */
	do {
		unsigned long now;

		prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
		timeout = jiffies;
		ide_port_for_each_present_dev(i, tdrive, hwif) {
			if ((tdrive->dev_flags & IDE_DFLAG_PARKED) &&
			    time_after(tdrive->sleep, timeout))
				timeout = tdrive->sleep;
		}

		now = jiffies;
		if (time_before_eq(timeout, now))
			break;

		spin_unlock_irqrestore(&hwif->lock, flags);
		timeout = schedule_timeout_uninterruptible(timeout - now);
		spin_lock_irqsave(&hwif->lock, flags);
	} while (timeout);
	finish_wait(&ide_park_wq, &wait);

	/*
	 * First, reset any device state data we were maintaining
	 * for any of the drives on this interface.
	 */
	ide_port_for_each_dev(i, tdrive, hwif)
		pre_reset(tdrive);

	if (io_ports->ctl_addr == 0) {
		spin_unlock_irqrestore(&hwif->lock, flags);
		ide_complete_drive_reset(drive, -ENXIO);
		return ide_stopped;
	}

	/*
	 * Note that we also set nIEN while resetting the device,
	 * to mask unwanted interrupts from the interface during the reset.
	 * However, due to the design of PC hardware, this will cause an
	 * immediate interrupt due to the edge transition it produces.
	 * This single interrupt gives us a "fast poll" for drives that
	 * recover from reset very quickly, saving us the first 50ms wait time.
	 */
	/* set SRST and nIEN */
	tp_ops->write_devctl(hwif, ATA_SRST | ATA_NIEN | ATA_DEVCTL_OBS);
	/* more than enough time */
	udelay(10);
	/* clear SRST, leave nIEN (unless device is on the quirk list) */
	tp_ops->write_devctl(hwif,
			     ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) |
			     ATA_DEVCTL_OBS);
	/* more than enough time */
	udelay(10);
	hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
	hwif->polling = 1;
	__ide_set_handler(drive, &reset_pollfunc, HZ/20);

	/*
	 * Some weird controllers like to reset themselves to a strange
	 * state when the disks are reset this way. At least, the Winbond
	 * 553 documentation says that.
	 */
	port_ops = hwif->port_ops;
	if (port_ops && port_ops->resetproc)
		port_ops->resetproc(drive);

	spin_unlock_irqrestore(&hwif->lock, flags);
	return ide_started;
}
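
/*
 * Hedged example of how a host driver hooks into the reset paths above
 * (illustrative only; the "mydrv_*" callbacks are hypothetical, not part
 * of this file):
 *
 *	static const struct ide_port_ops mydrv_port_ops = {
 *		.pre_reset	= mydrv_pre_reset,   // quiesce before SRST
 *		.reset_poll	= mydrv_reset_poll,  // consulted by reset_pollfunc()
 *		.resetproc	= mydrv_resetproc,   // controller fix-up after SRST
 *	};
 *
 * Only the hooks a controller actually needs have to be provided; all of
 * them are optional and checked for NULL before use, as seen above.
 */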

/*
 * ide_do_reset() is the entry point to the drive/interface reset code.
 */

ide_startstop_t ide_do_reset(ide_drive_t *drive)
{
	return do_reset1(drive, 0);
}
EXPORT_SYMBOL(ide_do_reset);
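
/*
 * Usage sketch (illustrative, with hypothetical names): within the IDE core
 * this code is usually reached indirectly, by returning
 * ide_error(drive, "...", stat) from a handler and letting the retry logic
 * above escalate to ide_do_reset().  A driver-level timeout path could also
 * call it directly, e.g.
 *
 *	if (mydrv_command_timed_out(drive))	// hypothetical helper
 *		return ide_do_reset(drive);
 *
 * Either way the caller must be prepared for an ide_started return value,
 * since the reset completes asynchronously via the poll handlers above.
 */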