/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/io.h>

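/**
 * ide_end_rq - complete (part of) a request
 * @drive: drive the request belongs to
 * @rq: request being completed
 * @error: block layer status for the completed bytes
 * @nr_bytes: number of bytes completed by this call
 *
 * Account @nr_bytes of @rq as done and, if that finishes the request,
 * end it and drop any sense request bookkeeping. Also re-enables DMA
 * once a DMA->PIO retry has run its course. Returns 0 when the request
 * has been fully completed and 1 when it still has bytes outstanding.
 */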
int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
	       unsigned int nr_bytes)
{
	/*
	 * decide whether to re-enable DMA -- 3 is an arbitrary magic number
	 * for now; if we get a DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_update_request(rq, error, nr_bytes)) {
		if (rq == drive->sense_rq) {
			drive->sense_rq = NULL;
			drive->sense_rq_active = false;
		}

		__blk_mq_end_request(rq, error);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ide_end_rq);

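/**
 * ide_complete_cmd - save the result of a completed command
 * @drive: drive the command was issued to
 * @cmd: command that has just completed
 * @stat: status register value
 * @err: error register value
 *
 * Record @stat/@err in the command's taskfile and read back the other
 * taskfile registers. For the custom-handled IDLE IMMEDIATE (head
 * unload) command the drive's parked flag is updated. For taskfile
 * requests the result is propagated back to the request's original
 * command (which is freed instead if it was dynamically allocated).
 */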
void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;
	u8 tf_cmd = tf->command;

	tf->error = err;
	tf->status = stat;

	if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
		u8 data[2];

		tp_ops->input_data(drive, cmd, data, 2);

		cmd->tf.data = data[0];
		cmd->hob.data = data[1];
	}

	ide_tf_readback(drive, cmd);

	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, cmd);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
	}

	if (rq && ata_taskfile_request(rq)) {
		struct ide_cmd *orig_cmd = ide_req(rq)->special;

		if (cmd->tf_flags & IDE_TFLAG_DYN)
			kfree(orig_cmd);
		else if (cmd != orig_cmd)
			memcpy(orig_cmd, cmd, sizeof(*cmd));
	}
}

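/**
 * ide_complete_rq - complete the port's current request
 * @drive: drive the request was issued to
 * @error: block layer status to complete with
 * @nr_bytes: number of bytes to complete
 *
 * Complete @nr_bytes of the port's current request (the whole request
 * if it is marked "no retry" and has failed). Clears hwif->rq when the
 * request has been fully completed and returns ide_end_rq()'s result.
 */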
int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int rc;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = blk_rq_sectors(rq) << 9;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		hwif->rq = NULL;

	return rc;
}
EXPORT_SYMBOL(ide_complete_rq);

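/**
 * ide_kill_rq - terminate a request with an error
 * @drive: drive the request was issued to
 * @rq: request to kill
 *
 * Fail @rq entirely. Driver-originated (misc) requests on floppy and
 * tape drives keep a result of 0, other tape requests get a generic
 * driver error, and remaining passthrough requests are marked -EIO.
 * The request is then completed with BLK_STS_IOERR for its full length.
 */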
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy || media == ide_tape) && drv_req) {
		scsi_req(rq)->result = 0;
	} else {
		if (media == ide_tape)
			scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
		else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
	}

	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
}

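/*
 * Helpers for do_special(): build the taskfiles for the
 * INIT_DEV_PARAMS (specify geometry), RESTORE (recalibrate) and
 * SET_MULTI (set multiple mode count) drive commands.
 */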
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->lbal = drive->sect;
	tf->lbam = drive->cyl;
	tf->lbah = drive->cyl >> 8;
	tf->device = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 */

static ide_startstop_t do_special(ide_drive_t *drive)
{
	struct ide_cmd cmd;

#ifdef DEBUG
	printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
	       drive->special_flags);
#endif
	if (drive->media != ide_disk) {
		drive->special_flags = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = ATA_PROT_NODATA;

	if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
		drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
		drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
		drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else
		BUG();

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &cmd);

	return ide_started;
}

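/**
 * ide_map_sg - map a request into the port's scatterlist
 * @drive: drive the command is for
 * @cmd: command whose request is to be mapped
 *
 * Map the bio segments of the command's request into the port's
 * scatter/gather table and record the number of entries used in
 * @cmd->sg_nents.
 */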
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct request *rq = cmd->rq;

	cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
EXPORT_SYMBOL_GPL(ide_map_sg);

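/**
 * ide_init_sg_cmd - initialize a command's transfer counters
 * @cmd: command to initialize
 * @nr_bytes: total number of bytes to transfer
 *
 * Reset the byte counters and the scatterlist cursor so the transfer
 * starts from the beginning of the mapping.
 */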
void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
{
	cmd->nbytes = cmd->nleft = nr_bytes;
	cmd->cursg_ofs = 0;
	cmd->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue a special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by an ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Oddly, it can also be called with a NULL command to
 * wait for all pending commands to finish. Don't rely on this, as
 * it is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
					  struct request *rq)
{
	struct ide_cmd *cmd = ide_req(rq)->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
			ide_map_sg(drive, cmd);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	scsi_req(rq)->result = 0;
	ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));

	return ide_stopped;
}

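/**
 * ide_special_rq - dispatch an internal "misc" request
 * @drive: drive the request is for
 * @rq: request to dispatch
 *
 * Handle the driver-internal requests used for head parking/unparking,
 * device setting changes and drive resets, based on the first command
 * byte of the request.
 */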
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = scsi_req(rq)->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		BUG();
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 * @drive: drive the request is for
 * @rq: request to start
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
	       drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->rq_flags |= RQF_FAILED;
		goto kill_rq;
	}

	if (drive->prep_rq && !drive->prep_rq(drive, rq))
		return ide_stopped;

	if (ata_pm_request(rq))
		ide_check_pm_state(drive, rq);

	drive->hwif->tp_ops->dev_select(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}

	if (drive->special_flags == 0) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (ata_taskfile_request(rq))
			return execute_drive_cmd(drive, rq);
		else if (ata_pm_request(rq)) {
			struct ide_pm_state *pm = ide_req(rq)->special;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
			       drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && ata_misc_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, blk_rq_pos(rq));
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the port by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);

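/*
 * Port and host locking helpers. ide_lock_port()/ide_unlock_port()
 * toggle a simple busy flag and are called with hwif->lock held;
 * ide_lock_host()/ide_unlock_host() only take effect on serialized
 * hosts, where they also invoke the host's get_lock/release_lock hooks.
 */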
static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}

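/**
 * ide_requeue_and_plug - put a request back and restart the queue later
 * @drive: drive the request belongs to
 * @rq: request to requeue, or NULL to just kick the queue
 *
 * Requeue @rq (if any) and schedule the blk-mq queue to run again after
 * a short delay, mimicking the old block layer plug behaviour.
 */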
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;

	/* Use 3ms as that was the old plug delay */
	if (rq) {
		blk_mq_requeue_request(rq, false);
		blk_mq_delay_kick_requeue_list(q, 3);
	} else
		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}

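/**
 * ide_issue_rq - issue a request to a drive
 * @drive: drive the request is for
 * @rq: request to issue
 * @local_requeue: on contention, queue the request on the drive's
 *	private list instead of requeuing it through the block layer
 *
 * Take the host and port locks, honour sleeping drives, serialized
 * hosts and power-management blocking, and then hand the request to
 * start_request(). If the port or host is busy the request is
 * requeued and the queue replugged.
 */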
blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
			  bool local_requeue)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	ide_startstop_t startstop;

	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
		rq->rq_flags |= RQF_DONTPREP;
		ide_req(rq)->special = NULL;
	}

	/* HLD do_request() callback might sleep, make sure it's okay */
	might_sleep();

	if (ide_lock_host(host, hwif))
		return BLK_STS_DEV_RESOURCE;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;

		WARN_ON_ONCE(hwif->rq);
repeat:
		prev_port = hwif->host->cur_port;
		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			ide_drive_t *cur_dev =
				prev_port ? prev_port->cur_dev : NULL;

			/*
			 * set nIEN for previous port, drives in the
			 * quirk list may not like intr setups/cleanups
			 */
			if (cur_dev &&
			    (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
				prev_port->tp_ops->write_devctl(prev_port,
								ATA_NIEN |
								ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the blk_fetch_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * through, though. Hopefully that doesn't happen too much,
		 * and not unless the subdriver triggers such a thing in its
		 * own PM state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    ata_pm_request(rq) == 0 &&
		    (rq->rq_flags & RQF_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		scsi_req(rq)->resid_len = blk_rq_bytes(rq);
		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped) {
			rq = hwif->rq;
			hwif->rq = NULL;
			if (rq)
				goto repeat;
			ide_unlock_port(hwif);
			goto out;
		}
	} else {
plug_device:
		if (local_requeue)
			list_add(&rq->queuelist, &drive->rq_list);
		spin_unlock_irq(&hwif->lock);
		ide_unlock_host(host);
		if (!local_requeue)
			ide_requeue_and_plug(drive, rq);
		return BLK_STS_OK;
	}

out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	return BLK_STS_OK;
}

/*
 * Issue a new request to a device.
 */
blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	ide_drive_t *drive = hctx->queue->queuedata;
	ide_hwif_t *hwif = drive->hwif;

	spin_lock_irq(&hwif->lock);
	if (drive->sense_rq_active) {
		spin_unlock_irq(&hwif->lock);
		return BLK_STS_DEV_RESOURCE;
	}
	spin_unlock_irq(&hwif->lock);

	blk_mq_start_request(bd->rq);
	return ide_issue_rq(drive, bd->rq, false);
}

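/*
 * drive_is_ready() - check whether the drive could be the source of the
 * pending interrupt. When a DMA transfer is in flight this is delegated
 * to the DMA engine's dma_test_irq(); otherwise the (alternate) status
 * register is sampled and a BUSY drive is assumed not to be interrupting.
 */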
static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @t: the timer embedded in the port (hwif) whose command timed out
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and, provided it is happy, the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */

void ide_timer_expiry (struct timer_list *t)
{
	ide_hwif_t *hwif = from_timer(hwif, t, timer);
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	int wait = -1;
	int plug_device = 0;
	struct request *uninitialized_var(rq_in_flight);

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		hwif->expiry = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);

		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->port_ops && hwif->port_ops->clear_irq)
				hwif->port_ops->clear_irq(drive);

			printk(KERN_WARNING "%s: lost interrupt\n",
			       drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		/* Disable interrupts again, `handler' might have enabled it */
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped && hwif->polling == 0) {
			rq_in_flight = hwif->rq;
			hwif->rq = NULL;
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}
}

/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwif: port being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes interrupts are disabled (the old __cli()
 * semantics) when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 */

static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do, be aware that it is subtle
 * in places.
 *
 * hwif is the interface in the group currently performing
 * a command. hwif->cur_dev is the drive and hwif->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the port and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;
	struct request *uninitialized_var(rq_in_flight);

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->port_ops && hwif->port_ops->test_irq &&
	    hwif->port_ops->test_irq(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 * (1) an interrupt from another PCI device
		 * sharing the same PCI INT# as us.
		 * or (2) a drive just entered sleep or standby mode,
		 * and is interrupting to let us know.
		 * or (3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->expiry = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped && hwif->polling == 0) {
		BUG_ON(hwif->handler);
		rq_in_flight = hwif->rq;
		hwif->rq = NULL;
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);

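/**
 * ide_pad_transfer - pad a PIO transfer
 * @drive: drive the transfer is for
 * @write: non-zero to pad by writing, zero to pad by reading
 * @len: number of bytes left to pad
 *
 * Drain or fill the remainder of a PIO data transfer with dummy bytes,
 * four at a time, so the drive's data request is fully satisfied.
 */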
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);

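/**
 * ide_insert_request_head - queue a sense/special request for dispatch
 * @drive: drive the request is for
 * @rq: request to queue
 *
 * Flag an active sense request for the drive, add @rq to the drive's
 * private request list and kick the queue work so it gets dispatched.
 */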
void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
	drive->sense_rq_active = true;
	list_add_tail(&rq->queuelist, &drive->rq_list);
	kblockd_schedule_work(&drive->rq_work);
}
EXPORT_SYMBOL_GPL(ide_insert_request_head);