/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>

#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
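
/*
 * Illustrative sketch (not part of the original file): a typical SFF
 * driver inherits these defaults via .inherits and overrides only what
 * its hardware needs.  The "my_pata_ops" name and the choice of the
 * 32-bit PIO override below are hypothetical.
 */
#if 0
static struct ata_port_operations my_pata_ops = {
	.inherits	= &ata_sff_port_ops,
	/* use 32-bit PIO data transfers instead of the 16-bit default */
	.sff_data_xfer	= ata_sff_data_xfer32,
};
#endif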

/**
 *	ata_sff_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads the ATA taskfile status register for the currently-selected
 *	device and returns its value. This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 *	ata_sff_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads the ATA taskfile alternate status register for the
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_sff_irq_status - Check if the device is busy
 *	@ap: port where the device is
 *
 *	Determine if the port is currently busy. Uses altstatus
 *	if available in order to avoid clearing shared IRQ status
 *	when finding an IRQ source. Fortunately for us, devices
 *	without a ctl register don't share interrupt lines.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		/* Not us: We are busy */
		if (status & ATA_BUSY)
			return status;
	}
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 *	ata_sff_sync - Flush writes
 *	@ap: Port to wait for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_sff_pause		-	Flush writes and wait 400nS
 *	@ap: Port to pause for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 *	ata_sff_dma_pause	-	Pause before commencing DMA
 *	@ap: Port to pause for.
 *
 *	Perform I/O fencing and ensure sufficient cycle delays occur
 *	for the HDMA1:0 transition
 */

void ata_sff_dma_pause(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		/* An altstatus read will cause the needed delay without
		   messing up the IRQ status */
		ata_sff_altstatus(ap);
		return;
	}
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout in msecs
 *	@tmout: overall timeout in msecs
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_warn(ap,
			      "port is slow to respond, please be patient (Status 0x%x)\n",
			      status);

	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_err(ap,
			     "port failed to respond (%lu secs, Status 0x%x)\n",
			     DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
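
/*
 * Illustrative sketch (not part of the original file): a sleeping
 * caller might wait for BSY with a short "impatience" timeout before
 * the longer overall timeout; both values are in msecs.  The helper
 * name and the timeout values chosen here are arbitrary examples.
 */
#if 0
static int my_wait_not_busy(struct ata_port *ap)
{
	/* warn after 5 seconds, give up with -EBUSY after 30 seconds */
	return ata_sff_busy_sleep(ap, 5000, 30000);
}
#endif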

static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 *	ata_sff_wait_ready - sleep until BSY clears, or timeout
 *	@link: SFF link to wait ready status for
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 *	ata_sff_set_devctl - Write device control reg
 *	@ap: port where the device is
 *	@ctl: value to write
 *
 *	Writes ATA taskfile device control register.
 *
 *	Note: may NOT be used as the sff_set_devctl() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl)
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);
}

/**
 *	ata_sff_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_sff_dev_select(), which
 *	additionally provides the services of inserting the proper
 *	pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
			      device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_sff_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	Note: may NOT be used as the sff_irq_on() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 *	ata_sff_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);

/**
 *	ata_sff_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf. Assumes the device has a fully SFF compliant task file
 *	layout and behaviour. If your device does not (e.g. has a different
 *	status method), then you will need to provide a replacement tf_read.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_sff_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 *	ata_sff_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);
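
/*
 * Illustrative sketch (not part of the original file): loading a
 * taskfile and issuing its command are two separate steps, which the
 * private helper ata_tf_to_host() below pairs up.  This hypothetical
 * snippet issues a NODATA command; it assumes the caller holds the
 * host lock and uses the core ata_tf_init() initializer.
 */
#if 0
static void my_issue_nodata(struct ata_port *ap, struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = cmd;
	tf.protocol = ATA_PROT_NODATA;
	tf.flags |= ATA_TFLAG_DEVICE;

	ap->ops->sff_tf_load(ap, &tf);		/* write shadow registers */
	ap->ops->sff_exec_command(ap, &tf);	/* write command register */
}
#endif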

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->sff_tf_load(ap, tf);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 *	ata_sff_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
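
/*
 * Illustrative sketch (not part of the original file): drivers reach
 * this routine through ap->ops->sff_data_xfer.  Note that an odd
 * buflen is padded to a full 16-bit word, so the returned byte count
 * is buflen rounded up to the next even number.  The wrapper name is
 * hypothetical.
 */
#if 0
static unsigned int my_pio_read(struct ata_device *dev, void *buf,
				unsigned int len)
{
	/* for len == 7 this transfers 3 words plus a padded word,
	   and returns 8 */
	return dev->link->ap->ops->sff_data_xfer(dev, buf, len, READ);
}
#endif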

/**
 *	ata_sff_data_xfer32 - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO using 32bit
 *	I/O operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(dev, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);

/**
 *	ata_sff_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				     unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);

/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page);

		/* do the actual data transfer */
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);

		kunmap_atomic(buf);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);
	}

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}

/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ? */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page);

		/* do the actual data transfer */
		consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
								count, rw);

		kunmap_atomic(buf);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
								count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & ATAPI_COD))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
						struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		   (qc->tf.flags & ATA_TFLAG_WRITE))
		    return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		   !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;

	if (ap->ops->error_handler) {
		if (in_wq) {
			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ata_sff_irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
		} else
			ata_qc_complete(qc);
	}
}

/**
 *	ata_sff_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	int poll_next;

	lockdep_assert_held(ap->lock);

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device.  Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have.  Set NODEV_HINT
				 * for it.  Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		WARN(true, "ata%d: SFF host state machine in invalid state %d",
		     ap->print_id, ap->hsm_task_state);
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);

void ata_sff_queue_work(struct work_struct *work)
{
	queue_work(ata_sff_wq, work);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_work);

void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	queue_delayed_work(ata_sff_wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);

void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);

void ata_sff_flush_pio_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_delayed_work_sync(&ap->sff_pio_task);

	/*
	 * We want to reset the HSM state to IDLE.  If we do so without
	 * grabbing the port lock, critical sections protected by it which
	 * expect the HSM state to stay stable may get surprised.  For
	 * example, we may set IDLE in between the time
	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
	 */
	spin_lock_irq(ap->lock);
	ap->hsm_task_state = HSM_ST_IDLE;
	spin_unlock_irq(ap->lock);

	ap->sff_pio_task_link = NULL;

	if (ata_msg_ctl(ap))
		ata_port_dbg(ap, "%s: EXIT\n", __func__);
}

static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	spin_lock_irq(ap->lock);

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		goto out_unlock;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		spin_unlock_irq(ap->lock);
		ata_msleep(ap, 2);
		spin_lock_irq(ap->lock);

		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			goto out_unlock;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
out_unlock:
	spin_unlock_irq(ap->lock);
}

/**
 *	ata_sff_qc_issue - issue taskfile to a SFF controller
 *	@qc: command to issue to device
 *
 *	This function issues a PIO or NODATA command to a SFF
 *	controller.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest.  otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 *	@qc: qc to fill result TF for
 *
 *	@qc is finished and result TF needs to be filled.  Fill it
 *	using ->sff_tf_read.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	true indicating that result TF is successfully filled.
 */
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
	return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_warn(ap, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST_IDLE:
		return ata_sff_idle_irq(ap);
	default:
		break;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 *	ata_sff_port_intr - Handle SFF port interrupt
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle port interrupt for given queued command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting IRQ but the controller is actually
	 * asserting the IRQ line, a "nobody cared" warning will ensue.
	 * Check IRQ pending status if available and clear spurious IRQs.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	ata_sff_interrupt - Default SFF ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_sff_port_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
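
/*
 * Illustrative sketch (not part of the original file): a native-mode
 * PCI driver would typically pass this handler to ata_host_activate()
 * at probe time.  The "rc", "pdev", and "my_sht" variables and the
 * IRQF_SHARED choice here are hypothetical.
 */
#if 0
	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
			       IRQF_SHARED, &my_sht);
#endif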

/**
 *	ata_sff_lost_interrupt	-	Check for an apparent lost interrupt
 *	@ap: port that appears to have timed out
 *
 *	Called from the libata error handlers when the core code suspects
 *	an interrupt has been lost. If it has, complete anything we can and
 *	then return. The interface must support altstatus for this faster
 *	recovery to occur.
 *
 *	Locking:
 *	Caller holds host lock
 */

void ata_sff_lost_interrupt(struct ata_port *ap)
{
	u8 status;
	struct ata_queued_cmd *qc;

	/* Only one outstanding command per SFF channel */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* We cannot lose an interrupt on a non-existent or polled command */
	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
		return;
	/* See if the controller thinks it is still busy - if so the command
	   isn't a lost IRQ but is still in progress */
	status = ata_sff_altstatus(ap);
	if (status & ATA_BUSY)
		return;

	/* There was a command running, we are no longer busy and we have
	   no interrupt. */
	ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
								status);
	/* Run the host interrupt logic as if the interrupt had not been
	   lost */
	ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 *	ata_sff_freeze - Freeze SFF controller port
 *	@ap: port to freeze
 *
 *	Freeze SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ap->ops->sff_check_status(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);

/**
 *	ata_sff_thaw - Thaw SFF controller port
 *	@ap: port to thaw
 *
 *	Thaw SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);

/**
 *	ata_sff_prereset - prepare SFF link for reset
 *	@link: SFF link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	SFF link @link is about to be reset.  Initialize it.  It first
 *	calls ata_std_prereset() and then waits for !BSY if the port is
 *	being softreset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = ata_std_prereset(link, deadline);
	if (rc)
		return rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_sff_wait_ready(link, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_warn(link,
				      "device not ready (errno=%d), forcing hardreset\n",
				      rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);
1815 
1816 /**
1817  *	ata_devchk - PATA device presence detection
1818  *	@ap: ATA channel to examine
1819  *	@device: Device to examine (starting at zero)
1820  *
1821  *	This technique was originally described in
1822  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
1823  *	later found its way into the ATA/ATAPI spec.
1824  *
1825  *	Write a pattern to the ATA shadow registers,
1826  *	and if a device is present, it will respond by
1827  *	correctly storing and echoing back the
1828  *	ATA shadow register contents.
1829  *
1830  *	LOCKING:
1831  *	caller.
1832  */
1833 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1834 {
1835 	struct ata_ioports *ioaddr = &ap->ioaddr;
1836 	u8 nsect, lbal;
1837 
1838 	ap->ops->sff_dev_select(ap, device);
1839 
1840 	iowrite8(0x55, ioaddr->nsect_addr);
1841 	iowrite8(0xaa, ioaddr->lbal_addr);
1842 
1843 	iowrite8(0xaa, ioaddr->nsect_addr);
1844 	iowrite8(0x55, ioaddr->lbal_addr);
1845 
1846 	iowrite8(0x55, ioaddr->nsect_addr);
1847 	iowrite8(0xaa, ioaddr->lbal_addr);
1848 
1849 	nsect = ioread8(ioaddr->nsect_addr);
1850 	lbal = ioread8(ioaddr->lbal_addr);
1851 
1852 	if ((nsect == 0x55) && (lbal == 0xaa))
1853 		return 1;	/* we found a device */
1854 
1855 	return 0;		/* nothing found */
1856 }
1857 
1858 /**
1859  *	ata_sff_dev_classify - Parse returned ATA device signature
1860  *	@dev: ATA device to classify (starting at zero)
1861  *	@present: device seems present
1862  *	@r_err: Value of error register on completion
1863  *
1864  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1865  *	an ATA/ATAPI-defined set of values is placed in the ATA
1866  *	shadow registers, indicating the results of device detection
1867  *	and diagnostics.
1868  *
1869  *	Select the ATA device, and read the values from the ATA shadow
1870  *	registers.  Then parse according to the Error register value,
1871  *	and the spec-defined values examined by ata_dev_classify().
1872  *
1873  *	LOCKING:
1874  *	caller.
1875  *
1876  *	RETURNS:
1877  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1878  */
1879 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1880 				  u8 *r_err)
1881 {
1882 	struct ata_port *ap = dev->link->ap;
1883 	struct ata_taskfile tf;
1884 	unsigned int class;
1885 	u8 err;
1886 
1887 	ap->ops->sff_dev_select(ap, dev->devno);
1888 
1889 	memset(&tf, 0, sizeof(tf));
1890 
1891 	ap->ops->sff_tf_read(ap, &tf);
1892 	err = tf.feature;
1893 	if (r_err)
1894 		*r_err = err;
1895 
1896 	/* see if device passed diags: continue and warn later */
1897 	if (err == 0)
1898 		/* diagnostic fail : do nothing _YET_ */
1899 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1900 	else if (err == 1)
1901 		/* do nothing */ ;
1902 	else if ((dev->devno == 0) && (err == 0x81))
1903 		/* do nothing */ ;
1904 	else
1905 		return ATA_DEV_NONE;
1906 
1907 	/* determine if device is ATA or ATAPI */
1908 	class = ata_dev_classify(&tf);
1909 
1910 	if (class == ATA_DEV_UNKNOWN) {
1911 		/* If the device failed diagnostic, it's likely to
1912 		 * have reported incorrect device signature too.
1913 		 * Assume ATA device if the device seems present but
1914 		 * device signature is invalid with diagnostic
1915 		 * failure.
1916 		 */
1917 		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1918 			class = ATA_DEV_ATA;
1919 		else
1920 			class = ATA_DEV_NONE;
1921 	} else if ((class == ATA_DEV_ATA) &&
1922 		   (ap->ops->sff_check_status(ap) == 0))
1923 		class = ATA_DEV_NONE;
1924 
1925 	return class;
1926 }
1927 EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
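/*
 * For reference, the post-reset signatures parsed above: an ATAPI device
 * leaves (nsect, lbal, lbam, lbah) = (0x01, 0x01, 0x14, 0xeb) in the
 * shadow registers, while a plain ATA device leaves
 * (0x01, 0x01, 0x00, 0x00); ata_dev_classify() maps these to
 * ATA_DEV_ATAPI and ATA_DEV_ATA respectively.
 */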
1928 
1929 /**
1930  *	ata_sff_wait_after_reset - wait for devices to become ready after reset
1931  *	@link: SFF link which is just reset
1932  *	@devmask: mask of present devices
1933  *	@deadline: deadline jiffies for the operation
1934  *
1935  *	Wait for devices attached to SFF @link to become ready after
1936  *	reset.  This includes a preceding 150ms wait to avoid accessing
1937  *	the TF status register too early.
1938  *
1939  *	LOCKING:
1940  *	Kernel thread context (may sleep).
1941  *
1942  *	RETURNS:
1943  *	0 on success, -ENODEV if some or all of devices in @devmask
1944  *	don't seem to exist.  -errno on other errors.
1945  */
1946 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1947 			     unsigned long deadline)
1948 {
1949 	struct ata_port *ap = link->ap;
1950 	struct ata_ioports *ioaddr = &ap->ioaddr;
1951 	unsigned int dev0 = devmask & (1 << 0);
1952 	unsigned int dev1 = devmask & (1 << 1);
1953 	int rc, ret = 0;
1954 
1955 	ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1956 
1957 	/* always check readiness of the master device */
1958 	rc = ata_sff_wait_ready(link, deadline);
1959 	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
1960 	 * and TF status is 0xff, bail out on it too.
1961 	 */
1962 	if (rc)
1963 		return rc;
1964 
1965 	/* if device 1 was found in ata_devchk, wait for register
1966 	 * access briefly, then wait for BSY to clear.
1967 	 */
1968 	if (dev1) {
1969 		int i;
1970 
1971 		ap->ops->sff_dev_select(ap, 1);
1972 
1973 		/* Wait for register access.  Some ATAPI devices fail
1974 		 * to set nsect/lbal after reset, so don't waste too
1975 		 * much time on it.  We're gonna wait for !BSY anyway.
1976 		 */
1977 		for (i = 0; i < 2; i++) {
1978 			u8 nsect, lbal;
1979 
1980 			nsect = ioread8(ioaddr->nsect_addr);
1981 			lbal = ioread8(ioaddr->lbal_addr);
1982 			if ((nsect == 1) && (lbal == 1))
1983 				break;
1984 			ata_msleep(ap, 50);	/* give drive a breather */
1985 		}
1986 
1987 		rc = ata_sff_wait_ready(link, deadline);
1988 		if (rc) {
1989 			if (rc != -ENODEV)
1990 				return rc;
1991 			ret = rc;
1992 		}
1993 	}
1994 
1995 	/* is all this really necessary? */
1996 	ap->ops->sff_dev_select(ap, 0);
1997 	if (dev1)
1998 		ap->ops->sff_dev_select(ap, 1);
1999 	if (dev0)
2000 		ap->ops->sff_dev_select(ap, 0);
2001 
2002 	return ret;
2003 }
2004 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
2005 
2006 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
2007 			     unsigned long deadline)
2008 {
2009 	struct ata_ioports *ioaddr = &ap->ioaddr;
2010 
2011 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2012 
2013 	if (ap->ioaddr.ctl_addr) {
2014 		/* software reset.  causes dev0 to be selected */
2015 		iowrite8(ap->ctl, ioaddr->ctl_addr);
2016 		udelay(20);	/* FIXME: flush */
2017 		iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2018 		udelay(20);	/* FIXME: flush */
2019 		iowrite8(ap->ctl, ioaddr->ctl_addr);
2020 		ap->last_ctl = ap->ctl;
2021 	}
2022 
2023 	/* wait for the port to become ready */
2024 	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
2025 }
2026 
2027 /**
2028  *	ata_sff_softreset - reset host port via ATA SRST
2029  *	@link: ATA link to reset
2030  *	@classes: resulting classes of attached devices
2031  *	@deadline: deadline jiffies for the operation
2032  *
2033  *	Reset host port using ATA SRST.
2034  *
2035  *	LOCKING:
2036  *	Kernel thread context (may sleep)
2037  *
2038  *	RETURNS:
2039  *	0 on success, -errno otherwise.
2040  */
2041 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2042 		      unsigned long deadline)
2043 {
2044 	struct ata_port *ap = link->ap;
2045 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2046 	unsigned int devmask = 0;
2047 	int rc;
2048 	u8 err;
2049 
2050 	DPRINTK("ENTER\n");
2051 
2052 	/* determine if device 0/1 are present */
2053 	if (ata_devchk(ap, 0))
2054 		devmask |= (1 << 0);
2055 	if (slave_possible && ata_devchk(ap, 1))
2056 		devmask |= (1 << 1);
2057 
2058 	/* select device 0 again */
2059 	ap->ops->sff_dev_select(ap, 0);
2060 
2061 	/* issue bus reset */
2062 	DPRINTK("about to softreset, devmask=%x\n", devmask);
2063 	rc = ata_bus_softreset(ap, devmask, deadline);
2064 	/* if link is occupied, -ENODEV too is an error */
2065 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2066 		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
2067 		return rc;
2068 	}
2069 
2070 	/* determine by signature whether we have ATA or ATAPI devices */
2071 	classes[0] = ata_sff_dev_classify(&link->device[0],
2072 					  devmask & (1 << 0), &err);
2073 	if (slave_possible && err != 0x81)
2074 		classes[1] = ata_sff_dev_classify(&link->device[1],
2075 						  devmask & (1 << 1), &err);
2076 
2077 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2078 	return 0;
2079 }
2080 EXPORT_SYMBOL_GPL(ata_sff_softreset);
2081 
2082 /**
2083  *	sata_sff_hardreset - reset host port via SATA phy reset
2084  *	@link: link to reset
2085  *	@class: resulting class of attached device
2086  *	@deadline: deadline jiffies for the operation
2087  *
2088  *	SATA phy-reset the host port using the DET bits of the SControl
2089  *	register, wait for !BSY and classify the attached device.
2090  *
2091  *	LOCKING:
2092  *	Kernel thread context (may sleep)
2093  *
2094  *	RETURNS:
2095  *	0 on success, -errno otherwise.
2096  */
2097 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2098 		       unsigned long deadline)
2099 {
2100 	struct ata_eh_context *ehc = &link->eh_context;
2101 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
2102 	bool online;
2103 	int rc;
2104 
2105 	rc = sata_link_hardreset(link, timing, deadline, &online,
2106 				 ata_sff_check_ready);
2107 	if (online)
2108 		*class = ata_sff_dev_classify(link->device, 1, NULL);
2109 
2110 	DPRINTK("EXIT, class=%u\n", *class);
2111 	return rc;
2112 }
2113 EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2114 
2115 /**
2116  *	ata_sff_postreset - SFF postreset callback
2117  *	@link: the target SFF ata_link
2118  *	@classes: classes of attached devices
2119  *
2120  *	This function is invoked after a successful reset.  It first
2121  *	calls ata_std_postreset() and then performs SFF-specific postreset
2122  *	processing.
2123  *
2124  *	LOCKING:
2125  *	Kernel thread context (may sleep)
2126  */
2127 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2128 {
2129 	struct ata_port *ap = link->ap;
2130 
2131 	ata_std_postreset(link, classes);
2132 
2133 	/* is double-select really necessary? */
2134 	if (classes[0] != ATA_DEV_NONE)
2135 		ap->ops->sff_dev_select(ap, 1);
2136 	if (classes[1] != ATA_DEV_NONE)
2137 		ap->ops->sff_dev_select(ap, 0);
2138 
2139 	/* bail out if no device is present */
2140 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2141 		DPRINTK("EXIT, no device\n");
2142 		return;
2143 	}
2144 
2145 	/* set up device control */
2146 	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2147 		ata_sff_set_devctl(ap, ap->ctl);
2148 		ap->last_ctl = ap->ctl;
2149 	}
2150 }
2151 EXPORT_SYMBOL_GPL(ata_sff_postreset);
2152 
2153 /**
2154  *	ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2155  *	@qc: command
2156  *
2157  *	Drain the FIFO and device of any stuck data following a command
2158  *	that failed to complete.  In some cases this is necessary before a
2159  *	reset will recover the device.
2160  *
2161  */
2162 
2163 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2164 {
2165 	int count;
2166 	struct ata_port *ap;
2167 
2168 	/* We only need to flush incoming data when a command was running */
2169 	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2170 		return;
2171 
2172 	ap = qc->ap;
2173 	/* Drain up to 64K of data before we give up this recovery method */
2174 	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2175 						&& count < 65536; count += 2)
2176 		ioread16(ap->ioaddr.data_addr);
2177 
2178 	/* Can become DEBUG later */
2179 	if (count)
2180 		ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2181 
2182 }
2183 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2184 
2185 /**
2186  *	ata_sff_error_handler - Stock error handler for SFF controller
2187  *	@ap: port to handle error for
2188  *
2189  *	Stock error handler for SFF controller.  It can handle both
2190  *	PATA and SATA controllers.  Many controllers should be able to
2191  *	use this EH as-is or with some added handling before and
2192  *	after.
2193  *
2194  *	LOCKING:
2195  *	Kernel thread context (may sleep)
2196  */
2197 void ata_sff_error_handler(struct ata_port *ap)
2198 {
2199 	ata_reset_fn_t softreset = ap->ops->softreset;
2200 	ata_reset_fn_t hardreset = ap->ops->hardreset;
2201 	struct ata_queued_cmd *qc;
2202 	unsigned long flags;
2203 
2204 	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2205 	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2206 		qc = NULL;
2207 
2208 	spin_lock_irqsave(ap->lock, flags);
2209 
2210 	/*
2211 	 * We *MUST* do FIFO draining before we issue a reset as
2212 	 * several devices helpfully clear their internal state and
2213 	 * will lock solid if we touch the data port post reset. Pass
2214 	 * qc in case anyone wants to do different PIO/DMA recovery or
2215 	 * has per command fixups
2216 	 */
2217 	if (ap->ops->sff_drain_fifo)
2218 		ap->ops->sff_drain_fifo(qc);
2219 
2220 	spin_unlock_irqrestore(ap->lock, flags);
2221 
2222 	/* ignore built-in hardresets if SCR access is not available */
2223 	if ((hardreset == sata_std_hardreset ||
2224 	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2225 		hardreset = NULL;
2226 
2227 	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2228 		  ap->ops->postreset);
2229 }
2230 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2231 
2232 /**
2233  *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
2234  *	@ioaddr: IO address structure to be initialized
2235  *
2236  *	Utility function which initializes data_addr, error_addr,
2237  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2238  *	device_addr, status_addr, and command_addr to standard offsets
2239  *	relative to cmd_addr.
2240  *
2241  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2242  */
2243 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2244 {
2245 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2246 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2247 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2248 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2249 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2250 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2251 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2252 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2253 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2254 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2255 }
2256 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
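/*
 * Typical use, sketched for a legacy primary channel at the usual I/O
 * ports (illustrative only; "dev" and "ap" come from the probing driver):
 */
#if 0
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = devm_ioport_map(dev, 0x1f0, 8);
	ioaddr->ctl_addr = devm_ioport_map(dev, 0x3f6, 1);
	ioaddr->altstatus_addr = ioaddr->ctl_addr;
	ata_sff_std_ports(ioaddr);	/* derive the taskfile registers */
#endif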
2257 
2258 #ifdef CONFIG_PCI
2259 
2260 static int ata_resources_present(struct pci_dev *pdev, int port)
2261 {
2262 	int i;
2263 
2264 	/* Check the PCI resources for this channel are enabled */
2265 	port = port * 2;
2266 	for (i = 0; i < 2; i++) {
2267 		if (pci_resource_start(pdev, port + i) == 0 ||
2268 		    pci_resource_len(pdev, port + i) == 0)
2269 			return 0;
2270 	}
2271 	return 1;
2272 }
2273 
2274 /**
2275  *	ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2276  *	@host: target ATA host
2277  *
2278  *	Acquire native PCI ATA resources for @host and initialize the
2279  *	first two ports of @host accordingly.  Ports marked dummy are
2280  *	skipped and allocation failure makes the port dummy.
2281  *
2282  *	Note that native PCI resources are valid even for legacy hosts
2283  *	as we fix up the pdev resources array early in boot, so this
2284  *	function can be used for both native and legacy SFF hosts.
2285  *
2286  *	LOCKING:
2287  *	Inherited from calling layer (may sleep).
2288  *
2289  *	RETURNS:
2290  *	0 if at least one port is initialized, -ENODEV if no port is
2291  *	available.
2292  */
2293 int ata_pci_sff_init_host(struct ata_host *host)
2294 {
2295 	struct device *gdev = host->dev;
2296 	struct pci_dev *pdev = to_pci_dev(gdev);
2297 	unsigned int mask = 0;
2298 	int i, rc;
2299 
2300 	/* request, iomap BARs and init port addresses accordingly */
2301 	for (i = 0; i < 2; i++) {
2302 		struct ata_port *ap = host->ports[i];
2303 		int base = i * 2;
2304 		void __iomem * const *iomap;
2305 
2306 		if (ata_port_is_dummy(ap))
2307 			continue;
2308 
2309 		/* Discard disabled ports.  Some controllers show
2310 		 * their unused channels this way.  Disabled ports are
2311 		 * made dummy.
2312 		 */
2313 		if (!ata_resources_present(pdev, i)) {
2314 			ap->ops = &ata_dummy_port_ops;
2315 			continue;
2316 		}
2317 
2318 		rc = pcim_iomap_regions(pdev, 0x3 << base,
2319 					dev_driver_string(gdev));
2320 		if (rc) {
2321 			dev_warn(gdev,
2322 				 "failed to request/iomap BARs for port %d (errno=%d)\n",
2323 				 i, rc);
2324 			if (rc == -EBUSY)
2325 				pcim_pin_device(pdev);
2326 			ap->ops = &ata_dummy_port_ops;
2327 			continue;
2328 		}
2329 		host->iomap = iomap = pcim_iomap_table(pdev);
2330 
2331 		ap->ioaddr.cmd_addr = iomap[base];
2332 		ap->ioaddr.altstatus_addr =
2333 		ap->ioaddr.ctl_addr = (void __iomem *)
2334 			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2335 		ata_sff_std_ports(&ap->ioaddr);
2336 
2337 		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2338 			(unsigned long long)pci_resource_start(pdev, base),
2339 			(unsigned long long)pci_resource_start(pdev, base + 1));
2340 
2341 		mask |= 1 << i;
2342 	}
2343 
2344 	if (!mask) {
2345 		dev_err(gdev, "no available native port\n");
2346 		return -ENODEV;
2347 	}
2348 
2349 	return 0;
2350 }
2351 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2352 
2353 /**
2354  *	ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2355  *	@pdev: target PCI device
2356  *	@ppi: array of port_info, must be enough for two ports
2357  *	@r_host: out argument for the initialized ATA host
2358  *
2359  *	Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2360  *	all PCI resources and initialize it accordingly in one go.
2361  *
2362  *	LOCKING:
2363  *	Inherited from calling layer (may sleep).
2364  *
2365  *	RETURNS:
2366  *	0 on success, -errno otherwise.
2367  */
2368 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2369 			     const struct ata_port_info * const *ppi,
2370 			     struct ata_host **r_host)
2371 {
2372 	struct ata_host *host;
2373 	int rc;
2374 
2375 	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2376 		return -ENOMEM;
2377 
2378 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2379 	if (!host) {
2380 		dev_err(&pdev->dev, "failed to allocate ATA host\n");
2381 		rc = -ENOMEM;
2382 		goto err_out;
2383 	}
2384 
2385 	rc = ata_pci_sff_init_host(host);
2386 	if (rc)
2387 		goto err_out;
2388 
2389 	devres_remove_group(&pdev->dev, NULL);
2390 	*r_host = host;
2391 	return 0;
2392 
2393 err_out:
2394 	devres_release_group(&pdev->dev, NULL);
2395 	return rc;
2396 }
2397 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2398 
2399 /**
2400  *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2401  *	@host: target SFF ATA host
2402  *	@irq_handler: irq_handler used when requesting IRQ(s)
2403  *	@sht: scsi_host_template to use when registering the host
2404  *
2405  *	This is the counterpart of ata_host_activate() for SFF ATA
2406  *	hosts.  This separate helper is necessary because SFF hosts
2407  *	use two separate interrupts in legacy mode.
2408  *
2409  *	LOCKING:
2410  *	Inherited from calling layer (may sleep).
2411  *
2412  *	RETURNS:
2413  *	0 on success, -errno otherwise.
2414  */
2415 int ata_pci_sff_activate_host(struct ata_host *host,
2416 			      irq_handler_t irq_handler,
2417 			      struct scsi_host_template *sht)
2418 {
2419 	struct device *dev = host->dev;
2420 	struct pci_dev *pdev = to_pci_dev(dev);
2421 	const char *drv_name = dev_driver_string(host->dev);
2422 	int legacy_mode = 0, rc;
2423 
2424 	rc = ata_host_start(host);
2425 	if (rc)
2426 		return rc;
2427 
2428 	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2429 		u8 tmp8, mask;
2430 
2431 		/* TODO: What if one channel is in native mode ... */
2432 		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2433 		mask = (1 << 2) | (1 << 0);
2434 		if ((tmp8 & mask) != mask)
2435 			legacy_mode = 1;
2436 	}
2437 
2438 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
2439 		return -ENOMEM;
2440 
2441 	if (!legacy_mode && pdev->irq) {
2442 		int i;
2443 
2444 		rc = devm_request_irq(dev, pdev->irq, irq_handler,
2445 				      IRQF_SHARED, drv_name, host);
2446 		if (rc)
2447 			goto out;
2448 
2449 		for (i = 0; i < 2; i++) {
2450 			if (ata_port_is_dummy(host->ports[i]))
2451 				continue;
2452 			ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2453 		}
2454 	} else if (legacy_mode) {
2455 		if (!ata_port_is_dummy(host->ports[0])) {
2456 			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2457 					      irq_handler, IRQF_SHARED,
2458 					      drv_name, host);
2459 			if (rc)
2460 				goto out;
2461 
2462 			ata_port_desc(host->ports[0], "irq %d",
2463 				      ATA_PRIMARY_IRQ(pdev));
2464 		}
2465 
2466 		if (!ata_port_is_dummy(host->ports[1])) {
2467 			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2468 					      irq_handler, IRQF_SHARED,
2469 					      drv_name, host);
2470 			if (rc)
2471 				goto out;
2472 
2473 			ata_port_desc(host->ports[1], "irq %d",
2474 				      ATA_SECONDARY_IRQ(pdev));
2475 		}
2476 	}
2477 
2478 	rc = ata_host_register(host, sht);
2479 out:
2480 	if (rc == 0)
2481 		devres_remove_group(dev, NULL);
2482 	else
2483 		devres_release_group(dev, NULL);
2484 
2485 	return rc;
2486 }
2487 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2488 
2489 static const struct ata_port_info *ata_sff_find_valid_pi(
2490 					const struct ata_port_info * const *ppi)
2491 {
2492 	int i;
2493 
2494 	/* look up the first valid port_info */
2495 	for (i = 0; i < 2 && ppi[i]; i++)
2496 		if (ppi[i]->port_ops != &ata_dummy_port_ops)
2497 			return ppi[i];
2498 
2499 	return NULL;
2500 }
2501 
2502 static int ata_pci_init_one(struct pci_dev *pdev,
2503 		const struct ata_port_info * const *ppi,
2504 		struct scsi_host_template *sht, void *host_priv,
2505 		int hflags, bool bmdma)
2506 {
2507 	struct device *dev = &pdev->dev;
2508 	const struct ata_port_info *pi;
2509 	struct ata_host *host = NULL;
2510 	int rc;
2511 
2512 	DPRINTK("ENTER\n");
2513 
2514 	pi = ata_sff_find_valid_pi(ppi);
2515 	if (!pi) {
2516 		dev_err(&pdev->dev, "no valid port_info specified\n");
2517 		return -EINVAL;
2518 	}
2519 
2520 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
2521 		return -ENOMEM;
2522 
2523 	rc = pcim_enable_device(pdev);
2524 	if (rc)
2525 		goto out;
2526 
2527 #ifdef CONFIG_ATA_BMDMA
2528 	if (bmdma)
2529 		/* prepare and activate BMDMA host */
2530 		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2531 	else
2532 #endif
2533 		/* prepare and activate SFF host */
2534 		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2535 	if (rc)
2536 		goto out;
2537 	host->private_data = host_priv;
2538 	host->flags |= hflags;
2539 
2540 #ifdef CONFIG_ATA_BMDMA
2541 	if (bmdma) {
2542 		pci_set_master(pdev);
2543 		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2544 	} else
2545 #endif
2546 		rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2547 out:
2548 	if (rc == 0)
2549 		devres_remove_group(&pdev->dev, NULL);
2550 	else
2551 		devres_release_group(&pdev->dev, NULL);
2552 
2553 	return rc;
2554 }
2555 
2556 /**
2557  *	ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2558  *	@pdev: Controller to be initialized
2559  *	@ppi: array of port_info, must be enough for two ports
2560  *	@sht: scsi_host_template to use when registering the host
2561  *	@host_priv: host private_data
2562  *	@hflag: host flags
2563  *
2564  *	This is a helper function which can be called from a driver's
2565  *	xxx_init_one() probe function if the hardware uses traditional
2566  *	IDE taskfile registers and is PIO only.
2567  *
2568  *	ASSUMPTION:
2569  *	Nobody makes a single-channel controller that appears solely as
2570  *	the secondary legacy port on PCI.
2571  *
2572  *	LOCKING:
2573  *	Inherited from PCI layer (may sleep).
2574  *
2575  *	RETURNS:
2576  *	Zero on success, negative errno value on error.
2577  */
2578 int ata_pci_sff_init_one(struct pci_dev *pdev,
2579 		 const struct ata_port_info * const *ppi,
2580 		 struct scsi_host_template *sht, void *host_priv, int hflag)
2581 {
2582 	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
2583 }
2584 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
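/*
 * A minimal probe sketch using the helper above; my_port_info and my_sht
 * are hypothetical driver-provided structures.  The BMDMA variant
 * ata_pci_bmdma_init_one() below is used the same way.
 */
#if 0
static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };

	return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL, 0);
}
#endif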
2585 
2586 #endif /* CONFIG_PCI */
2587 
2588 /*
2589  *	BMDMA support
2590  */
2591 
2592 #ifdef CONFIG_ATA_BMDMA
2593 
2594 const struct ata_port_operations ata_bmdma_port_ops = {
2595 	.inherits		= &ata_sff_port_ops,
2596 
2597 	.error_handler		= ata_bmdma_error_handler,
2598 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
2599 
2600 	.qc_prep		= ata_bmdma_qc_prep,
2601 	.qc_issue		= ata_bmdma_qc_issue,
2602 
2603 	.sff_irq_clear		= ata_bmdma_irq_clear,
2604 	.bmdma_setup		= ata_bmdma_setup,
2605 	.bmdma_start		= ata_bmdma_start,
2606 	.bmdma_stop		= ata_bmdma_stop,
2607 	.bmdma_status		= ata_bmdma_status,
2608 
2609 	.port_start		= ata_bmdma_port_start,
2610 };
2611 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
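/*
 * Drivers normally inherit these defaults and override only what differs
 * on their chip, e.g. (hypothetical my_* names, sketch only):
 */
#if 0
static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.set_piomode	= my_set_piomode,	/* chip-specific PIO timing */
	.set_dmamode	= my_set_dmamode,	/* chip-specific DMA timing */
};
#endif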
2612 
2613 const struct ata_port_operations ata_bmdma32_port_ops = {
2614 	.inherits		= &ata_bmdma_port_ops,
2615 
2616 	.sff_data_xfer		= ata_sff_data_xfer32,
2617 	.port_start		= ata_bmdma_port_start32,
2618 };
2619 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2620 
2621 /**
2622  *	ata_bmdma_fill_sg - Fill PCI IDE PRD table
2623  *	@qc: Metadata associated with taskfile to be transferred
2624  *
2625  *	Fill PCI IDE PRD (scatter-gather) table with segments
2626  *	associated with the current disk command.
2627  *
2628  *	LOCKING:
2629  *	spin_lock_irqsave(host lock)
2630  *
2631  */
2632 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2633 {
2634 	struct ata_port *ap = qc->ap;
2635 	struct ata_bmdma_prd *prd = ap->bmdma_prd;
2636 	struct scatterlist *sg;
2637 	unsigned int si, pi;
2638 
2639 	pi = 0;
2640 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2641 		u32 addr, offset;
2642 		u32 sg_len, len;
2643 
2644 		/* determine if physical DMA addr spans 64K boundary.
2645 		 * Note h/w doesn't support 64-bit, so we unconditionally
2646 		 * truncate dma_addr_t to u32.
2647 		 */
2648 		addr = (u32) sg_dma_address(sg);
2649 		sg_len = sg_dma_len(sg);
2650 
2651 		while (sg_len) {
2652 			offset = addr & 0xffff;
2653 			len = sg_len;
2654 			if ((offset + sg_len) > 0x10000)
2655 				len = 0x10000 - offset;
2656 
2657 			prd[pi].addr = cpu_to_le32(addr);
2658 			prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2659 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2660 
2661 			pi++;
2662 			sg_len -= len;
2663 			addr += len;
2664 		}
2665 	}
2666 
2667 	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2668 }
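/*
 * Worked example of the 64K-boundary split above: a 0x3000-byte segment
 * at bus address 0x0001f000 crosses 0x00020000 and is emitted as two
 * PRD entries, (0x0001f000, 0x1000) followed by (0x00020000, 0x2000).
 */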
2669 
2670 /**
2671  *	ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2672  *	@qc: Metadata associated with taskfile to be transferred
2673  *
2674  *	Fill PCI IDE PRD (scatter-gather) table with segments
2675  *	associated with the current disk command. Perform the fill
2676  *	so that no PRD entry uses the 0x0000 (i.e. 64K) length
2677  *	encoding, for controllers that don't follow the spec.
2678  *
2679  *	LOCKING:
2680  *	spin_lock_irqsave(host lock)
2681  *
2682  */
2683 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2684 {
2685 	struct ata_port *ap = qc->ap;
2686 	struct ata_bmdma_prd *prd = ap->bmdma_prd;
2687 	struct scatterlist *sg;
2688 	unsigned int si, pi;
2689 
2690 	pi = 0;
2691 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2692 		u32 addr, offset;
2693 		u32 sg_len, len, blen;
2694 
2695 		/* determine if physical DMA addr spans 64K boundary.
2696 		 * Note h/w doesn't support 64-bit, so we unconditionally
2697 		 * truncate dma_addr_t to u32.
2698 		 */
2699 		addr = (u32) sg_dma_address(sg);
2700 		sg_len = sg_dma_len(sg);
2701 
2702 		while (sg_len) {
2703 			offset = addr & 0xffff;
2704 			len = sg_len;
2705 			if ((offset + sg_len) > 0x10000)
2706 				len = 0x10000 - offset;
2707 
2708 			blen = len & 0xffff;
2709 			prd[pi].addr = cpu_to_le32(addr);
2710 			if (blen == 0) {
2711 				/* Some PATA chipsets like the CS5530 can't
2712 				   cope with 0x0000 meaning 64K as the spec
2713 				   says */
2714 				prd[pi].flags_len = cpu_to_le32(0x8000);
2715 				blen = 0x8000;
2716 				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2717 			}
2718 			prd[pi].flags_len = cpu_to_le32(blen);
2719 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2720 
2721 			pi++;
2722 			sg_len -= len;
2723 			addr += len;
2724 		}
2725 	}
2726 
2727 	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2728 }
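/*
 * Worked example of the split above: a full 64K segment at a 64K-aligned
 * address would need the 0x0000 length encoding, so it is emitted as two
 * 0x8000-byte PRD entries instead.
 */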
2729 
2730 /**
2731  *	ata_bmdma_qc_prep - Prepare taskfile for submission
2732  *	@qc: Metadata associated with taskfile to be prepared
2733  *
2734  *	Prepare ATA taskfile for submission.
2735  *
2736  *	LOCKING:
2737  *	spin_lock_irqsave(host lock)
2738  */
2739 void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2740 {
2741 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2742 		return;
2743 
2744 	ata_bmdma_fill_sg(qc);
2745 }
2746 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2747 
2748 /**
2749  *	ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2750  *	@qc: Metadata associated with taskfile to be prepared
2751  *
2752  *	Prepare ATA taskfile for submission.
2753  *
2754  *	LOCKING:
2755  *	spin_lock_irqsave(host lock)
2756  */
2757 void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2758 {
2759 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2760 		return;
2761 
2762 	ata_bmdma_fill_sg_dumb(qc);
2763 }
2764 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2765 
2766 /**
2767  *	ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2768  *	@qc: command to issue to device
2769  *
2770  *	This function issues a PIO, NODATA or DMA command to a
2771  *	SFF/BMDMA controller.  PIO and NODATA are handled by
2772  *	ata_sff_qc_issue().
2773  *
2774  *	LOCKING:
2775  *	spin_lock_irqsave(host lock)
2776  *
2777  *	RETURNS:
2778  *	Zero on success, AC_ERR_* mask on failure
2779  */
2780 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2781 {
2782 	struct ata_port *ap = qc->ap;
2783 	struct ata_link *link = qc->dev->link;
2784 
2785 	/* defer PIO handling to sff_qc_issue */
2786 	if (!ata_is_dma(qc->tf.protocol))
2787 		return ata_sff_qc_issue(qc);
2788 
2789 	/* select the device */
2790 	ata_dev_select(ap, qc->dev->devno, 1, 0);
2791 
2792 	/* start the command */
2793 	switch (qc->tf.protocol) {
2794 	case ATA_PROT_DMA:
2795 		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2796 
2797 		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2798 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
2799 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
2800 		ap->hsm_task_state = HSM_ST_LAST;
2801 		break;
2802 
2803 	case ATAPI_PROT_DMA:
2804 		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2805 
2806 		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2807 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
2808 		ap->hsm_task_state = HSM_ST_FIRST;
2809 
2810 		/* send cdb by polling if no cdb interrupt */
2811 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2812 			ata_sff_queue_pio_task(link, 0);
2813 		break;
2814 
2815 	default:
2816 		WARN_ON(1);
2817 		return AC_ERR_SYSTEM;
2818 	}
2819 
2820 	return 0;
2821 }
2822 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2823 
2824 /**
2825  *	ata_bmdma_port_intr - Handle BMDMA port interrupt
2826  *	@ap: Port on which interrupt arrived (possibly...)
2827  *	@qc: Taskfile currently active in engine
2828  *
2829  *	Handle port interrupt for given queued command.
2830  *
2831  *	LOCKING:
2832  *	spin_lock_irqsave(host lock)
2833  *
2834  *	RETURNS:
2835  *	One if interrupt was handled, zero if not (shared irq).
2836  */
2837 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2838 {
2839 	struct ata_eh_info *ehi = &ap->link.eh_info;
2840 	u8 host_stat = 0;
2841 	bool bmdma_stopped = false;
2842 	unsigned int handled;
2843 
2844 	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2845 		/* check status of DMA engine */
2846 		host_stat = ap->ops->bmdma_status(ap);
2847 		VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2848 
2849 		/* if it's not our irq... */
2850 		if (!(host_stat & ATA_DMA_INTR))
2851 			return ata_sff_idle_irq(ap);
2852 
2853 		/* before we do anything else, clear DMA-Start bit */
2854 		ap->ops->bmdma_stop(qc);
2855 		bmdma_stopped = true;
2856 
2857 		if (unlikely(host_stat & ATA_DMA_ERR)) {
2858 			/* error when transferring data to/from memory */
2859 			qc->err_mask |= AC_ERR_HOST_BUS;
2860 			ap->hsm_task_state = HSM_ST_ERR;
2861 		}
2862 	}
2863 
2864 	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2865 
2866 	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2867 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2868 
2869 	return handled;
2870 }
2871 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2872 
2873 /**
2874  *	ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2875  *	@irq: irq line (unused)
2876  *	@dev_instance: pointer to our ata_host information structure
2877  *
2878  *	Default interrupt handler for PCI IDE devices.  Calls
2879  *	ata_bmdma_port_intr() for each port that is not disabled.
2880  *
2881  *	LOCKING:
2882  *	Obtains host lock during operation.
2883  *
2884  *	RETURNS:
2885  *	IRQ_NONE or IRQ_HANDLED.
2886  */
2887 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2888 {
2889 	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2890 }
2891 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2892 
2893 /**
2894  *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
2895  *	@ap: port to handle error for
2896  *
2897  *	Stock error handler for BMDMA controller.  It can handle both
2898  *	PATA and SATA controllers.  Most BMDMA controllers should be
2899  *	able to use this EH as-is or with some added handling before
2900  *	and after.
2901  *
2902  *	LOCKING:
2903  *	Kernel thread context (may sleep)
2904  */
2905 void ata_bmdma_error_handler(struct ata_port *ap)
2906 {
2907 	struct ata_queued_cmd *qc;
2908 	unsigned long flags;
2909 	bool thaw = false;
2910 
2911 	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2912 	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2913 		qc = NULL;
2914 
2915 	/* reset PIO HSM and stop DMA engine */
2916 	spin_lock_irqsave(ap->lock, flags);
2917 
2918 	if (qc && ata_is_dma(qc->tf.protocol)) {
2919 		u8 host_stat;
2920 
2921 		host_stat = ap->ops->bmdma_status(ap);
2922 
2923 		/* BMDMA controllers indicate host bus error by
2924 		 * setting DMA_ERR bit and timing out.  As it wasn't
2925 		 * really a timeout event, adjust error mask and
2926 		 * cancel frozen state.
2927 		 */
2928 		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2929 			qc->err_mask = AC_ERR_HOST_BUS;
2930 			thaw = true;
2931 		}
2932 
2933 		ap->ops->bmdma_stop(qc);
2934 
2935 		/* if we're gonna thaw, make sure IRQ is clear */
2936 		if (thaw) {
2937 			ap->ops->sff_check_status(ap);
2938 			if (ap->ops->sff_irq_clear)
2939 				ap->ops->sff_irq_clear(ap);
2940 		}
2941 	}
2942 
2943 	spin_unlock_irqrestore(ap->lock, flags);
2944 
2945 	if (thaw)
2946 		ata_eh_thaw_port(ap);
2947 
2948 	ata_sff_error_handler(ap);
2949 }
2950 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2951 
2952 /**
2953  *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2954  *	@qc: internal command to clean up
2955  *
2956  *	LOCKING:
2957  *	Kernel thread context (may sleep)
2958  */
2959 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2960 {
2961 	struct ata_port *ap = qc->ap;
2962 	unsigned long flags;
2963 
2964 	if (ata_is_dma(qc->tf.protocol)) {
2965 		spin_lock_irqsave(ap->lock, flags);
2966 		ap->ops->bmdma_stop(qc);
2967 		spin_unlock_irqrestore(ap->lock, flags);
2968 	}
2969 }
2970 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2971 
2972 /**
2973  *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2974  *	@ap: Port associated with this ATA transaction.
2975  *
2976  *	Clear interrupt and error flags in DMA status register.
2977  *
2978  *	May be used as the irq_clear() entry in ata_port_operations.
2979  *
2980  *	LOCKING:
2981  *	spin_lock_irqsave(host lock)
2982  */
2983 void ata_bmdma_irq_clear(struct ata_port *ap)
2984 {
2985 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
2986 
2987 	if (!mmio)
2988 		return;
2989 
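	/* ATA_DMA_INTR and ATA_DMA_ERR in the BMDMA status register are
	 * write-one-to-clear, so writing back the value just read clears
	 * whichever of them was set.
	 */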
2990 	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2991 }
2992 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2993 
2994 /**
2995  *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2996  *	@qc: Info associated with this ATA transaction.
2997  *
2998  *	LOCKING:
2999  *	spin_lock_irqsave(host lock)
3000  */
3001 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3002 {
3003 	struct ata_port *ap = qc->ap;
3004 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3005 	u8 dmactl;
3006 
3007 	/* load PRD table addr. */
3008 	mb();	/* make sure PRD table writes are visible to controller */
3009 	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3010 
3011 	/* specify data direction, triple-check start bit is clear */
3012 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3013 	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
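	/* ATA_DMA_WR tells the controller to write to memory, i.e. a
	 * device-to-host transfer, hence it is set when the taskfile is
	 * not a write.
	 */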
3014 	if (!rw)
3015 		dmactl |= ATA_DMA_WR;
3016 	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3017 
3018 	/* issue r/w command */
3019 	ap->ops->sff_exec_command(ap, &qc->tf);
3020 }
3021 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
3022 
3023 /**
3024  *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
3025  *	@qc: Info associated with this ATA transaction.
3026  *
3027  *	LOCKING:
3028  *	spin_lock_irqsave(host lock)
3029  */
3030 void ata_bmdma_start(struct ata_queued_cmd *qc)
3031 {
3032 	struct ata_port *ap = qc->ap;
3033 	u8 dmactl;
3034 
3035 	/* start host DMA transaction */
3036 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3037 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3038 
3039 	/* Strictly, one may wish to issue an ioread8() here, to
3040 	 * flush the mmio write.  However, control also passes
3041 	 * to the hardware at this point, and it will interrupt
3042 	 * us when we are to resume control.  So, in effect,
3043 	 * we don't care when the mmio write flushes.
3044 	 * Further, a read of the DMA status register _immediately_
3045 	 * following the write may not be what certain flaky hardware
3046 	 * expects, so it is best not to add a readb() without first
3047 	 * auditing all the MMIO ATA cards/mobos.
3048 	 * Or maybe I'm just being paranoid.
3049 	 *
3050 	 * FIXME: The posting of this write means I/O starts are
3051 	 * unnecessarily delayed for MMIO
3052 	 */
3053 }
3054 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3055 
3056 /**
3057  *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3058  *	@qc: Command we are ending DMA for
3059  *
3060  *	Clears the ATA_DMA_START flag in the dma control register
3061  *
3062  *	May be used as the bmdma_stop() entry in ata_port_operations.
3063  *
3064  *	LOCKING:
3065  *	spin_lock_irqsave(host lock)
3066  */
3067 void ata_bmdma_stop(struct ata_queued_cmd *qc)
3068 {
3069 	struct ata_port *ap = qc->ap;
3070 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
3071 
3072 	/* clear start/stop bit */
3073 	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3074 		 mmio + ATA_DMA_CMD);
3075 
3076 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3077 	ata_sff_dma_pause(ap);
3078 }
3079 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3080 
3081 /**
3082  *	ata_bmdma_status - Read PCI IDE BMDMA status
3083  *	@ap: Port associated with this ATA transaction.
3084  *
3085  *	Read and return BMDMA status register.
3086  *
3087  *	May be used as the bmdma_status() entry in ata_port_operations.
3088  *
3089  *	LOCKING:
3090  *	spin_lock_irqsave(host lock)
3091  */
3092 u8 ata_bmdma_status(struct ata_port *ap)
3093 {
3094 	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3095 }
3096 EXPORT_SYMBOL_GPL(ata_bmdma_status);
3097 
3098 
3099 /**
3100  *	ata_bmdma_port_start - Set port up for bmdma.
3101  *	@ap: Port to initialize
3102  *
3103  *	Called just after data structures for each port are
3104  *	initialized.  Allocates space for PRD table.
3105  *
3106  *	May be used as the port_start() entry in ata_port_operations.
3107  *
3108  *	LOCKING:
3109  *	Inherited from caller.
3110  */
3111 int ata_bmdma_port_start(struct ata_port *ap)
3112 {
3113 	if (ap->mwdma_mask || ap->udma_mask) {
3114 		ap->bmdma_prd =
3115 			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3116 					    &ap->bmdma_prd_dma, GFP_KERNEL);
3117 		if (!ap->bmdma_prd)
3118 			return -ENOMEM;
3119 	}
3120 
3121 	return 0;
3122 }
3123 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3124 
3125 /**
3126  *	ata_bmdma_port_start32 - Set port up for dma.
3127  *	@ap: Port to initialize
3128  *
3129  *	Called just after data structures for each port are
3130  *	initialized.  Enables 32bit PIO and allocates space for PRD
3131  *	table.
3132  *
3133  *	May be used as the port_start() entry in ata_port_operations for
3134  *	devices that are capable of 32bit PIO.
3135  *
3136  *	LOCKING:
3137  *	Inherited from caller.
3138  */
3139 int ata_bmdma_port_start32(struct ata_port *ap)
3140 {
3141 	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3142 	return ata_bmdma_port_start(ap);
3143 }
3144 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3145 
3146 #ifdef CONFIG_PCI
3147 
3148 /**
3149  *	ata_pci_bmdma_clear_simplex -	attempt to kick device out of simplex
3150  *	@pdev: PCI device
3151  *
3152  *	Some PCI ATA devices report simplex mode but in fact can be told to
3153  *	enter non-simplex mode.  This implements the necessary logic to
3154  *	perform the task on such devices. Calling it on other devices will
3155  *	have -undefined- behaviour.
3156  */
3157 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3158 {
3159 	unsigned long bmdma = pci_resource_start(pdev, 4);
3160 	u8 simplex;
3161 
3162 	if (bmdma == 0)
3163 		return -ENOENT;
3164 
3165 	simplex = inb(bmdma + 0x02);
3166 	outb(simplex & 0x60, bmdma + 0x02);
3167 	simplex = inb(bmdma + 0x02);
3168 	if (simplex & 0x80)
3169 		return -EOPNOTSUPP;
3170 	return 0;
3171 }
3172 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
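/*
 * Note on the register usage above: the write preserves the drive DMA
 * capable latches (bits 5-6 of the BMDMA status register) while trying
 * to clear the simplex bit (bit 7); if simplex still reads back set,
 * the controller really is simplex-only and -EOPNOTSUPP is returned.
 */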
3173 
3174 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3175 {
3176 	int i;
3177 
3178 	dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3179 
3180 	for (i = 0; i < 2; i++) {
3181 		host->ports[i]->mwdma_mask = 0;
3182 		host->ports[i]->udma_mask = 0;
3183 	}
3184 }
3185 
3186 /**
3187  *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3188  *	@host: target ATA host
3189  *
3190  *	Acquire PCI BMDMA resources and initialize @host accordingly.
3191  *
3192  *	LOCKING:
3193  *	Inherited from calling layer (may sleep).
3194  */
3195 void ata_pci_bmdma_init(struct ata_host *host)
3196 {
3197 	struct device *gdev = host->dev;
3198 	struct pci_dev *pdev = to_pci_dev(gdev);
3199 	int i, rc;
3200 
3201 	/* No BAR4 allocation: No DMA */
3202 	if (pci_resource_start(pdev, 4) == 0) {
3203 		ata_bmdma_nodma(host, "BAR4 is zero");
3204 		return;
3205 	}
3206 
3207 	/*
3208 	 * Some controllers require the BMDMA region to be initialized
3209 	 * even when DMA is not in use, so that IRQ status can be
3210 	 * cleared via the ->sff_irq_clear method.  Try to initialize
3211 	 * bmdma_addr regardless of the DMA masks.
3212 	 */
3213 	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
3214 	if (rc)
3215 		ata_bmdma_nodma(host, "failed to set dma mask");
3216 	if (!rc) {
3217 		rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
3218 		if (rc)
3219 			ata_bmdma_nodma(host,
3220 					"failed to set consistent dma mask");
3221 	}
3222 
3223 	/* request and iomap DMA region */
3224 	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3225 	if (rc) {
3226 		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3227 		return;
3228 	}
3229 	host->iomap = pcim_iomap_table(pdev);
3230 
3231 	for (i = 0; i < 2; i++) {
3232 		struct ata_port *ap = host->ports[i];
3233 		void __iomem *bmdma = host->iomap[4] + 8 * i;
3234 
3235 		if (ata_port_is_dummy(ap))
3236 			continue;
3237 
3238 		ap->ioaddr.bmdma_addr = bmdma;
3239 		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3240 		    (ioread8(bmdma + 2) & 0x80))
3241 			host->flags |= ATA_HOST_SIMPLEX;
3242 
3243 		ata_port_desc(ap, "bmdma 0x%llx",
3244 		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3245 	}
3246 }
3247 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3248 
3249 /**
3250  *	ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3251  *	@pdev: target PCI device
3252  *	@ppi: array of port_info, must be enough for two ports
3253  *	@r_host: out argument for the initialized ATA host
3254  *
3255  *	Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3256  *	resources and initialize it accordingly in one go.
3257  *
3258  *	LOCKING:
3259  *	Inherited from calling layer (may sleep).
3260  *
3261  *	RETURNS:
3262  *	0 on success, -errno otherwise.
3263  */
3264 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3265 			       const struct ata_port_info * const * ppi,
3266 			       struct ata_host **r_host)
3267 {
3268 	int rc;
3269 
3270 	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3271 	if (rc)
3272 		return rc;
3273 
3274 	ata_pci_bmdma_init(*r_host);
3275 	return 0;
3276 }
3277 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3278 
3279 /**
3280  *	ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3281  *	@pdev: Controller to be initialized
3282  *	@ppi: array of port_info, must be enough for two ports
3283  *	@sht: scsi_host_template to use when registering the host
3284  *	@host_priv: host private_data
3285  *	@hflags: host flags
3286  *
3287  *	This function is similar to ata_pci_sff_init_one() but also
3288  *	takes care of BMDMA initialization.
3289  *
3290  *	LOCKING:
3291  *	Inherited from PCI layer (may sleep).
3292  *
3293  *	RETURNS:
3294  *	Zero on success, negative errno value on error.
3295  */
3296 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3297 			   const struct ata_port_info * const * ppi,
3298 			   struct scsi_host_template *sht, void *host_priv,
3299 			   int hflags)
3300 {
3301 	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
3302 }
3303 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
3304 
3305 #endif /* CONFIG_PCI */
3306 #endif /* CONFIG_ATA_BMDMA */
3307 
3308 /**
3309  *	ata_sff_port_init - Initialize SFF/BMDMA ATA port
3310  *	@ap: Port to initialize
3311  *
3312  *	Called on port allocation to initialize SFF/BMDMA specific
3313  *	fields.
3314  *
3315  *	LOCKING:
3316  *	None.
3317  */
3318 void ata_sff_port_init(struct ata_port *ap)
3319 {
3320 	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3321 	ap->ctl = ATA_DEVCTL_OBS;
3322 	ap->last_ctl = 0xFF;
3323 }
3324 
3325 int __init ata_sff_init(void)
3326 {
3327 	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3328 	if (!ata_sff_wq)
3329 		return -ENOMEM;
3330 
3331 	return 0;
3332 }
3333 
3334 void ata_sff_exit(void)
3335 {
3336 	destroy_workqueue(ata_sff_wq);
3337 }
3338