• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  sata_sil.c - Silicon Image SATA
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *  		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2005 Red Hat, Inc.
9  *  Copyright 2003 Benjamin Herrenschmidt
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/driver-api/libata.rst
29  *
30  *  Documentation for SiI 3112:
31  *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
32  *
33  *  Other errata and documentation available under NDA.
34  *
35  */
36 
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/pci.h>
40 #include <linux/blkdev.h>
41 #include <linux/delay.h>
42 #include <linux/interrupt.h>
43 #include <linux/device.h>
44 #include <scsi/scsi_host.h>
45 #include <linux/libata.h>
46 #include <linux/dmi.h>
47 
#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"2.4"

/* Large Block Transfer lets DMA chunks cross 64KB; boundary is 2GB - 1 */
#define SIL_DMA_BOUNDARY	0x7fffffffUL
52 
/* Driver-wide constants: BAR index, host flags, controller IDs, registers */
enum {
	SIL_MMIO_BAR		= 5,

	/*
	 * host flags
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA,

	/*
	 * Controller IDs (indices into sil_port_info[])
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others (per-drive quirk flags, see sil_blacklist[])
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),  /* limit commands to 15 sectors */
	SIL_QUIRK_UDMA5MAX	= (1 << 1),  /* cap transfer mode at UDMA5 */
};
113 
/* Forward declarations for ops referenced by the tables below */
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);
128 
129 
/* PCI IDs handled by this driver; second field indexes sil_port_info[] */
static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	/* NOTE(review): ATI entries are presumably IXP southbridge
	 * integrations of the 3112 core — confirm against hardware docs */
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};
141 
142 
/* TODO firmware versions should be added - eric */
/* Drives with known errata; matched by exact model string in
 * sil_dev_config() and mapped to SIL_QUIRK_* flags. */
static const struct sil_drivelist {
	const char *product;	/* exact IDENTIFY model string */
	unsigned int quirk;	/* SIL_QUIRK_* flag to apply */
} sil_blacklist [] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "TOSHIBA MK2561GSYN",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};
163 
/* PCI driver glue; removal and suspend use the generic libata helpers */
static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
#endif
};
174 
static struct scsi_host_template sil_sht = {
	ATA_BASE_SHT(DRV_NAME),
	/*
	 * These controllers support Large Block Transfer which allows
	 * transfer chunks up to 2GB and which cross 64KB boundaries,
	 * therefore the DMA limits are more relaxed than standard ATA SFF.
	 */
	.dma_boundary		= SIL_DMA_BOUNDARY,
	.sg_tablesize		= ATA_MAX_PRD
};
183 
/* Port operations: inherit 32-bit PIO BMDMA ops, override SiI specifics */
static struct ata_port_operations sil_ops = {
	.inherits		= &ata_bmdma32_port_ops,
	.dev_config		= sil_dev_config,
	.set_mode		= sil_set_mode,
	.bmdma_setup            = sil_bmdma_setup,
	.bmdma_start            = sil_bmdma_start,
	.bmdma_stop		= sil_bmdma_stop,
	.qc_prep		= sil_qc_prep,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
};
197 
/* Per-board capabilities, indexed by the controller IDs in sil_pci_tbl */
static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
};
233 
/* per-port register offsets (relative to the BAR 5 MMIO base) */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	/*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
255 
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Module parameter: force the mod15write workaround on all attached drives */
static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
265 
266 
sil_bmdma_stop(struct ata_queued_cmd * qc)267 static void sil_bmdma_stop(struct ata_queued_cmd *qc)
268 {
269 	struct ata_port *ap = qc->ap;
270 	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
271 	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
272 
273 	/* clear start/stop bit - can safely always write 0 */
274 	iowrite8(0, bmdma2);
275 
276 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
277 	ata_sff_dma_pause(ap);
278 }
279 
sil_bmdma_setup(struct ata_queued_cmd * qc)280 static void sil_bmdma_setup(struct ata_queued_cmd *qc)
281 {
282 	struct ata_port *ap = qc->ap;
283 	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
284 
285 	/* load PRD table addr. */
286 	iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
287 
288 	/* issue r/w command */
289 	ap->ops->sff_exec_command(ap, &qc->tf);
290 }
291 
sil_bmdma_start(struct ata_queued_cmd * qc)292 static void sil_bmdma_start(struct ata_queued_cmd *qc)
293 {
294 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
295 	struct ata_port *ap = qc->ap;
296 	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
297 	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
298 	u8 dmactl = ATA_DMA_START;
299 
300 	/* set transfer direction, start host DMA transaction
301 	   Note: For Large Block Transfer to work, the DMA must be started
302 	   using the bmdma2 register. */
303 	if (!rw)
304 		dmactl |= ATA_DMA_WR;
305 	iowrite8(dmactl, bmdma2);
306 }
307 
308 /* The way God intended PCI IDE scatter/gather lists to look and behave... */
sil_fill_sg(struct ata_queued_cmd * qc)309 static void sil_fill_sg(struct ata_queued_cmd *qc)
310 {
311 	struct scatterlist *sg;
312 	struct ata_port *ap = qc->ap;
313 	struct ata_bmdma_prd *prd, *last_prd = NULL;
314 	unsigned int si;
315 
316 	prd = &ap->bmdma_prd[0];
317 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
318 		/* Note h/w doesn't support 64-bit, so we unconditionally
319 		 * truncate dma_addr_t to u32.
320 		 */
321 		u32 addr = (u32) sg_dma_address(sg);
322 		u32 sg_len = sg_dma_len(sg);
323 
324 		prd->addr = cpu_to_le32(addr);
325 		prd->flags_len = cpu_to_le32(sg_len);
326 		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
327 
328 		last_prd = prd;
329 		prd++;
330 	}
331 
332 	if (likely(last_prd))
333 		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
334 }
335 
sil_qc_prep(struct ata_queued_cmd * qc)336 static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
337 {
338 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
339 		return AC_ERR_OK;
340 
341 	sil_fill_sg(qc);
342 
343 	return AC_ERR_OK;
344 }
345 
sil_get_device_cache_line(struct pci_dev * pdev)346 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
347 {
348 	u8 cache_line = 0;
349 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
350 	return cache_line;
351 }
352 
353 /**
354  *	sil_set_mode		-	wrap set_mode functions
355  *	@link: link to set up
356  *	@r_failed: returned device when we fail
357  *
358  *	Wrap the libata method for device setup as after the setup we need
359  *	to inspect the results and do some configuration work
360  */
361 
sil_set_mode(struct ata_link * link,struct ata_device ** r_failed)362 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
363 {
364 	struct ata_port *ap = link->ap;
365 	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
366 	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
367 	struct ata_device *dev;
368 	u32 tmp, dev_mode[2] = { };
369 	int rc;
370 
371 	rc = ata_do_set_mode(link, r_failed);
372 	if (rc)
373 		return rc;
374 
375 	ata_for_each_dev(dev, link, ALL) {
376 		if (!ata_dev_enabled(dev))
377 			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
378 		else if (dev->flags & ATA_DFLAG_PIO)
379 			dev_mode[dev->devno] = 1;	/* PIO3/4 */
380 		else
381 			dev_mode[dev->devno] = 3;	/* UDMA */
382 		/* value 2 indicates MDMA */
383 	}
384 
385 	tmp = readl(addr);
386 	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
387 	tmp |= dev_mode[0];
388 	tmp |= (dev_mode[1] << 4);
389 	writel(tmp, addr);
390 	readl(addr);	/* flush */
391 	return 0;
392 }
393 
sil_scr_addr(struct ata_port * ap,unsigned int sc_reg)394 static inline void __iomem *sil_scr_addr(struct ata_port *ap,
395 					 unsigned int sc_reg)
396 {
397 	void __iomem *offset = ap->ioaddr.scr_addr;
398 
399 	switch (sc_reg) {
400 	case SCR_STATUS:
401 		return offset + 4;
402 	case SCR_ERROR:
403 		return offset + 8;
404 	case SCR_CONTROL:
405 		return offset;
406 	default:
407 		/* do nothing */
408 		break;
409 	}
410 
411 	return NULL;
412 }
413 
/* Read SCR register @sc_reg into @val; -EINVAL for unknown registers. */
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (!mmio)
		return -EINVAL;

	*val = readl(mmio);
	return 0;
}
424 
/* Write @val to SCR register @sc_reg; -EINVAL for unknown registers. */
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (!mmio)
		return -EINVAL;

	writel(val, mmio);
	return 0;
}
435 
/*
 * sil_host_intr - service an interrupt for one port
 * @ap: port the interrupt belongs to
 * @bmdma2: snapshot of the port's BMDMA2 status register
 *
 * Handles SATA (SError.N) events, filters spurious interrupts, then
 * drives the SFF host state machine for the active command.  Called
 * from sil_interrupt() with the host lock held.
 */
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror = 0xffffffff;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		sil_scr_read(&ap->link, SCR_ERROR, &serror);
		sil_scr_write(&ap->link, SCR_ERROR, serror);

		/* Sometimes spurious interrupts occur, double check
		 * it's PHYRDY CHG.
		 */
		if (serror & SERR_PHYRDY_CHG) {
			ap->link.eh_info.serror |= serror;
			goto freeze;
		}

		/* No command completion pending: nothing more to do. */
		if (!(bmdma2 & SIL_DMA_COMPLETE))
			return;
	}

	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* this sometimes happens, just clear IRQ */
		ap->ops->sff_check_status(ap);
		return;
	}

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (ata_is_dma(qc->tf.protocol)) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			/* surface PCI bus errors through the HSM */
			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		/* data-transfer state: interrupt is expected here */
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ap->ops->sff_check_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_sff_hsm_move(ap, qc, status, 0);

	/* record the BMDMA2 status for the EH report on DMA errors */
	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

 err_hsm:
	qc->err_mask |= AC_ERR_HSM;
 freeze:
	ata_port_freeze(ap);
}
522 
sil_interrupt(int irq,void * dev_instance)523 static irqreturn_t sil_interrupt(int irq, void *dev_instance)
524 {
525 	struct ata_host *host = dev_instance;
526 	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
527 	int handled = 0;
528 	int i;
529 
530 	spin_lock(&host->lock);
531 
532 	for (i = 0; i < host->n_ports; i++) {
533 		struct ata_port *ap = host->ports[i];
534 		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
535 
536 		/* turn off SATA_IRQ if not supported */
537 		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
538 			bmdma2 &= ~SIL_DMA_SATA_IRQ;
539 
540 		if (bmdma2 == 0xffffffff ||
541 		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
542 			continue;
543 
544 		sil_host_intr(ap, bmdma2);
545 		handled = 1;
546 	}
547 
548 	spin_unlock(&host->lock);
549 
550 	return IRQ_RETVAL(handled);
551 }
552 
sil_freeze(struct ata_port * ap)553 static void sil_freeze(struct ata_port *ap)
554 {
555 	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
556 	u32 tmp;
557 
558 	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
559 	writel(0, mmio_base + sil_port[ap->port_no].sien);
560 
561 	/* plug IRQ */
562 	tmp = readl(mmio_base + SIL_SYSCFG);
563 	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
564 	writel(tmp, mmio_base + SIL_SYSCFG);
565 	readl(mmio_base + SIL_SYSCFG);	/* flush */
566 
567 	/* Ensure DMA_ENABLE is off.
568 	 *
569 	 * This is because the controller will not give us access to the
570 	 * taskfile registers while a DMA is in progress
571 	 */
572 	iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
573 		 ap->ioaddr.bmdma_addr);
574 
575 	/* According to ata_bmdma_stop, an HDMA transition requires
576 	 * on PIO cycle. But we can't read a taskfile register.
577 	 */
578 	ioread8(ap->ioaddr.bmdma_addr);
579 }
580 
sil_thaw(struct ata_port * ap)581 static void sil_thaw(struct ata_port *ap)
582 {
583 	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
584 	u32 tmp;
585 
586 	/* clear IRQ */
587 	ap->ops->sff_check_status(ap);
588 	ata_bmdma_irq_clear(ap);
589 
590 	/* turn on SATA IRQ if supported */
591 	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
592 		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
593 
594 	/* turn on IRQ */
595 	tmp = readl(mmio_base + SIL_SYSCFG);
596 	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
597 	writel(tmp, mmio_base + SIL_SYSCFG);
598 }
599 
600 /**
601  *	sil_dev_config - Apply device/host-specific errata fixups
602  *	@dev: Device to be examined
603  *
604  *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
605  *	device is known to be present, this function is called.
606  *	We apply two errata fixups which are specific to Silicon Image,
607  *	a Seagate and a Maxtor fixup.
608  *
609  *	For certain Seagate devices, we must limit the maximum sectors
610  *	to under 8K.
611  *
612  *	For certain Maxtor devices, we must not program the drive
613  *	beyond udma5.
614  *
615  *	Both fixups are unfairly pessimistic.  As soon as I get more
616  *	information on these errata, I will create a more exhaustive
617  *	list, and apply the fixups to only the specific
618  *	devices/hosts/firmwares that need it.
619  *
620  *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
621  *	The Maxtor quirk is in the blacklist, but I'm keeping the original
622  *	pessimistic fix for the following reasons...
623  *	- There seems to be less info on it, only one device gleaned off the
624  *	Windows	driver, maybe only one is affected.  More info would be greatly
625  *	appreciated.
626  *	- But then again UDMA5 is hardly anything to complain about
627  */
sil_dev_config(struct ata_device * dev)628 static void sil_dev_config(struct ata_device *dev)
629 {
630 	struct ata_port *ap = dev->link->ap;
631 	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
632 	unsigned int n, quirks = 0;
633 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
634 
635 	/* This controller doesn't support trim */
636 	dev->horkage |= ATA_HORKAGE_NOTRIM;
637 
638 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
639 
640 	for (n = 0; sil_blacklist[n].product; n++)
641 		if (!strcmp(sil_blacklist[n].product, model_num)) {
642 			quirks = sil_blacklist[n].quirk;
643 			break;
644 		}
645 
646 	/* limit requests to 15 sectors */
647 	if (slow_down ||
648 	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
649 	     (quirks & SIL_QUIRK_MOD15WRITE))) {
650 		if (print_info)
651 			ata_dev_info(dev,
652 		"applying Seagate errata fix (mod15write workaround)\n");
653 		dev->max_sectors = 15;
654 		return;
655 	}
656 
657 	/* limit to udma5 */
658 	if (quirks & SIL_QUIRK_UDMA5MAX) {
659 		if (print_info)
660 			ata_dev_info(dev, "applying Maxtor errata fix %s\n",
661 				     model_num);
662 		dev->udma_mask &= ATA_UDMA5;
663 		return;
664 	}
665 }
666 
sil_init_controller(struct ata_host * host)667 static void sil_init_controller(struct ata_host *host)
668 {
669 	struct pci_dev *pdev = to_pci_dev(host->dev);
670 	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
671 	u8 cls;
672 	u32 tmp;
673 	int i;
674 
675 	/* Initialize FIFO PCI bus arbitration */
676 	cls = sil_get_device_cache_line(pdev);
677 	if (cls) {
678 		cls >>= 3;
679 		cls++;  /* cls = (line_size/8)+1 */
680 		for (i = 0; i < host->n_ports; i++)
681 			writew(cls << 8 | cls,
682 			       mmio_base + sil_port[i].fifo_cfg);
683 	} else
684 		dev_warn(&pdev->dev,
685 			 "cache line size not set.  Driver may not function\n");
686 
687 	/* Apply R_ERR on DMA activate FIS errata workaround */
688 	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
689 		int cnt;
690 
691 		for (i = 0, cnt = 0; i < host->n_ports; i++) {
692 			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
693 			if ((tmp & 0x3) != 0x01)
694 				continue;
695 			if (!cnt)
696 				dev_info(&pdev->dev,
697 					 "Applying R_ERR on DMA activate FIS errata fix\n");
698 			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
699 			cnt++;
700 		}
701 	}
702 
703 	if (host->n_ports == 4) {
704 		/* flip the magic "make 4 ports work" bit */
705 		tmp = readl(mmio_base + sil_port[2].bmdma);
706 		if ((tmp & SIL_INTR_STEERING) == 0)
707 			writel(tmp | SIL_INTR_STEERING,
708 			       mmio_base + sil_port[2].bmdma);
709 	}
710 }
711 
sil_broken_system_poweroff(struct pci_dev * pdev)712 static bool sil_broken_system_poweroff(struct pci_dev *pdev)
713 {
714 	static const struct dmi_system_id broken_systems[] = {
715 		{
716 			.ident = "HP Compaq nx6325",
717 			.matches = {
718 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
719 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
720 			},
721 			/* PCI slot number of the controller */
722 			.driver_data = (void *)0x12UL,
723 		},
724 
725 		{ }	/* terminate list */
726 	};
727 	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
728 
729 	if (dmi) {
730 		unsigned long slot = (unsigned long)dmi->driver_data;
731 		/* apply the quirk only to on-board controllers */
732 		return slot == PCI_SLOT(pdev->devfn);
733 	}
734 
735 	return false;
736 }
737 
/*
 * sil_init_one - PCI probe entry point
 * @pdev: PCI device being probed
 * @ent: matching entry from sil_pci_tbl (driver_data = controller ID)
 *
 * Allocates the ata_host, maps BAR 5, sets the 32-bit DMA masks,
 * wires up per-port MMIO addresses, programs the controller, and
 * activates the host with a shared IRQ handler.  All resources are
 * device-managed (pcim_*), so no explicit unwinding is needed.
 */
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int board_id = ent->driver_data;
	struct ata_port_info pi = sil_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host; only the 3114 exposes four ports */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	/* skip spindown on systems whose BIOS mishandles it */
	if (sil_broken_system_poweroff(pdev)) {
		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
				"on poweroff and hibernation\n");
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* hardware has no 64-bit DMA support; use the 32-bit ATA mask */
	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	/* point each port's taskfile/ctl/bmdma/scr at its MMIO block */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_sff_std_ports(ioaddr);

		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}
809 
#ifdef CONFIG_PM_SLEEP
/* Resume: restore PCI state, reprogram the controller, restart libata. */
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* Controller registers are lost across suspend; reprogram them. */
	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif
826 
827 module_pci_driver(sil_pci_driver);
828