/*
 * pata_atiixp.c - ATI PATA for new ATA layer
 * (C) 2005 Red Hat Inc
 * (C) 2009-2010 Bartlomiej Zolnierkiewicz
 *
 * Based on
 *
 * linux/drivers/ide/pci/atiixp.c   Version 0.01-bart2   Feb. 26, 2004
 *
 * Copyright (C) 2003 ATI Inc. <hyu@ati.com>
 * Copyright (C) 2004 Bartlomiej Zolnierkiewicz
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"

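/*
 * PCI configuration space offsets of the IDE timing and mode registers,
 * as programmed below: the PIO and MWDMA timing registers hold one byte
 * per drive, the PIO mode and UDMA mode words hold one nibble per drive,
 * and the UDMA control word holds one enable bit per drive.
 */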
enum {
	ATIIXP_IDE_PIO_TIMING	= 0x40,
	ATIIXP_IDE_MWDMA_TIMING	= 0x44,
	ATIIXP_IDE_PIO_CONTROL	= 0x48,
	ATIIXP_IDE_PIO_MODE	= 0x4a,
	ATIIXP_IDE_UDMA_CONTROL	= 0x54,
	ATIIXP_IDE_UDMA_MODE	= 0x56
};

static const struct dmi_system_id atiixp_cable_override_dmi_table[] = {
	{
		/* Board has onboard PATA<->SATA converters */
		.ident = "MSI E350DM-E33",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
			DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
		},
	},
	{ }
};

static int atiixp_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 udma;

	if (dmi_check_system(atiixp_cable_override_dmi_table))
		return ATA_CBL_PATA40_SHORT;

	/* Hack carried over from drivers/ide/pci. Really we want to do raw
	   cable detection rather than follow the BIOS's mode guess. */
	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
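	/* A BIOS-selected mode of UDMA4 or higher on either drive implies
	   an 80-wire cable. */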
	if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

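/*
 * atiixp_lock serialises read-modify-write of the shared timing and mode
 * registers, which carry fields for both channels and both drives.
 */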
static DEFINE_SPINLOCK(atiixp_lock);

/**
 * atiixp_prereset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Start of the reset sequence: check the channel enable bits to see
 * which ports are active before handing off to the standard SFF
 * prereset.
 */

static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits atiixp_enable_bits[] = {
		{ 0x48, 1, 0x01, 0x00 },
		{ 0x48, 1, 0x08, 0x00 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/**
 * atiixp_set_pio_timing - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 * @pio: Requested PIO mode
 *
 * Called by both the PIO and DMA setup functions to set the controller
 * timings for PIO transfers. We must load both the mode number and
 * timing values into the controller.
 */

static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
	static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = 2 * ap->port_no + adev->devno;
	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
	u32 pio_timing_data;
	u16 pio_mode_data;

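	/* Program the mode number: one nibble per drive in the PIO mode word. */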
	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
	pio_mode_data &= ~(0x7 << (4 * dn));
	pio_mode_data |= pio << (4 * dn);
	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);

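	/*
	 * Program the timing byte: each channel owns 16 bits of the timing
	 * dword, with the master's byte above the slave's (hence the
	 * devno ^ 1 in timing_shift).
	 */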
	pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
	pio_timing_data &= ~(0xFF << timing_shift);
	pio_timing_data |= (pio_timings[pio] << timing_shift);
	pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}

/**
 * atiixp_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Called to do the PIO mode setup. We use a shared helper for this
 * as the DMA setup must also adjust the PIO timing information.
 */

static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned long flags;

	spin_lock_irqsave(&atiixp_lock, flags);
	atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
	spin_unlock_irqrestore(&atiixp_lock, flags);
}

/**
 * atiixp_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Called to do the DMA mode setup. We use timing tables for most
 * modes but must tune an appropriate PIO mode to match.
 */

static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dma = adev->dma_mode;
	int dn = 2 * ap->port_no + adev->devno;
	int wanted_pio;
	unsigned long flags;

	spin_lock_irqsave(&atiixp_lock, flags);

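	/*
	 * UDMA modes live in the per-drive nibbles of the UDMA mode word;
	 * MWDMA modes are programmed as per-drive timing bytes, just like
	 * the PIO timings.
	 */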
	if (adev->dma_mode >= XFER_UDMA_0) {
		u16 udma_mode_data;

		dma -= XFER_UDMA_0;

		pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
		udma_mode_data &= ~(0x7 << (4 * dn));
		udma_mode_data |= dma << (4 * dn);
		pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
	} else {
		int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
		u32 mwdma_timing_data;

		dma -= XFER_MW_DMA_0;

		pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
				      &mwdma_timing_data);
		mwdma_timing_data &= ~(0xFF << timing_shift);
		mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
		pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
				       mwdma_timing_data);
	}
	/*
	 * We must now look at the PIO mode situation. We may need to
	 * adjust the PIO mode to keep the timings acceptable.
	 */
	if (adev->dma_mode >= XFER_MW_DMA_2)
		wanted_pio = 4;
	else if (adev->dma_mode == XFER_MW_DMA_1)
		wanted_pio = 3;
	else if (adev->dma_mode == XFER_MW_DMA_0)
		wanted_pio = 0;
	else BUG();

	if (adev->pio_mode != wanted_pio)
		atiixp_set_pio_timing(ap, adev, wanted_pio);
	spin_unlock_irqrestore(&atiixp_lock, flags);
}

/**
 * atiixp_bmdma_start - DMA start callback
 * @qc: Command in progress
 *
 * When DMA begins we need to ensure that the UDMA control
 * register for the channel is correctly set.
 *
 * Note: The host lock held by the libata layer protects
 * us from two channels both trying to set DMA bits at once.
 */

static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = (2 * ap->port_no) + adev->devno;
	u16 tmp16;

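	/* Set or clear this drive's UDMA enable bit to match the mode in use. */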
	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
	if (ata_using_udma(adev))
		tmp16 |= (1 << dn);
	else
		tmp16 &= ~(1 << dn);
	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
	ata_bmdma_start(qc);
}

/**
 * atiixp_bmdma_stop - DMA stop callback
 * @qc: Command in progress
 *
 * DMA has completed. Clear the UDMA flag as the next operations will
 * be PIO ones, not UDMA data transfers.
 *
 * Note: The host lock held by the libata layer protects
 * us from two channels both trying to set DMA bits at once.
 */

static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = (2 * ap->port_no) + qc->dev->devno;
	u16 tmp16;

	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
	tmp16 &= ~(1 << dn);
	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
	ata_bmdma_stop(qc);
}

static struct scsi_host_template atiixp_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
};

static struct ata_port_operations atiixp_port_ops = {
	.inherits	= &ata_bmdma_port_ops,

	.qc_prep	= ata_bmdma_dumb_qc_prep,
	.bmdma_start	= atiixp_bmdma_start,
	.bmdma_stop	= atiixp_bmdma_stop,

	.prereset	= atiixp_prereset,
	.cable_detect	= atiixp_cable_detect,
	.set_piomode	= atiixp_set_piomode,
	.set_dmamode	= atiixp_set_dmamode,
};

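/**
 * atiixp_init_one - probe callback
 * @pdev: PCI device to register
 * @id: matching entry in the atiixp PCI ID table
 *
 * Register both channels with identical capabilities (PIO4, MWDMA 1-2,
 * UDMA up to mode 5) and let libata scan them in parallel.
 */
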
static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA12_ONLY,
		.udma_mask = ATA_UDMA5,
		.port_ops = &atiixp_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, &info };

	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
				      ATA_HOST_PARALLEL_SCAN);
}

static const struct pci_device_id atiixp[] = {
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },

	{ },
};

static struct pci_driver atiixp_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= atiixp,
	.probe		= atiixp_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.resume		= ata_pci_device_resume,
	.suspend	= ata_pci_device_suspend,
#endif
};

module_pci_driver(atiixp_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);