1 /*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33 /*
34 Theory of operation
35 -------------------
36
37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38 engine, DIMM memory, and four ATA engines (one per SATA port).
39 Data is copied to/from DIMM memory by the HDMA engine, before
40 handing off to one (or more) of the ATA engines. The ATA
41 engines operate solely on DIMM memory.
42
43 The SX4 behaves like a PATA chip, with no SATA controls or
44 knowledge whatsoever, leading to the presumption that
45 PATA<->SATA bridges exist on SX4 boards, external to the
46 PDC20621 chip itself.
47
48 The chip is quite capable, supporting an XOR engine and linked
49	hardware commands (permits a string of transactions to be
50 submitted and waited-on as a single unit), and an optional
51 microprocessor.
52
53 The limiting factor is largely software. This Linux driver was
54 written to multiplex the single HDMA engine to copy disk
55 transactions into a fixed DIMM memory space, from where an ATA
56 engine takes over. As a result, each WRITE looks like this:
57
58 submit HDMA packet to hardware
59 hardware copies data from system memory to DIMM
60 hardware raises interrupt
61
62 submit ATA packet to hardware
63 hardware executes ATA WRITE command, w/ data in DIMM
64 hardware raises interrupt
65
66 and each READ looks like this:
67
68 submit ATA packet to hardware
69 hardware executes ATA READ command, w/ data in DIMM
70 hardware raises interrupt
71
72 submit HDMA packet to hardware
73 hardware copies data from DIMM to system memory
74 hardware raises interrupt
75
76 This is a very slow, lock-step way of doing things that can
77 certainly be improved by motivated kernel hackers.
78
79 */
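
/*
   A rough sketch of how the flow above maps onto this driver
   (informational only, inferred from the code below):

     pdc20621_qc_prep()   - builds the ATA packet, Host DMA packet and
                            S/G tables in pdc_port_priv->dimm_buf and
                            copies them into the per-port DIMM window.
     pdc20621_qc_issue()  - submits the first packet: the HDMA packet
                            for writes, the ATA packet for reads.
     pdc20621_interrupt() - on the first completion interrupt, submits
                            the second packet (ATA for writes, HDMA for
                            reads); on the second, completes the qc.
 */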
80
81 #include <linux/kernel.h>
82 #include <linux/module.h>
83 #include <linux/pci.h>
84 #include <linux/slab.h>
85 #include <linux/blkdev.h>
86 #include <linux/delay.h>
87 #include <linux/interrupt.h>
88 #include <linux/device.h>
89 #include <scsi/scsi_host.h>
90 #include <scsi/scsi_cmnd.h>
91 #include <linux/libata.h>
92 #include "sata_promise.h"
93
94 #define DRV_NAME "sata_sx4"
95 #define DRV_VERSION "0.12"
96
97
98 enum {
99 PDC_MMIO_BAR = 3,
100 PDC_DIMM_BAR = 4,
101
102 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
103
104 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
105 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
106 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
107 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
108
109 PDC_CTLSTAT = 0x60, /* IDEn control / status */
110
111 PDC_20621_SEQCTL = 0x400,
112 PDC_20621_SEQMASK = 0x480,
113 PDC_20621_GENERAL_CTL = 0x484,
114 PDC_20621_PAGE_SIZE = (32 * 1024),
115
116 /* chosen, not constant, values; we design our own DIMM mem map */
117 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
118 PDC_20621_DIMM_BASE = 0x00200000,
119 PDC_20621_DIMM_DATA = (64 * 1024),
120 PDC_DIMM_DATA_STEP = (256 * 1024),
121 PDC_DIMM_WINDOW_STEP = (8 * 1024),
122 PDC_DIMM_HOST_PRD = (6 * 1024),
123 PDC_DIMM_HOST_PKT = (128 * 0),
124 PDC_DIMM_HPKT_PRD = (128 * 1),
125 PDC_DIMM_ATA_PKT = (128 * 2),
126 PDC_DIMM_APKT_PRD = (128 * 3),
127 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
128 PDC_PAGE_WINDOW = 0x40,
129 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
130 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
131 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
132
133 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
134
135 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
136 (1<<23),
137
138 board_20621 = 0, /* FastTrak S150 SX4 */
139
140 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
141 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
142 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
143
144 PDC_MAX_HDMA = 32,
145 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
146
147 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
148 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
149 PDC_I2C_CONTROL = 0x48,
150 PDC_I2C_ADDR_DATA = 0x4C,
151 PDC_DIMM0_CONTROL = 0x80,
152 PDC_DIMM1_CONTROL = 0x84,
153 PDC_SDRAM_CONTROL = 0x88,
154 PDC_I2C_WRITE = 0, /* master -> slave */
155 PDC_I2C_READ = (1 << 6), /* master <- slave */
156 PDC_I2C_START = (1 << 7), /* start I2C proto */
157 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
158 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
159 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
160 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
161 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
162 PDC_DIMM_SPD_ROW_NUM = 3,
163 PDC_DIMM_SPD_COLUMN_NUM = 4,
164 PDC_DIMM_SPD_MODULE_ROW = 5,
165 PDC_DIMM_SPD_TYPE = 11,
166 PDC_DIMM_SPD_FRESH_RATE = 12,
167 PDC_DIMM_SPD_BANK_NUM = 17,
168 PDC_DIMM_SPD_CAS_LATENCY = 18,
169 PDC_DIMM_SPD_ATTRIBUTE = 21,
170 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
171 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
172 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
173 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
174 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
175 PDC_CTL_STATUS = 0x08,
176 PDC_DIMM_WINDOW_CTLR = 0x0C,
177 PDC_TIME_CONTROL = 0x3C,
178 PDC_TIME_PERIOD = 0x40,
179 PDC_TIME_COUNTER = 0x44,
180 PDC_GENERAL_CTLR = 0x484,
181 PCI_PLL_INIT = 0x8A531824,
182 PCI_X_TCOUNT = 0xEE1E5CFF,
183
184 /* PDC_TIME_CONTROL bits */
185 PDC_TIMER_BUZZER = (1 << 10),
186 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
187 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
188 PDC_TIMER_ENABLE = (1 << 7),
189 PDC_TIMER_MASK_INT = (1 << 5),
190 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
191 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
192 PDC_TIMER_ENABLE |
193 PDC_TIMER_MASK_INT,
194 };
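
/*
 * Per-port DIMM layout implied by the constants above (a sketch derived
 * from this driver, not from hardware documentation):
 *
 *   control area, one per port, PDC_DIMM_WINDOW_STEP (8K) apart:
 *     +0      Host DMA packet       (PDC_DIMM_HOST_PKT)
 *     +128    Host DMA S/G entry    (PDC_DIMM_HPKT_PRD)
 *     +256    ATA packet            (PDC_DIMM_ATA_PKT)
 *     +384    ATA S/G entry         (PDC_DIMM_APKT_PRD)
 *     +6K     host-side S/G table   (PDC_DIMM_HOST_PRD)
 *
 *   data buffer, one per port, PDC_DIMM_DATA_STEP (256K) apart,
 *   starting PDC_20621_DIMM_DATA (64K) above PDC_20621_DIMM_BASE.
 */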
195
196 #define ECC_ERASE_BUF_SZ (128 * 1024)
197
198 struct pdc_port_priv {
199 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
200 u8 *pkt;
201 dma_addr_t pkt_dma;
202 };
203
204 struct pdc_host_priv {
205 unsigned int doing_hdma;
206 unsigned int hdma_prod;
207 unsigned int hdma_cons;
208 struct {
209 struct ata_queued_cmd *qc;
210 unsigned int seq;
211 unsigned long pkt_ofs;
212 } hdma[32];
213 };
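
/*
 * hdma[] above is a simple producer/consumer ring (PDC_MAX_HDMA entries,
 * indexed modulo PDC_HDMA_Q_MASK + 1) used to serialize Host DMA packet
 * submission: pdc20621_push_hdma() issues a packet immediately if the
 * HDMA engine is idle, otherwise queues it, and pdc20621_pop_hdma()
 * issues the next queued packet from the completion path.
 */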
214
215
216 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
217 static void pdc_error_handler(struct ata_port *ap);
218 static void pdc_freeze(struct ata_port *ap);
219 static void pdc_thaw(struct ata_port *ap);
220 static int pdc_port_start(struct ata_port *ap);
221 static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
222 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
223 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
224 static unsigned int pdc20621_dimm_init(struct ata_host *host);
225 static int pdc20621_detect_dimm(struct ata_host *host);
226 static unsigned int pdc20621_i2c_read(struct ata_host *host,
227 u32 device, u32 subaddr, u32 *pdata);
228 static int pdc20621_prog_dimm0(struct ata_host *host);
229 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
230 #ifdef ATA_VERBOSE_DEBUG
231 static void pdc20621_get_from_dimm(struct ata_host *host,
232 void *psource, u32 offset, u32 size);
233 #endif
234 static void pdc20621_put_to_dimm(struct ata_host *host,
235 void *psource, u32 offset, u32 size);
236 static void pdc20621_irq_clear(struct ata_port *ap);
237 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
238 static int pdc_softreset(struct ata_link *link, unsigned int *class,
239 unsigned long deadline);
240 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
241 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
242
243
244 static struct scsi_host_template pdc_sata_sht = {
245 ATA_BASE_SHT(DRV_NAME),
246 .sg_tablesize = LIBATA_MAX_PRD,
247 .dma_boundary = ATA_DMA_BOUNDARY,
248 };
249
250 /* TODO: inherit from base port_ops after converting to new EH */
251 static struct ata_port_operations pdc_20621_ops = {
252 .inherits = &ata_sff_port_ops,
253
254 .check_atapi_dma = pdc_check_atapi_dma,
255 .qc_prep = pdc20621_qc_prep,
256 .qc_issue = pdc20621_qc_issue,
257
258 .freeze = pdc_freeze,
259 .thaw = pdc_thaw,
260 .softreset = pdc_softreset,
261 .error_handler = pdc_error_handler,
262 .lost_interrupt = ATA_OP_NULL,
263 .post_internal_cmd = pdc_post_internal_cmd,
264
265 .port_start = pdc_port_start,
266
267 .sff_tf_load = pdc_tf_load_mmio,
268 .sff_exec_command = pdc_exec_command_mmio,
269 .sff_irq_clear = pdc20621_irq_clear,
270 };
271
272 static const struct ata_port_info pdc_port_info[] = {
273 /* board_20621 */
274 {
275 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
276 ATA_FLAG_PIO_POLLING,
277 .pio_mask = ATA_PIO4,
278 .mwdma_mask = ATA_MWDMA2,
279 .udma_mask = ATA_UDMA6,
280 .port_ops = &pdc_20621_ops,
281 },
282
283 };
284
285 static const struct pci_device_id pdc_sata_pci_tbl[] = {
286 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
287
288 { } /* terminate list */
289 };
290
291 static struct pci_driver pdc_sata_pci_driver = {
292 .name = DRV_NAME,
293 .id_table = pdc_sata_pci_tbl,
294 .probe = pdc_sata_init_one,
295 .remove = ata_pci_remove_one,
296 };
297
298
299 static int pdc_port_start(struct ata_port *ap)
300 {
301 struct device *dev = ap->host->dev;
302 struct pdc_port_priv *pp;
303
304 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
305 if (!pp)
306 return -ENOMEM;
307
308 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
309 if (!pp->pkt)
310 return -ENOMEM;
311
312 ap->private_data = pp;
313
314 return 0;
315 }
316
317 static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
318 unsigned int total_len)
319 {
320 u32 addr;
321 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
322 __le32 *buf32 = (__le32 *) buf;
323
324 /* output ATA packet S/G table */
325 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
326 (PDC_DIMM_DATA_STEP * portno);
327 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
328 buf32[dw] = cpu_to_le32(addr);
329 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
330
331 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
332 PDC_20621_DIMM_BASE +
333 (PDC_DIMM_WINDOW_STEP * portno) +
334 PDC_DIMM_APKT_PRD,
335 buf32[dw], buf32[dw + 1]);
336 }
337
338 static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
339 unsigned int total_len)
340 {
341 u32 addr;
342 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
343 __le32 *buf32 = (__le32 *) buf;
344
345 /* output Host DMA packet S/G table */
346 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
347 (PDC_DIMM_DATA_STEP * portno);
348
349 buf32[dw] = cpu_to_le32(addr);
350 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
351
352 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
353 PDC_20621_DIMM_BASE +
354 (PDC_DIMM_WINDOW_STEP * portno) +
355 PDC_DIMM_HPKT_PRD,
356 buf32[dw], buf32[dw + 1]);
357 }
358
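/*
 * Layout of the ATA packet built below, as far as can be inferred from
 * this driver (not from hardware documentation): a control byte
 * (PDC_PKT_READ / PDC_PKT_NODATA), a reserved byte, a sequence ID and a
 * delay sequence ID, followed by the DIMM address of the S/G entry and
 * a next-packet pointer, then a series of (register, value) byte pairs
 * finished off by pdc_prep_lba28/48() and pdc_pkt_footer().
 */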
359 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
360 unsigned int devno, u8 *buf,
361 unsigned int portno)
362 {
363 unsigned int i, dw;
364 __le32 *buf32 = (__le32 *) buf;
365 u8 dev_reg;
366
367 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
368 (PDC_DIMM_WINDOW_STEP * portno) +
369 PDC_DIMM_APKT_PRD;
370 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
371
372 i = PDC_DIMM_ATA_PKT;
373
374 /*
375 * Set up ATA packet
376 */
377 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
378 buf[i++] = PDC_PKT_READ;
379 else if (tf->protocol == ATA_PROT_NODATA)
380 buf[i++] = PDC_PKT_NODATA;
381 else
382 buf[i++] = 0;
383 buf[i++] = 0; /* reserved */
384 buf[i++] = portno + 1; /* seq. id */
385 buf[i++] = 0xff; /* delay seq. id */
386
387 /* dimm dma S/G, and next-pkt */
388 dw = i >> 2;
389 if (tf->protocol == ATA_PROT_NODATA)
390 buf32[dw] = 0;
391 else
392 buf32[dw] = cpu_to_le32(dimm_sg);
393 buf32[dw + 1] = 0;
394 i += 8;
395
396 if (devno == 0)
397 dev_reg = ATA_DEVICE_OBS;
398 else
399 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
400
401 /* select device */
402 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
403 buf[i++] = dev_reg;
404
405 /* device control register */
406 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
407 buf[i++] = tf->ctl;
408
409 return i;
410 }
411
412 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
413 unsigned int portno)
414 {
415 unsigned int dw;
416 u32 tmp;
417 __le32 *buf32 = (__le32 *) buf;
418
419 unsigned int host_sg = PDC_20621_DIMM_BASE +
420 (PDC_DIMM_WINDOW_STEP * portno) +
421 PDC_DIMM_HOST_PRD;
422 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
423 (PDC_DIMM_WINDOW_STEP * portno) +
424 PDC_DIMM_HPKT_PRD;
425 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
426 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
427
428 dw = PDC_DIMM_HOST_PKT >> 2;
429
430 /*
431 * Set up Host DMA packet
432 */
433 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
434 tmp = PDC_PKT_READ;
435 else
436 tmp = 0;
437 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
438 tmp |= (0xff << 24); /* delay seq. id */
439 buf32[dw + 0] = cpu_to_le32(tmp);
440 buf32[dw + 1] = cpu_to_le32(host_sg);
441 buf32[dw + 2] = cpu_to_le32(dimm_sg);
442 buf32[dw + 3] = 0;
443
444 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
445 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
446 PDC_DIMM_HOST_PKT,
447 buf32[dw + 0],
448 buf32[dw + 1],
449 buf32[dw + 2],
450 buf32[dw + 3]);
451 }
452
453 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
454 {
455 struct scatterlist *sg;
456 struct ata_port *ap = qc->ap;
457 struct pdc_port_priv *pp = ap->private_data;
458 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
459 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
460 unsigned int portno = ap->port_no;
461 unsigned int i, si, idx, total_len = 0, sgt_len;
462 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
463
464 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
465
466 VPRINTK("ata%u: ENTER\n", ap->print_id);
467
468 /* hard-code chip #0 */
469 mmio += PDC_CHIP0_OFS;
470
471 /*
472 * Build S/G table
473 */
474 idx = 0;
475 for_each_sg(qc->sg, sg, qc->n_elem, si) {
476 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
477 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
478 total_len += sg_dma_len(sg);
479 }
480 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
481 sgt_len = idx * 4;
482
483 /*
484 * Build ATA, host DMA packets
485 */
486 pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
487 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
488
489 pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
490 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
491
492 if (qc->tf.flags & ATA_TFLAG_LBA48)
493 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
494 else
495 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
496
497 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
498
499 /* copy three S/G tables and two packets to DIMM MMIO window */
500 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
501 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
502 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
503 PDC_DIMM_HOST_PRD,
504 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
505
506 /* force host FIFO dump */
507 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
508
509 readl(dimm_mmio); /* MMIO PCI posting flush */
510
511 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
512 }
513
514 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
515 {
516 struct ata_port *ap = qc->ap;
517 struct pdc_port_priv *pp = ap->private_data;
518 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
519 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
520 unsigned int portno = ap->port_no;
521 unsigned int i;
522
523 VPRINTK("ata%u: ENTER\n", ap->print_id);
524
525 /* hard-code chip #0 */
526 mmio += PDC_CHIP0_OFS;
527
528 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
529
530 if (qc->tf.flags & ATA_TFLAG_LBA48)
531 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
532 else
533 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
534
535 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
536
537 /* copy three S/G tables and two packets to DIMM MMIO window */
538 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
539 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
540
541 /* force host FIFO dump */
542 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
543
544 readl(dimm_mmio); /* MMIO PCI posting flush */
545
546 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
547 }
548
549 static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
550 {
551 switch (qc->tf.protocol) {
552 case ATA_PROT_DMA:
553 pdc20621_dma_prep(qc);
554 break;
555 case ATA_PROT_NODATA:
556 pdc20621_nodata_prep(qc);
557 break;
558 default:
559 break;
560 }
561
562 return AC_ERR_OK;
563 }
564
565 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
566 unsigned int seq,
567 u32 pkt_ofs)
568 {
569 struct ata_port *ap = qc->ap;
570 struct ata_host *host = ap->host;
571 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
572
573 /* hard-code chip #0 */
574 mmio += PDC_CHIP0_OFS;
575
576 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
577 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
578
579 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
580 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
581 }
582
583 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
584 unsigned int seq,
585 u32 pkt_ofs)
586 {
587 struct ata_port *ap = qc->ap;
588 struct pdc_host_priv *pp = ap->host->private_data;
589 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
590
591 if (!pp->doing_hdma) {
592 __pdc20621_push_hdma(qc, seq, pkt_ofs);
593 pp->doing_hdma = 1;
594 return;
595 }
596
597 pp->hdma[idx].qc = qc;
598 pp->hdma[idx].seq = seq;
599 pp->hdma[idx].pkt_ofs = pkt_ofs;
600 pp->hdma_prod++;
601 }
602
603 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
604 {
605 struct ata_port *ap = qc->ap;
606 struct pdc_host_priv *pp = ap->host->private_data;
607 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
608
609 /* if nothing on queue, we're done */
610 if (pp->hdma_prod == pp->hdma_cons) {
611 pp->doing_hdma = 0;
612 return;
613 }
614
615 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
616 pp->hdma[idx].pkt_ofs);
617 pp->hdma_cons++;
618 }
619
620 #ifdef ATA_VERBOSE_DEBUG
621 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
622 {
623 struct ata_port *ap = qc->ap;
624 unsigned int port_no = ap->port_no;
625 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
626
627 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
628 dimm_mmio += PDC_DIMM_HOST_PKT;
629
630 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
631 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
632 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
633 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
634 }
635 #else
636 static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
637 #endif /* ATA_VERBOSE_DEBUG */
638
639 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
640 {
641 struct ata_port *ap = qc->ap;
642 struct ata_host *host = ap->host;
643 unsigned int port_no = ap->port_no;
644 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
645 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
646 u8 seq = (u8) (port_no + 1);
647 unsigned int port_ofs;
648
649 /* hard-code chip #0 */
650 mmio += PDC_CHIP0_OFS;
651
652 VPRINTK("ata%u: ENTER\n", ap->print_id);
653
654 wmb(); /* flush PRD, pkt writes */
655
656 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
657
658 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
659 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
660 seq += 4;
661
662 pdc20621_dump_hdma(qc);
663 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
664 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
665 port_ofs + PDC_DIMM_HOST_PKT,
666 port_ofs + PDC_DIMM_HOST_PKT,
667 seq);
668 } else {
669 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
670 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
671
672 writel(port_ofs + PDC_DIMM_ATA_PKT,
673 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
674 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
675 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
676 port_ofs + PDC_DIMM_ATA_PKT,
677 port_ofs + PDC_DIMM_ATA_PKT,
678 seq);
679 }
680 }
681
682 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
683 {
684 switch (qc->tf.protocol) {
685 case ATA_PROT_NODATA:
686 if (qc->tf.flags & ATA_TFLAG_POLLING)
687 break;
688 /*FALLTHROUGH*/
689 case ATA_PROT_DMA:
690 pdc20621_packet_start(qc);
691 return 0;
692
693 case ATAPI_PROT_DMA:
694 BUG();
695 break;
696
697 default:
698 break;
699 }
700
701 return ata_sff_qc_issue(qc);
702 }
703
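/*
 * Interrupt handling: each port uses two sequence IDs, port_no + 1 for
 * the ATA engine and port_no + 5 for the Host DMA engine (see
 * pdc20621_packet_start() above and pdc20621_host_intr() below), so the
 * interrupt handler can tell from which SEQMASK bit is asserted whether
 * an ATA command or an HDMA copy just finished, and passes that to
 * pdc20621_host_intr() as "doing_hdma".
 */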
704 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
705 struct ata_queued_cmd *qc,
706 unsigned int doing_hdma,
707 void __iomem *mmio)
708 {
709 unsigned int port_no = ap->port_no;
710 unsigned int port_ofs =
711 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
712 u8 status;
713 unsigned int handled = 0;
714
715 VPRINTK("ENTER\n");
716
717 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
718 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
719
720 /* step two - DMA from DIMM to host */
721 if (doing_hdma) {
722 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
723 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
724 /* get drive status; clear intr; complete txn */
725 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
726 ata_qc_complete(qc);
727 pdc20621_pop_hdma(qc);
728 }
729
730 /* step one - exec ATA command */
731 else {
732 u8 seq = (u8) (port_no + 1 + 4);
733 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
734 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
735
736 /* submit hdma pkt */
737 pdc20621_dump_hdma(qc);
738 pdc20621_push_hdma(qc, seq,
739 port_ofs + PDC_DIMM_HOST_PKT);
740 }
741 handled = 1;
742
743 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
744
745 /* step one - DMA from host to DIMM */
746 if (doing_hdma) {
747 u8 seq = (u8) (port_no + 1);
748 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
749 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
750
751 /* submit ata pkt */
752 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
753 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
754 writel(port_ofs + PDC_DIMM_ATA_PKT,
755 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
756 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
757 }
758
759 /* step two - execute ATA command */
760 else {
761 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
762 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
763 /* get drive status; clear intr; complete txn */
764 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
765 ata_qc_complete(qc);
766 pdc20621_pop_hdma(qc);
767 }
768 handled = 1;
769
770 /* command completion, but no data xfer */
771 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
772
773 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
774 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
775 qc->err_mask |= ac_err_mask(status);
776 ata_qc_complete(qc);
777 handled = 1;
778
779 } else {
780 ap->stats.idle_irq++;
781 }
782
783 return handled;
784 }
785
786 static void pdc20621_irq_clear(struct ata_port *ap)
787 {
788 ioread8(ap->ioaddr.status_addr);
789 }
790
791 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
792 {
793 struct ata_host *host = dev_instance;
794 struct ata_port *ap;
795 u32 mask = 0;
796 unsigned int i, tmp, port_no;
797 unsigned int handled = 0;
798 void __iomem *mmio_base;
799
800 VPRINTK("ENTER\n");
801
802 if (!host || !host->iomap[PDC_MMIO_BAR]) {
803 VPRINTK("QUICK EXIT\n");
804 return IRQ_NONE;
805 }
806
807 mmio_base = host->iomap[PDC_MMIO_BAR];
808
809 /* reading should also clear interrupts */
810 mmio_base += PDC_CHIP0_OFS;
811 mask = readl(mmio_base + PDC_20621_SEQMASK);
812 VPRINTK("mask == 0x%x\n", mask);
813
814 if (mask == 0xffffffff) {
815 VPRINTK("QUICK EXIT 2\n");
816 return IRQ_NONE;
817 }
818 mask &= 0xffff; /* only 16 tags possible */
819 if (!mask) {
820 VPRINTK("QUICK EXIT 3\n");
821 return IRQ_NONE;
822 }
823
824 spin_lock(&host->lock);
825
826 for (i = 1; i < 9; i++) {
827 port_no = i - 1;
828 if (port_no > 3)
829 port_no -= 4;
830 if (port_no >= host->n_ports)
831 ap = NULL;
832 else
833 ap = host->ports[port_no];
834 tmp = mask & (1 << i);
835 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
836 if (tmp && ap) {
837 struct ata_queued_cmd *qc;
838
839 qc = ata_qc_from_tag(ap, ap->link.active_tag);
840 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
841 handled += pdc20621_host_intr(ap, qc, (i > 4),
842 mmio_base);
843 }
844 }
845
846 spin_unlock(&host->lock);
847
848 VPRINTK("mask == 0x%x\n", mask);
849
850 VPRINTK("EXIT\n");
851
852 return IRQ_RETVAL(handled);
853 }
854
855 static void pdc_freeze(struct ata_port *ap)
856 {
857 void __iomem *mmio = ap->ioaddr.cmd_addr;
858 u32 tmp;
859
860 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
861
862 tmp = readl(mmio + PDC_CTLSTAT);
863 tmp |= PDC_MASK_INT;
864 tmp &= ~PDC_DMA_ENABLE;
865 writel(tmp, mmio + PDC_CTLSTAT);
866 readl(mmio + PDC_CTLSTAT); /* flush */
867 }
868
869 static void pdc_thaw(struct ata_port *ap)
870 {
871 void __iomem *mmio = ap->ioaddr.cmd_addr;
872 u32 tmp;
873
874 /* FIXME: start HDMA engine, if zero ATA engines running */
875
876 /* clear IRQ */
877 ioread8(ap->ioaddr.status_addr);
878
879 /* turn IRQ back on */
880 tmp = readl(mmio + PDC_CTLSTAT);
881 tmp &= ~PDC_MASK_INT;
882 writel(tmp, mmio + PDC_CTLSTAT);
883 readl(mmio + PDC_CTLSTAT); /* flush */
884 }
885
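/* assert PDC_RESET, poll up to ~1ms for it to latch, then deassert */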
886 static void pdc_reset_port(struct ata_port *ap)
887 {
888 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
889 unsigned int i;
890 u32 tmp;
891
892 /* FIXME: handle HDMA copy engine */
893
894 for (i = 11; i > 0; i--) {
895 tmp = readl(mmio);
896 if (tmp & PDC_RESET)
897 break;
898
899 udelay(100);
900
901 tmp |= PDC_RESET;
902 writel(tmp, mmio);
903 }
904
905 tmp &= ~PDC_RESET;
906 writel(tmp, mmio);
907 readl(mmio); /* flush */
908 }
909
910 static int pdc_softreset(struct ata_link *link, unsigned int *class,
911 unsigned long deadline)
912 {
913 pdc_reset_port(link->ap);
914 return ata_sff_softreset(link, class, deadline);
915 }
916
917 static void pdc_error_handler(struct ata_port *ap)
918 {
919 if (!(ap->pflags & ATA_PFLAG_FROZEN))
920 pdc_reset_port(ap);
921
922 ata_sff_error_handler(ap);
923 }
924
925 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
926 {
927 struct ata_port *ap = qc->ap;
928
929 /* make DMA engine forget about the failed command */
930 if (qc->flags & ATA_QCFLAG_FAILED)
931 pdc_reset_port(ap);
932 }
933
934 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
935 {
936 u8 *scsicmd = qc->scsicmd->cmnd;
937 int pio = 1; /* atapi dma off by default */
938
939 /* Whitelist commands that may use DMA. */
940 switch (scsicmd[0]) {
941 case WRITE_12:
942 case WRITE_10:
943 case WRITE_6:
944 case READ_12:
945 case READ_10:
946 case READ_6:
947 case 0xad: /* READ_DVD_STRUCTURE */
948 case 0xbe: /* READ_CD */
949 pio = 0;
950 }
951 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
952 if (scsicmd[0] == WRITE_10) {
953 unsigned int lba =
954 (scsicmd[2] << 24) |
955 (scsicmd[3] << 16) |
956 (scsicmd[4] << 8) |
957 scsicmd[5];
958 if (lba >= 0xFFFF4FA2)
959 pio = 1;
960 }
961 return pio;
962 }
963
964 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
965 {
966 WARN_ON(tf->protocol == ATA_PROT_DMA ||
967 tf->protocol == ATAPI_PROT_DMA);
968 ata_sff_tf_load(ap, tf);
969 }
970
971
972 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
973 {
974 WARN_ON(tf->protocol == ATA_PROT_DMA ||
975 tf->protocol == ATAPI_PROT_DMA);
976 ata_sff_exec_command(ap, tf);
977 }
978
979
980 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
981 {
982 port->cmd_addr = base;
983 port->data_addr = base;
984 port->feature_addr =
985 port->error_addr = base + 0x4;
986 port->nsect_addr = base + 0x8;
987 port->lbal_addr = base + 0xc;
988 port->lbam_addr = base + 0x10;
989 port->lbah_addr = base + 0x14;
990 port->device_addr = base + 0x18;
991 port->command_addr =
992 port->status_addr = base + 0x1c;
993 port->altstatus_addr =
994 port->ctl_addr = base + 0x38;
995 }
996
997
998 #ifdef ATA_VERBOSE_DEBUG
999 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
1000 u32 offset, u32 size)
1001 {
1002 u32 window_size;
1003 u16 idx;
1004 u8 page_mask;
1005 long dist;
1006 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1007 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1008
1009 /* hard-code chip #0 */
1010 mmio += PDC_CHIP0_OFS;
1011
1012 page_mask = 0x00;
1013 window_size = 0x2000 * 4; /* 32K byte uchar size */
1014 idx = (u16) (offset / window_size);
1015
1016 writel(0x01, mmio + PDC_GENERAL_CTLR);
1017 readl(mmio + PDC_GENERAL_CTLR);
1018 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1019 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1020
1021 offset -= (idx * window_size);
1022 idx++;
1023 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
1024 (long) (window_size - offset);
1025 memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
1026
1027 psource += dist;
1028 size -= dist;
1029 for (; (long) size >= (long) window_size ;) {
1030 writel(0x01, mmio + PDC_GENERAL_CTLR);
1031 readl(mmio + PDC_GENERAL_CTLR);
1032 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1033 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1034 memcpy_fromio(psource, dimm_mmio, window_size / 4);
1035 psource += window_size;
1036 size -= window_size;
1037 idx++;
1038 }
1039
1040 if (size) {
1041 writel(0x01, mmio + PDC_GENERAL_CTLR);
1042 readl(mmio + PDC_GENERAL_CTLR);
1043 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1044 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1045 memcpy_fromio(psource, dimm_mmio, size / 4);
1046 }
1047 }
1048 #endif
1049
1050
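/*
 * DIMM access helpers.  These assume that only a single 32K page of the
 * DIMM is visible through the PDC_DIMM_BAR window at any time, with
 * PDC_DIMM_WINDOW_CTLR selecting which page; larger transfers are done
 * as a sequence of window-sized (or smaller) chunks, moving the window
 * between chunks.
 */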
1051 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
1052 u32 offset, u32 size)
1053 {
1054 u32 window_size;
1055 u16 idx;
1056 u8 page_mask;
1057 long dist;
1058 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1059 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1060
1061 /* hard-code chip #0 */
1062 mmio += PDC_CHIP0_OFS;
1063
1064 page_mask = 0x00;
1065 window_size = 0x2000 * 4; /* 32K byte uchar size */
1066 idx = (u16) (offset / window_size);
1067
1068 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1069 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1070 offset -= (idx * window_size);
1071 idx++;
1072 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1073 (long) (window_size - offset);
1074 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1075 writel(0x01, mmio + PDC_GENERAL_CTLR);
1076 readl(mmio + PDC_GENERAL_CTLR);
1077
1078 psource += dist;
1079 size -= dist;
1080 for (; (long) size >= (long) window_size ;) {
1081 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1082 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1083 memcpy_toio(dimm_mmio, psource, window_size / 4);
1084 writel(0x01, mmio + PDC_GENERAL_CTLR);
1085 readl(mmio + PDC_GENERAL_CTLR);
1086 psource += window_size;
1087 size -= window_size;
1088 idx++;
1089 }
1090
1091 if (size) {
1092 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1093 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1094 memcpy_toio(dimm_mmio, psource, size / 4);
1095 writel(0x01, mmio + PDC_GENERAL_CTLR);
1096 readl(mmio + PDC_GENERAL_CTLR);
1097 }
1098 }
1099
1100
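/*
 * Read one byte from a DIMM SPD EEPROM via the chip's I2C master:
 * program the device address and subaddress into PDC_I2C_ADDR_DATA,
 * start a masked-interrupt read via PDC_I2C_CONTROL, then poll for
 * PDC_I2C_COMPLETE; the returned byte is taken from bits 15:8 of
 * PDC_I2C_ADDR_DATA.  Returns 1 on success, 0 on timeout.
 */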
1101 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1102 u32 subaddr, u32 *pdata)
1103 {
1104 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1105 u32 i2creg = 0;
1106 u32 status;
1107 u32 count = 0;
1108
1109 /* hard-code chip #0 */
1110 mmio += PDC_CHIP0_OFS;
1111
1112 i2creg |= device << 24;
1113 i2creg |= subaddr << 16;
1114
1115 /* Set the device and subaddress */
1116 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1117 readl(mmio + PDC_I2C_ADDR_DATA);
1118
1119 /* Write Control to perform read operation, mask int */
1120 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1121 mmio + PDC_I2C_CONTROL);
1122
1123 for (count = 0; count <= 1000; count ++) {
1124 status = readl(mmio + PDC_I2C_CONTROL);
1125 if (status & PDC_I2C_COMPLETE) {
1126 status = readl(mmio + PDC_I2C_ADDR_DATA);
1127 break;
1128 } else if (count == 1000)
1129 return 0;
1130 }
1131
1132 *pdata = (status >> 8) & 0x000000ff;
1133 return 1;
1134 }
1135
1136
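/*
 * Probe the DIMM's SPD EEPROM for its rated speed.  SPD byte 126
 * (PDC_DIMM_SPD_SYSTEM_FREQ) is expected to read 100 for a PC100 part;
 * failing that, SPD byte 9 (minimum SDRAM cycle time; 0x75 would be
 * 7.5 ns under the usual SPD encoding) at or below 0x75 is treated as a
 * 133 MHz part.  Returns 100, 133, or 0 if no DIMM answers.
 */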
1137 static int pdc20621_detect_dimm(struct ata_host *host)
1138 {
1139 u32 data = 0;
1140 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1141 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1142 if (data == 100)
1143 return 100;
1144 } else
1145 return 0;
1146
1147 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1148 if (data <= 0x75)
1149 return 133;
1150 } else
1151 return 0;
1152
1153 return 0;
1154 }
1155
1156
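/*
 * Program the DIMM 0 module control register from the SPD contents:
 * the geometry and timing bytes read below are packed into bit fields
 * of PDC_DIMM0_CONTROL, and the computed module size in megabytes is
 * returned so pdc20621_dimm_init() can size the ECC scrub.
 */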
1157 static int pdc20621_prog_dimm0(struct ata_host *host)
1158 {
1159 u32 spd0[50];
1160 u32 data = 0;
1161 int size, i;
1162 u8 bdimmsize;
1163 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1164 static const struct {
1165 unsigned int reg;
1166 unsigned int ofs;
1167 } pdc_i2c_read_data [] = {
1168 { PDC_DIMM_SPD_TYPE, 11 },
1169 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1170 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1171 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1172 { PDC_DIMM_SPD_ROW_NUM, 3 },
1173 { PDC_DIMM_SPD_BANK_NUM, 17 },
1174 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1175 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1176 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1177 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1178 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1179 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1180 };
1181
1182 /* hard-code chip #0 */
1183 mmio += PDC_CHIP0_OFS;
1184
1185 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1186 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1187 pdc_i2c_read_data[i].reg,
1188 &spd0[pdc_i2c_read_data[i].ofs]);
1189
1190 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1191 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1192 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1193 data |= (((((spd0[29] > spd0[28])
1194 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1195 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1196
1197 if (spd0[18] & 0x08)
1198 data |= ((0x03) << 14);
1199 else if (spd0[18] & 0x04)
1200 data |= ((0x02) << 14);
1201 else if (spd0[18] & 0x01)
1202 data |= ((0x01) << 14);
1203 else
1204 data |= (0 << 14);
1205
1206 /*
1207	   Compute bDIMMSize (the log2 of the module size in bytes) from the
1208	   SPD geometry bytes and program the resulting size into the register.
1209 */
1210
1211 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1212 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1213 data |= (((size / 16) - 1) << 16);
1214 data |= (0 << 23);
1215 data |= 8;
1216 writel(data, mmio + PDC_DIMM0_CONTROL);
1217 readl(mmio + PDC_DIMM0_CONTROL);
1218 return size;
1219 }
1220
1221
1222 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1223 {
1224 u32 data, spd0;
1225 int error, i;
1226 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1227
1228 /* hard-code chip #0 */
1229 mmio += PDC_CHIP0_OFS;
1230
1231 /*
1232 Set To Default : DIMM Module Global Control Register (0x022259F1)
1233 DIMM Arbitration Disable (bit 20)
1234 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1235 Refresh Enable (bit 17)
1236 */
1237
1238 data = 0x022259F1;
1239 writel(data, mmio + PDC_SDRAM_CONTROL);
1240 readl(mmio + PDC_SDRAM_CONTROL);
1241
1242 /* Turn on for ECC */
1243 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1244 PDC_DIMM_SPD_TYPE, &spd0)) {
1245 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1246 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1247 return 1;
1248 }
1249 if (spd0 == 0x02) {
1250 data |= (0x01 << 16);
1251 writel(data, mmio + PDC_SDRAM_CONTROL);
1252 readl(mmio + PDC_SDRAM_CONTROL);
1253 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1254 }
1255
1256 /* DIMM Initialization Select/Enable (bit 18/19) */
1257 data &= (~(1<<18));
1258 data |= (1<<19);
1259 writel(data, mmio + PDC_SDRAM_CONTROL);
1260
1261 error = 1;
1262 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1263 data = readl(mmio + PDC_SDRAM_CONTROL);
1264 if (!(data & (1<<19))) {
1265 error = 0;
1266 break;
1267 }
1268 msleep(i*100);
1269 }
1270 return error;
1271 }
1272
1273
1274 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1275 {
1276 int speed, size, length;
1277 u32 addr, spd0, pci_status;
1278 u32 time_period = 0;
1279 u32 tcount = 0;
1280 u32 ticks = 0;
1281 u32 clock = 0;
1282 u32 fparam = 0;
1283 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1284
1285 /* hard-code chip #0 */
1286 mmio += PDC_CHIP0_OFS;
1287
1288 /* Initialize PLL based upon PCI Bus Frequency */
1289
1290 /* Initialize Time Period Register */
1291 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1292 time_period = readl(mmio + PDC_TIME_PERIOD);
1293 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1294
1295 /* Enable timer */
1296 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1297 readl(mmio + PDC_TIME_CONTROL);
1298
1299 /* Wait 3 seconds */
1300 msleep(3000);
1301
1302 /*
1303 When timer is enabled, counter is decreased every internal
1304 clock cycle.
1305 */
1306
1307 tcount = readl(mmio + PDC_TIME_COUNTER);
1308 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1309
1310 /*
1311 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1312 register should be >= (0xffffffff - 3x10^8).
1313 */
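	/*
	   Note: 0xffffffff - PCI_X_TCOUNT == 3 x 10^8, so the test below
	   effectively asks whether the internal clock ticked at 100 MHz
	   or less during the 3 second window; if so, the PLL F parameter
	   is derived from the measured rate, otherwise the default
	   PCI_PLL_INIT value is used.
	*/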
1314 if (tcount >= PCI_X_TCOUNT) {
1315 ticks = (time_period - tcount);
1316 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1317
1318 clock = (ticks / 300000);
1319 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1320
1321 clock = (clock * 33);
1322 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1323
1324 /* PLL F Param (bit 22:16) */
1325 fparam = (1400000 / clock) - 2;
1326 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1327
1328 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1329 pci_status = (0x8a001824 | (fparam << 16));
1330 } else
1331 pci_status = PCI_PLL_INIT;
1332
1333 /* Initialize PLL. */
1334 VPRINTK("pci_status: 0x%x\n", pci_status);
1335 writel(pci_status, mmio + PDC_CTL_STATUS);
1336 readl(mmio + PDC_CTL_STATUS);
1337
1338 /*
1339 Read SPD of DIMM by I2C interface,
1340 and program the DIMM Module Controller.
1341 */
1342 if (!(speed = pdc20621_detect_dimm(host))) {
1343 printk(KERN_ERR "Detect Local DIMM Fail\n");
1344 return 1; /* DIMM error */
1345 }
1346 VPRINTK("Local DIMM Speed = %d\n", speed);
1347
1348 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1349 size = pdc20621_prog_dimm0(host);
1350 VPRINTK("Local DIMM Size = %dMB\n", size);
1351
1352 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1353 if (pdc20621_prog_dimm_global(host)) {
1354 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1355 return 1;
1356 }
1357
1358 #ifdef ATA_VERBOSE_DEBUG
1359 {
1360 u8 test_parttern1[40] =
1361 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1362 'N','o','t',' ','Y','e','t',' ',
1363 'D','e','f','i','n','e','d',' ',
1364 '1','.','1','0',
1365 '9','8','0','3','1','6','1','2',0,0};
1366 u8 test_parttern2[40] = {0};
1367
1368 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1369 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1370
1371 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1372 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1373 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1374 test_parttern2[1], &(test_parttern2[2]));
1375 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1376 40);
1377 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1378 test_parttern2[1], &(test_parttern2[2]));
1379
1380 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1381 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1382 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1383 test_parttern2[1], &(test_parttern2[2]));
1384 }
1385 #endif
1386
1387	/* ECC initialization. */
1388
1389 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1390 PDC_DIMM_SPD_TYPE, &spd0)) {
1391 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1392 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1393 return 1;
1394 }
1395 if (spd0 == 0x02) {
1396 void *buf;
1397 VPRINTK("Start ECC initialization\n");
1398 addr = 0;
1399 length = size * 1024 * 1024;
1400 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1401 while (addr < length) {
1402 pdc20621_put_to_dimm(host, buf, addr,
1403 ECC_ERASE_BUF_SZ);
1404 addr += ECC_ERASE_BUF_SZ;
1405 }
1406 kfree(buf);
1407 VPRINTK("Finish ECC initialization\n");
1408 }
1409 return 0;
1410 }
1411
1412
1413 static void pdc_20621_init(struct ata_host *host)
1414 {
1415 u32 tmp;
1416 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1417
1418 /* hard-code chip #0 */
1419 mmio += PDC_CHIP0_OFS;
1420
1421 /*
1422 * Select page 0x40 for our 32k DIMM window
1423 */
1424 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1425 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1426 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1427
1428 /*
1429 * Reset Host DMA
1430 */
1431 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1432 tmp |= PDC_RESET;
1433 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1434 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1435
1436 udelay(10);
1437
1438 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1439 tmp &= ~PDC_RESET;
1440 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1441 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1442 }
1443
1444 static int pdc_sata_init_one(struct pci_dev *pdev,
1445 const struct pci_device_id *ent)
1446 {
1447 const struct ata_port_info *ppi[] =
1448 { &pdc_port_info[ent->driver_data], NULL };
1449 struct ata_host *host;
1450 struct pdc_host_priv *hpriv;
1451 int i, rc;
1452
1453 ata_print_version_once(&pdev->dev, DRV_VERSION);
1454
1455 /* allocate host */
1456 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1457 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1458 if (!host || !hpriv)
1459 return -ENOMEM;
1460
1461 host->private_data = hpriv;
1462
1463 /* acquire resources and fill host */
1464 rc = pcim_enable_device(pdev);
1465 if (rc)
1466 return rc;
1467
1468 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1469 DRV_NAME);
1470 if (rc == -EBUSY)
1471 pcim_pin_device(pdev);
1472 if (rc)
1473 return rc;
1474 host->iomap = pcim_iomap_table(pdev);
1475
1476 for (i = 0; i < 4; i++) {
1477 struct ata_port *ap = host->ports[i];
1478 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1479 unsigned int offset = 0x200 + i * 0x80;
1480
1481 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1482
1483 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1484 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1485 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1486 }
1487
1488 /* configure and activate */
1489 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
1490 if (rc)
1491 return rc;
1492 rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
1493 if (rc)
1494 return rc;
1495
1496 if (pdc20621_dimm_init(host))
1497 return -ENOMEM;
1498 pdc_20621_init(host);
1499
1500 pci_set_master(pdev);
1501 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1502 IRQF_SHARED, &pdc_sata_sht);
1503 }
1504
1505 module_pci_driver(pdc_sata_pci_driver);
1506
1507 MODULE_AUTHOR("Jeff Garzik");
1508 MODULE_DESCRIPTION("Promise SATA low-level driver");
1509 MODULE_LICENSE("GPL");
1510 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1511 MODULE_VERSION(DRV_VERSION);
1512