// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_sx4.c - Promise SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available under NDA.
 */

/*
   Theory of operation
   -------------------

   The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
   engine, DIMM memory, and four ATA engines (one per SATA port).
   Data is copied to/from DIMM memory by the HDMA engine, before
   handing off to one (or more) of the ATA engines.  The ATA
   engines operate solely on DIMM memory.

   The SX4 behaves like a PATA chip, with no SATA controls or
   knowledge whatsoever, leading to the presumption that
   PATA<->SATA bridges exist on SX4 boards, external to the
   PDC20621 chip itself.

   The chip is quite capable, supporting an XOR engine and linked
   hardware commands (permits a string of transactions to be
   submitted and waited-on as a single unit), and an optional
   microprocessor.

   The limiting factor is largely software.  This Linux driver was
   written to multiplex the single HDMA engine to copy disk
   transactions into a fixed DIMM memory space, from where an ATA
   engine takes over.  As a result, each WRITE looks like this:

       submit HDMA packet to hardware
       hardware copies data from system memory to DIMM
       hardware raises interrupt

       submit ATA packet to hardware
       hardware executes ATA WRITE command, w/ data in DIMM
       hardware raises interrupt

   and each READ looks like this:

       submit ATA packet to hardware
       hardware executes ATA READ command, w/ data in DIMM
       hardware raises interrupt

       submit HDMA packet to hardware
       hardware copies data from DIMM to system memory
       hardware raises interrupt

   This is a very slow, lock-step way of doing things that can
   certainly be improved by motivated kernel hackers.

 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"

#define DRV_NAME	"sata_sx4"
#define DRV_VERSION	"0.12"


enum {
	PDC_MMIO_BAR		= 3,
	PDC_DIMM_BAR		= 4,

	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */

	PDC_PKT_SUBMIT		= 0x40,	/* Command packet pointer addr */
	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */

	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */

	PDC_20621_SEQCTL	= 0x400,
	PDC_20621_SEQMASK	= 0x480,
	PDC_20621_GENERAL_CTL	= 0x484,
	PDC_20621_PAGE_SIZE	= (32 * 1024),

	/* chosen, not constant, values; we design our own DIMM mem map */
	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
	PDC_20621_DIMM_BASE	= 0x00200000,
	PDC_20621_DIMM_DATA	= (64 * 1024),
	PDC_DIMM_DATA_STEP	= (256 * 1024),
	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
	PDC_DIMM_HOST_PRD	= (6 * 1024),
	PDC_DIMM_HOST_PKT	= (128 * 0),
	PDC_DIMM_HPKT_PRD	= (128 * 1),
	PDC_DIMM_ATA_PKT	= (128 * 2),
	PDC_DIMM_APKT_PRD	= (128 * 3),
	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
	PDC_PAGE_WINDOW		= 0x40,
	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,

	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */

	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
				  (1<<23),

	board_20621		= 0,	/* FastTrak S150 SX4 */

	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */

	PDC_MAX_HDMA		= 32,
	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),

	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
	PDC_I2C_CONTROL			= 0x48,
	PDC_I2C_ADDR_DATA		= 0x4C,
	PDC_DIMM0_CONTROL		= 0x80,
	PDC_DIMM1_CONTROL		= 0x84,
	PDC_SDRAM_CONTROL		= 0x88,
	PDC_I2C_WRITE			= 0,		/* master -> slave */
	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
	PDC_DIMM_SPD_ROW_NUM		= 3,
	PDC_DIMM_SPD_COLUMN_NUM		= 4,
	PDC_DIMM_SPD_MODULE_ROW		= 5,
	PDC_DIMM_SPD_TYPE		= 11,
	PDC_DIMM_SPD_FRESH_RATE		= 12,
	PDC_DIMM_SPD_BANK_NUM		= 17,
	PDC_DIMM_SPD_CAS_LATENCY	= 18,
	PDC_DIMM_SPD_ATTRIBUTE		= 21,
	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
	PDC_CTL_STATUS			= 0x08,
	PDC_DIMM_WINDOW_CTLR		= 0x0C,
	PDC_TIME_CONTROL		= 0x3C,
	PDC_TIME_PERIOD			= 0x40,
	PDC_TIME_COUNTER		= 0x44,
	PDC_GENERAL_CTLR		= 0x484,
	PCI_PLL_INIT			= 0x8A531824,
	PCI_X_TCOUNT			= 0xEE1E5CFF,

	/* PDC_TIME_CONTROL bits */
	PDC_TIMER_BUZZER		= (1 << 10),
	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
	PDC_TIMER_ENABLE		= (1 << 7),
	PDC_TIMER_MASK_INT		= (1 << 5),
	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
					  PDC_TIMER_ENABLE |
					  PDC_TIMER_MASK_INT,
};

#define ECC_ERASE_BUF_SZ (128 * 1024)

struct pdc_port_priv {
	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
	u8			*pkt;
	dma_addr_t		pkt_dma;
};

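/*
 * Per-host state: the single HDMA copy engine is shared by all four
 * ports, so pending HDMA packets are queued in a small ring
 * (producer/consumer indices) while one transfer is in flight.
 */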
struct pdc_host_priv {
	unsigned int		doing_hdma;
	unsigned int		hdma_prod;
	unsigned int		hdma_cons;
	struct {
		struct ata_queued_cmd *qc;
		unsigned int	seq;
		unsigned long	pkt_ofs;
	} hdma[32];
};


static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
static int pdc20621_detect_dimm(struct ata_host *host);
static unsigned int pdc20621_i2c_read(struct ata_host *host,
				      u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_host *host);
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host,
				   void *psource, u32 offset, u32 size);
#endif
static void pdc20621_put_to_dimm(struct ata_host *host,
				 void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);


static struct scsi_host_template pdc_sata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc20621_qc_prep,
	.qc_issue		= pdc20621_qc_issue,

	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.softreset		= pdc_softreset,
	.error_handler		= pdc_error_handler,
	.lost_interrupt		= ATA_OP_NULL,
	.post_internal_cmd	= pdc_post_internal_cmd,

	.port_start		= pdc_port_start,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.sff_irq_clear		= pdc20621_irq_clear,
};

static const struct ata_port_info pdc_port_info[] = {
	/* board_20621 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_20621_ops,
	},

};

static const struct pci_device_id pdc_sata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },

	{ }	/* terminate list */
};

static struct pci_driver pdc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_sata_pci_tbl,
	.probe			= pdc_sata_init_one,
	.remove			= ata_pci_remove_one,
};


static int pdc_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;

	return 0;
}

static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
				   unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output ATA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);
	VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		(PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_APKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
				    unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output Host DMA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);

	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		(PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HPKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	__le32 *buf32 = (__le32 *) buf;
	u8 dev_reg;

	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;
	i += 8;

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}

static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp;
	__le32 *buf32 = (__le32 *) buf;

	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;

	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HOST_PKT,
		buf32[dw + 0],
		buf32[dw + 1],
		buf32[dw + 2],
		buf32[dw + 3]);
}

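/*
 * Build the S/G table plus the ATA and Host DMA packets for a DMA
 * command in the per-port staging buffer, then copy them into this
 * port's window of the on-board DIMM.
 */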
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i, si, idx, total_len = 0, sgt_len;
	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table
	 */
	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}

static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}

static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc20621_dma_prep(qc);
		break;
	case ATA_PROT_NODATA:
		pdc20621_nodata_prep(qc);
		break;
	default:
		break;
	}
}

static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}

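/*
 * Start an HDMA transfer immediately if the engine is idle;
 * otherwise queue it in the ring for later submission.
 */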
static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
			       unsigned int seq,
			       u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;

	if (!pp->doing_hdma) {
		__pdc20621_push_hdma(qc, seq, pkt_ofs);
		pp->doing_hdma = 1;
		return;
	}

	pp->hdma[idx].qc = qc;
	pp->hdma[idx].seq = seq;
	pp->hdma[idx].pkt_ofs = pkt_ofs;
	pp->hdma_prod++;
}

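/*
 * An HDMA transfer finished: submit the next queued one, if any,
 * or mark the engine idle.
 */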
static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;

	/* if nothing on queue, we're done */
	if (pp->hdma_prod == pp->hdma_cons) {
		pp->doing_hdma = 0;
		return;
	}

	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
			     pp->hdma[idx].pkt_ofs);
	pp->hdma_cons++;
}

#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */

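/*
 * Kick off a prepared command: writes go through the HDMA engine
 * first (host memory -> DIMM), everything else is handed straight
 * to the port's ATA engine.
 */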
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	wmb();			/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT,
			seq);
	} else {
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_ATA_PKT,
			port_ofs + PDC_DIMM_ATA_PKT,
			seq);
	}
}

static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_DMA:
		pdc20621_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}

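/*
 * Per-port interrupt handling: advance the two-step READ/WRITE state
 * machine (ATA engine <-> HDMA engine) or complete NODATA commands.
 */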
static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
					      struct ata_queued_cmd *qc,
					      unsigned int doing_hdma,
					      void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			u8 seq = (u8) (port_no + 1 + 4);
			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			u8 seq = (u8) (port_no + 1);
			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		ap->stats.idle_irq++;
	}

	return handled;
}

static void pdc20621_irq_clear(struct ata_port *ap)
{
	ioread8(ap->ioaddr.status_addr);
}

static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host->iomap[PDC_MMIO_BAR];

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);
	VPRINTK("mask == 0x%x\n", mask);

	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}
	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	for (i = 1; i < 9; i++) {
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;
		if (port_no >= host->n_ports)
			ap = NULL;
		else
			ap = host->ports[port_no];
		tmp = mask & (1 << i);
		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
		if (tmp && ap) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */

	tmp = readl(mmio + PDC_CTLSTAT);
	tmp |= PDC_MASK_INT;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: start HDMA engine, if zero ATA engines running */

	/* clear IRQ */
	ioread8(ap->ioaddr.status_addr);

	/* turn IRQ back on */
	tmp = readl(mmio + PDC_CTLSTAT);
	tmp &= ~PDC_MASK_INT;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

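/* Pulse the port's RESET bit: assert it until it reads back set, then clear it. */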
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}

static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}

static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
	}
	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba =
			(scsicmd[2] << 24) |
			(scsicmd[3] << 16) |
			(scsicmd[4] << 8) |
			scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}
	return pio;
}

static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}


static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}


static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base;
	port->data_addr		= base;
	port->feature_addr	=
	port->error_addr	= base + 0x4;
	port->nsect_addr	= base + 0x8;
	port->lbal_addr		= base + 0xc;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;
	port->device_addr	= base + 0x18;
	port->command_addr	=
	port->status_addr	= base + 0x1c;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x38;
}


#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	offset -= (idx * window_size);
	idx++;
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio(psource, dimm_mmio, window_size / 4);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio(psource, dimm_mmio, size / 4);
	}
}
#endif


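/*
 * Copy a buffer into DIMM memory through the 32K MMIO window,
 * stepping the window index as the copy crosses window boundaries.
 */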
static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	offset -= (idx * window_size);
	idx++;
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}


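/*
 * Polled read of a single byte over the chip's I2C master (used for
 * DIMM SPD data).  Returns 1 on success, 0 on timeout.
 */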
static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
				      u32 subaddr, u32 *pdata)
{
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	u32 i2creg = 0;
	u32 status;
	u32 count = 0;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i2creg |= device << 24;
	i2creg |= subaddr << 16;

	/* Set the device and subaddress */
	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
	readl(mmio + PDC_I2C_ADDR_DATA);

	/* Write Control to perform read operation, mask int */
	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
	       mmio + PDC_I2C_CONTROL);

	for (count = 0; count <= 1000; count++) {
		status = readl(mmio + PDC_I2C_CONTROL);
		if (status & PDC_I2C_COMPLETE) {
			status = readl(mmio + PDC_I2C_ADDR_DATA);
			break;
		} else if (count == 1000)
			return 0;
	}

	*pdata = (status >> 8) & 0x000000ff;
	return 1;
}


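/* Determine DIMM speed (100 or 133 MHz) from SPD data; 0 means no/unknown DIMM. */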
static int pdc20621_detect_dimm(struct ata_host *host)
{
	u32 data = 0;

	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			      PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
		if (data == 100)
			return 100;
	} else
		return 0;

	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
		if (data <= 0x75)
			return 133;
	} else
		return 0;

	return 0;
}


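/*
 * Program the DIMM0 module control register from the SPD timing
 * parameters.  Returns the module size in MB.
 */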
static int pdc20621_prog_dimm0(struct ata_host *host)
{
	u32 spd0[50];
	u32 data = 0;
	int size, i;
	u8 bdimmsize;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	static const struct {
		unsigned int reg;
		unsigned int ofs;
	} pdc_i2c_read_data [] = {
		{ PDC_DIMM_SPD_TYPE, 11 },
		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
		{ PDC_DIMM_SPD_ROW_NUM, 3 },
		{ PDC_DIMM_SPD_BANK_NUM, 17 },
		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
	};

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
				  pdc_i2c_read_data[i].reg,
				  &spd0[pdc_i2c_read_data[i].ofs]);

	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3] - 11) << 4);
	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
		((((spd0[27] + 9) / 10) - 1) << 8);
	data |= (((((spd0[29] > spd0[28])
		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;

	if (spd0[18] & 0x08)
		data |= ((0x03) << 14);
	else if (spd0[18] & 0x04)
		data |= ((0x02) << 14);
	else if (spd0[18] & 0x01)
		data |= ((0x01) << 14);
	else
		data |= (0 << 14);

	/*
	   Calculate the size of bDIMMSize (power of 2) and
	   merge the DIMM size by program start/end address.
	*/

	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
	data |= (((size / 16) - 1) << 16);
	data |= (0 << 23);
	data |= 8;
	writel(data, mmio + PDC_DIMM0_CONTROL);
	readl(mmio + PDC_DIMM0_CONTROL);
	return size;
}


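/*
 * Program the DIMM module global control register and wait for the
 * DIMM initialization to complete.  Returns 0 on success.
 */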
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
{
	u32 data, spd0;
	int error, i;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	   Set To Default : DIMM Module Global Control Register (0x022259F1)
	   DIMM Arbitration Disable (bit 20)
	   DIMM Data/Control Output Driving Selection (bit12 - bit15)
	   Refresh Enable (bit 17)
	*/

	data = 0x022259F1;
	writel(data, mmio + PDC_SDRAM_CONTROL);
	readl(mmio + PDC_SDRAM_CONTROL);

	/* Turn on for ECC */
	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			       PDC_DIMM_SPD_TYPE, &spd0)) {
		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
		return 1;
	}
	if (spd0 == 0x02) {
		data |= (0x01 << 16);
		writel(data, mmio + PDC_SDRAM_CONTROL);
		readl(mmio + PDC_SDRAM_CONTROL);
		printk(KERN_ERR "Local DIMM ECC Enabled\n");
	}

	/* DIMM Initialization Select/Enable (bit 18/19) */
	data &= (~(1<<18));
	data |= (1<<19);
	writel(data, mmio + PDC_SDRAM_CONTROL);

	error = 1;
	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
		data = readl(mmio + PDC_SDRAM_CONTROL);
		if (!(data & (1<<19))) {
			error = 0;
			break;
		}
		msleep(i*100);
	}
	return error;
}


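/*
 * Measure the PCI bus clock, program the PLL accordingly, then detect
 * and program the on-board DIMM, zero-filling it if ECC is present.
 */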
static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
	int speed, size, length;
	u32 addr, spd0, pci_status;
	u32 time_period = 0;
	u32 tcount = 0;
	u32 ticks = 0;
	u32 clock = 0;
	u32 fparam = 0;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/* Initialize PLL based upon PCI Bus Frequency */

	/* Initialize Time Period Register */
	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
	time_period = readl(mmio + PDC_TIME_PERIOD);
	VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);

	/* Enable timer */
	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
	readl(mmio + PDC_TIME_CONTROL);

	/* Wait 3 seconds */
	msleep(3000);

	/*
	   When timer is enabled, counter is decreased every internal
	   clock cycle.
	*/

	tcount = readl(mmio + PDC_TIME_COUNTER);
	VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);

	/*
	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
	   register should be >= (0xffffffff - 3x10^8).
	*/
	if (tcount >= PCI_X_TCOUNT) {
		ticks = (time_period - tcount);
		VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);

		clock = (ticks / 300000);
		VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);

		clock = (clock * 33);
		VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);

		/* PLL F Param (bit 22:16) */
		fparam = (1400000 / clock) - 2;
		VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);

		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
		pci_status = (0x8a001824 | (fparam << 16));
	} else
		pci_status = PCI_PLL_INIT;

	/* Initialize PLL. */
	VPRINTK("pci_status: 0x%x\n", pci_status);
	writel(pci_status, mmio + PDC_CTL_STATUS);
	readl(mmio + PDC_CTL_STATUS);

	/*
	   Read SPD of DIMM by I2C interface,
	   and program the DIMM Module Controller.
	*/
	if (!(speed = pdc20621_detect_dimm(host))) {
		printk(KERN_ERR "Detect Local DIMM Fail\n");
		return 1;	/* DIMM error */
	}
	VPRINTK("Local DIMM Speed = %d\n", speed);

	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
	size = pdc20621_prog_dimm0(host);
	VPRINTK("Local DIMM Size = %dMB\n", size);

	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
	if (pdc20621_prog_dimm_global(host)) {
		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
		return 1;
	}

#ifdef ATA_VERBOSE_DEBUG
	{
		u8 test_parttern1[40] =
			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
			'N','o','t',' ','Y','e','t',' ',
			'D','e','f','i','n','e','d',' ',
			'1','.','1','0',
			'9','8','0','3','1','6','1','2',0,0};
		u8 test_parttern2[40] = {0};

		pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
		pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);

		pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));
		pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
				       40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));

		pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));
	}
#endif

	/* ECC initialization. */

	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			       PDC_DIMM_SPD_TYPE, &spd0)) {
		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
		return 1;
	}
	if (spd0 == 0x02) {
		void *buf;
		VPRINTK("Start ECC initialization\n");
		addr = 0;
		length = size * 1024 * 1024;
		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
		if (!buf)
			return 1;
		while (addr < length) {
			pdc20621_put_to_dimm(host, buf, addr,
					     ECC_ERASE_BUF_SZ);
			addr += ECC_ERASE_BUF_SZ;
		}
		kfree(buf);
		VPRINTK("Finish ECC initialization\n");
	}
	return 0;
}


static void pdc_20621_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Select page 0x40 for our 32k DIMM window
	 */
	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);

	/*
	 * Reset Host DMA
	 */
	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp |= PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);	/* flush */

	udelay(10);

	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp &= ~PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);	/* flush */
}

static int pdc_sata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] =
		{ &pdc_port_info[ent->driver_data], NULL };
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	int i, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
				DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 4; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
		unsigned int offset = 0x200 + i * 0x80;

		pdc_sata_setup_port(&ap->ioaddr, base + offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
	}

	/* configure and activate */
	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	if (pdc20621_dimm_init(host))
		return -ENOMEM;
	pdc_20621_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
				 IRQF_SHARED, &pdc_sata_sht);
}

module_pci_driver(pdc_sata_pci_driver);

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);