1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  sata_nv.c - NVIDIA nForce SATA
4  *
5  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
6  *  Copyright 2004 Andrew Chew
7  *
8  *  libata documentation is available via 'make {ps|pdf}docs',
9  *  as Documentation/driver-api/libata.rst
10  *
11  *  No hardware documentation available outside of NVIDIA.
12  *  This driver programs the NVIDIA SATA controller in a similar
13  *  fashion as with other PCI IDE BMDMA controllers, with a few
14  *  NV-specific details such as register offsets, SATA phy location,
15  *  hotplug info, etc.
16  *
17  *  CK804/MCP04 controllers support an alternate programming interface
18  *  similar to the ADMA specification (with some modifications).
19  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20  *  sent through the legacy interface.
21  */
22 
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/gfp.h>
26 #include <linux/pci.h>
27 #include <linux/blkdev.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <linux/libata.h>
34 
35 #define DRV_NAME			"sata_nv"
36 #define DRV_VERSION			"3.5"
37 
38 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
39 
40 enum {
41 	NV_MMIO_BAR			= 5,
42 
43 	NV_PORTS			= 2,
44 	NV_PIO_MASK			= ATA_PIO4,
45 	NV_MWDMA_MASK			= ATA_MWDMA2,
46 	NV_UDMA_MASK			= ATA_UDMA6,
47 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
48 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
49 
50 	/* INT_STATUS/ENABLE */
51 	NV_INT_STATUS			= 0x10,
52 	NV_INT_ENABLE			= 0x11,
53 	NV_INT_STATUS_CK804		= 0x440,
54 	NV_INT_ENABLE_CK804		= 0x441,
55 
56 	/* INT_STATUS/ENABLE bits */
57 	NV_INT_DEV			= 0x01,
58 	NV_INT_PM			= 0x02,
59 	NV_INT_ADDED			= 0x04,
60 	NV_INT_REMOVED			= 0x08,
61 
62 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
63 
64 	NV_INT_ALL			= 0x0f,
65 	NV_INT_MASK			= NV_INT_DEV |
66 					  NV_INT_ADDED | NV_INT_REMOVED,
67 
68 	/* INT_CONFIG */
69 	NV_INT_CONFIG			= 0x12,
70 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
71 
72 	// For PCI config register 20
73 	NV_MCP_SATA_CFG_20		= 0x50,
74 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
75 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
76 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
77 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
78 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
79 
80 	NV_ADMA_MAX_CPBS		= 32,
81 	NV_ADMA_CPB_SZ			= 128,
82 	NV_ADMA_APRD_SZ			= 16,
83 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
84 					   NV_ADMA_APRD_SZ,
85 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
86 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
87 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
88 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
89 
90 	/* BAR5 offset to ADMA general registers */
91 	NV_ADMA_GEN			= 0x400,
92 	NV_ADMA_GEN_CTL			= 0x00,
93 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
94 
95 	/* BAR5 offset to ADMA ports */
96 	NV_ADMA_PORT			= 0x480,
97 
98 	/* size of ADMA port register space  */
99 	NV_ADMA_PORT_SIZE		= 0x100,
100 
101 	/* ADMA port registers */
102 	NV_ADMA_CTL			= 0x40,
103 	NV_ADMA_CPB_COUNT		= 0x42,
104 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
105 	NV_ADMA_STAT			= 0x44,
106 	NV_ADMA_CPB_BASE_LOW		= 0x48,
107 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
108 	NV_ADMA_APPEND			= 0x50,
109 	NV_ADMA_NOTIFIER		= 0x68,
110 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
111 
112 	/* NV_ADMA_CTL register bits */
113 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
114 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
115 	NV_ADMA_CTL_GO			= (1 << 7),
116 	NV_ADMA_CTL_AIEN		= (1 << 8),
117 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
118 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
119 
120 	/* CPB response flag bits */
121 	NV_CPB_RESP_DONE		= (1 << 0),
122 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
123 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
124 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
125 
126 	/* CPB control flag bits */
127 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
128 	NV_CPB_CTL_QUEUE		= (1 << 1),
129 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
130 	NV_CPB_CTL_IEN			= (1 << 3),
131 	NV_CPB_CTL_FPDMA		= (1 << 4),
132 
133 	/* APRD flags */
134 	NV_APRD_WRITE			= (1 << 1),
135 	NV_APRD_END			= (1 << 2),
136 	NV_APRD_CONT			= (1 << 3),
137 
138 	/* NV_ADMA_STAT flags */
139 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
140 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
141 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
142 	NV_ADMA_STAT_CPBERR		= (1 << 4),
143 	NV_ADMA_STAT_SERROR		= (1 << 5),
144 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
145 	NV_ADMA_STAT_IDLE		= (1 << 8),
146 	NV_ADMA_STAT_LEGACY		= (1 << 9),
147 	NV_ADMA_STAT_STOPPED		= (1 << 10),
148 	NV_ADMA_STAT_DONE		= (1 << 12),
149 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
150 					  NV_ADMA_STAT_TIMEOUT,
151 
152 	/* port flags */
153 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
154 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
155 
156 	/* MCP55 reg offset */
157 	NV_CTL_MCP55			= 0x400,
158 	NV_INT_STATUS_MCP55		= 0x440,
159 	NV_INT_ENABLE_MCP55		= 0x444,
160 	NV_NCQ_REG_MCP55		= 0x448,
161 
162 	/* MCP55 */
163 	NV_INT_ALL_MCP55		= 0xffff,
164 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
165 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
166 
167 	/* SWNCQ ENABLE BITS */
168 	NV_CTL_PRI_SWNCQ		= 0x02,
169 	NV_CTL_SEC_SWNCQ		= 0x04,
170 
171 	/* SW NCQ status bits */
172 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
173 	NV_SWNCQ_IRQ_PM			= (1 << 1),
174 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
175 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
176 
177 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
178 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
179 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
180 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
181 
182 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
183 					  NV_SWNCQ_IRQ_REMOVED,
184 
185 };
186 
187 /* ADMA Physical Region Descriptor - one SG segment */
188 struct nv_adma_prd {
189 	__le64			addr;
190 	__le32			len;
191 	u8			flags;
192 	u8			packet_len;
193 	__le16			reserved;
194 };
195 
196 enum nv_adma_regbits {
197 	CMDEND	= (1 << 15),		/* end of command list */
198 	WNB	= (1 << 14),		/* wait-not-BSY */
199 	IGN	= (1 << 13),		/* ignore this entry */
200 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
201 	DA2	= (1 << (2 + 8)),
202 	DA1	= (1 << (1 + 8)),
203 	DA0	= (1 << (0 + 8)),
204 };
205 
206 /* ADMA Command Parameter Block
207    The first 5 SG segments are stored inside the Command Parameter Block itself.
208    If there are more than 5 segments the remainder are stored in a separate
209    memory area indicated by next_aprd. */
210 struct nv_adma_cpb {
211 	u8			resp_flags;    /* 0 */
212 	u8			reserved1;     /* 1 */
213 	u8			ctl_flags;     /* 2 */
214 	/* len is length of taskfile in 64 bit words */
215 	u8			len;		/* 3  */
216 	u8			tag;           /* 4 */
217 	u8			next_cpb_idx;  /* 5 */
218 	__le16			reserved2;     /* 6-7 */
219 	__le16			tf[12];        /* 8-31 */
220 	struct nv_adma_prd	aprd[5];       /* 32-111 */
221 	__le64			next_aprd;     /* 112-119 */
222 	__le64			reserved3;     /* 120-127 */
223 };
224 
225 
226 struct nv_adma_port_priv {
227 	struct nv_adma_cpb	*cpb;
228 	dma_addr_t		cpb_dma;
229 	struct nv_adma_prd	*aprd;
230 	dma_addr_t		aprd_dma;
231 	void __iomem		*ctl_block;
232 	void __iomem		*gen_block;
233 	void __iomem		*notifier_clear_block;
234 	u64			adma_dma_mask;
235 	u8			flags;
236 	int			last_issue_ncq;
237 };
238 
239 struct nv_host_priv {
240 	unsigned long		type;
241 };
242 
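/* Simple FIFO used by the SWNCQ path to hold commands that cannot be
   issued immediately; defer_bits tracks which tags are currently queued. */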
243 struct defer_queue {
244 	u32		defer_bits;
245 	unsigned int	head;
246 	unsigned int	tail;
247 	unsigned int	tag[ATA_MAX_QUEUE];
248 };
249 
250 enum ncq_saw_flag_list {
251 	ncq_saw_d2h	= (1U << 0),
252 	ncq_saw_dmas	= (1U << 1),
253 	ncq_saw_sdb	= (1U << 2),
254 	ncq_saw_backout	= (1U << 3),
255 };
256 
257 struct nv_swncq_port_priv {
258 	struct ata_bmdma_prd *prd;	 /* our SG list */
259 	dma_addr_t	prd_dma; /* and its DMA mapping */
260 	void __iomem	*sactive_block;
261 	void __iomem	*irq_block;
262 	void __iomem	*tag_block;
263 	u32		qc_active;
264 
265 	unsigned int	last_issue_tag;
266 
267 	/* FIFO circular queue to store deferred commands */
268 	struct defer_queue defer_queue;
269 
270 	/* for NCQ interrupt analysis */
271 	u32		dhfis_bits;
272 	u32		dmafis_bits;
273 	u32		sdbfis_bits;
274 
275 	unsigned int	ncq_flags;
276 };
277 
278 
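/* Per-port ADMA interrupt pending bit in the ADMA general control/status
   word: bit 19 for port 0 and bit 31 (19 + 12) for port 1. */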
279 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
280 
281 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
282 #ifdef CONFIG_PM_SLEEP
283 static int nv_pci_device_resume(struct pci_dev *pdev);
284 #endif
285 static void nv_ck804_host_stop(struct ata_host *host);
286 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
287 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
288 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
289 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
290 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
291 
292 static int nv_hardreset(struct ata_link *link, unsigned int *class,
293 			unsigned long deadline);
294 static void nv_nf2_freeze(struct ata_port *ap);
295 static void nv_nf2_thaw(struct ata_port *ap);
296 static void nv_ck804_freeze(struct ata_port *ap);
297 static void nv_ck804_thaw(struct ata_port *ap);
298 static int nv_adma_slave_config(struct scsi_device *sdev);
299 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
300 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
301 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
302 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
303 static void nv_adma_irq_clear(struct ata_port *ap);
304 static int nv_adma_port_start(struct ata_port *ap);
305 static void nv_adma_port_stop(struct ata_port *ap);
306 #ifdef CONFIG_PM
307 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
308 static int nv_adma_port_resume(struct ata_port *ap);
309 #endif
310 static void nv_adma_freeze(struct ata_port *ap);
311 static void nv_adma_thaw(struct ata_port *ap);
312 static void nv_adma_error_handler(struct ata_port *ap);
313 static void nv_adma_host_stop(struct ata_host *host);
314 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
315 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
316 
317 static void nv_mcp55_thaw(struct ata_port *ap);
318 static void nv_mcp55_freeze(struct ata_port *ap);
319 static void nv_swncq_error_handler(struct ata_port *ap);
320 static int nv_swncq_slave_config(struct scsi_device *sdev);
321 static int nv_swncq_port_start(struct ata_port *ap);
322 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
323 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
324 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
325 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
326 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
327 #ifdef CONFIG_PM
328 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
329 static int nv_swncq_port_resume(struct ata_port *ap);
330 #endif
331 
332 enum nv_host_type
333 {
334 	GENERIC,
335 	NFORCE2,
336 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
337 	CK804,
338 	ADMA,
339 	MCP5x,
340 	SWNCQ,
341 };
342 
343 static const struct pci_device_id nv_pci_tbl[] = {
344 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
345 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
346 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
347 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
348 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
349 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
350 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
351 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
352 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
353 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
354 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
355 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
356 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
357 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
358 
359 	{ } /* terminate list */
360 };
361 
362 static struct pci_driver nv_pci_driver = {
363 	.name			= DRV_NAME,
364 	.id_table		= nv_pci_tbl,
365 	.probe			= nv_init_one,
366 #ifdef CONFIG_PM_SLEEP
367 	.suspend		= ata_pci_device_suspend,
368 	.resume			= nv_pci_device_resume,
369 #endif
370 	.remove			= ata_pci_remove_one,
371 };
372 
373 static struct scsi_host_template nv_sht = {
374 	ATA_BMDMA_SHT(DRV_NAME),
375 };
376 
377 static struct scsi_host_template nv_adma_sht = {
378 	ATA_NCQ_SHT(DRV_NAME),
379 	.can_queue		= NV_ADMA_MAX_CPBS,
380 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
381 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
382 	.slave_configure	= nv_adma_slave_config,
383 };
384 
385 static struct scsi_host_template nv_swncq_sht = {
386 	ATA_NCQ_SHT(DRV_NAME),
387 	.can_queue		= ATA_MAX_QUEUE - 1,
388 	.sg_tablesize		= LIBATA_MAX_PRD,
389 	.dma_boundary		= ATA_DMA_BOUNDARY,
390 	.slave_configure	= nv_swncq_slave_config,
391 };
392 
393 /*
394  * NV SATA controllers have various different problems with hardreset
395  * protocol depending on the specific controller and device.
396  *
397  * GENERIC:
398  *
399  *  bko11195 reports that the link doesn't come online after hardreset on
400  *  generic nv's and there have been several other similar reports on
401  *  linux-ide.
402  *
403  *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
404  *  softreset.
405  *
406  * NF2/3:
407  *
408  *  bko3352 reports nf2/3 controllers can't determine device signature
409  *  reliably after hardreset.  The following thread reports detection
410  *  failure on cold boot with the standard debouncing timing.
411  *
412  *  http://thread.gmane.org/gmane.linux.ide/34098
413  *
414  *  bko12176 reports that hardreset fails to bring up the link during
415  *  boot on nf2.
416  *
417  * CK804:
418  *
419  *  For initial probing after boot and hot plugging, hardreset mostly
420  *  works fine on CK804 but curiously, reprobing on the initial port
421  *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
422  *  FIS in a somewhat nondeterministic way.
423  *
424  * SWNCQ:
425  *
426  *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
427  *  hardreset should be used and hardreset can't report a proper
428  *  signature, which suggests that mcp5x is closer to nf2 as far as
429  *  reset quirkiness is concerned.
430  *
431  *  bko12703 reports that boot probing fails for an Intel SSD with
432  *  hardreset.  Link fails to come online.  Softreset works fine.
433  *
434  * The failures are varied but the following patterns seem true for
435  * all flavors.
436  *
437  * - Softreset during boot always works.
438  *
439  * - Hardreset during boot sometimes fails to bring up the link on
440  *   certain combinations and device signature acquisition is
441  *   unreliable.
442  *
443  * - Hardreset is often necessary after hotplug.
444  *
445  * So, preferring softreset for boot probing and error handling (as
446  * hardreset might bring down the link) but using hardreset for
447  * post-boot probing should work around the above issues in most
448  * cases.  Define nv_hardreset() which only kicks in for post-boot
449  * probing and use it for all variants.
450  */
451 static struct ata_port_operations nv_generic_ops = {
452 	.inherits		= &ata_bmdma_port_ops,
453 	.lost_interrupt		= ATA_OP_NULL,
454 	.scr_read		= nv_scr_read,
455 	.scr_write		= nv_scr_write,
456 	.hardreset		= nv_hardreset,
457 };
458 
459 static struct ata_port_operations nv_nf2_ops = {
460 	.inherits		= &nv_generic_ops,
461 	.freeze			= nv_nf2_freeze,
462 	.thaw			= nv_nf2_thaw,
463 };
464 
465 static struct ata_port_operations nv_ck804_ops = {
466 	.inherits		= &nv_generic_ops,
467 	.freeze			= nv_ck804_freeze,
468 	.thaw			= nv_ck804_thaw,
469 	.host_stop		= nv_ck804_host_stop,
470 };
471 
472 static struct ata_port_operations nv_adma_ops = {
473 	.inherits		= &nv_ck804_ops,
474 
475 	.check_atapi_dma	= nv_adma_check_atapi_dma,
476 	.sff_tf_read		= nv_adma_tf_read,
477 	.qc_defer		= ata_std_qc_defer,
478 	.qc_prep		= nv_adma_qc_prep,
479 	.qc_issue		= nv_adma_qc_issue,
480 	.sff_irq_clear		= nv_adma_irq_clear,
481 
482 	.freeze			= nv_adma_freeze,
483 	.thaw			= nv_adma_thaw,
484 	.error_handler		= nv_adma_error_handler,
485 	.post_internal_cmd	= nv_adma_post_internal_cmd,
486 
487 	.port_start		= nv_adma_port_start,
488 	.port_stop		= nv_adma_port_stop,
489 #ifdef CONFIG_PM
490 	.port_suspend		= nv_adma_port_suspend,
491 	.port_resume		= nv_adma_port_resume,
492 #endif
493 	.host_stop		= nv_adma_host_stop,
494 };
495 
496 static struct ata_port_operations nv_swncq_ops = {
497 	.inherits		= &nv_generic_ops,
498 
499 	.qc_defer		= ata_std_qc_defer,
500 	.qc_prep		= nv_swncq_qc_prep,
501 	.qc_issue		= nv_swncq_qc_issue,
502 
503 	.freeze			= nv_mcp55_freeze,
504 	.thaw			= nv_mcp55_thaw,
505 	.error_handler		= nv_swncq_error_handler,
506 
507 #ifdef CONFIG_PM
508 	.port_suspend		= nv_swncq_port_suspend,
509 	.port_resume		= nv_swncq_port_resume,
510 #endif
511 	.port_start		= nv_swncq_port_start,
512 };
513 
514 struct nv_pi_priv {
515 	irq_handler_t			irq_handler;
516 	struct scsi_host_template	*sht;
517 };
518 
519 #define NV_PI_PRIV(_irq_handler, _sht) \
520 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
521 
522 static const struct ata_port_info nv_port_info[] = {
523 	/* generic */
524 	{
525 		.flags		= ATA_FLAG_SATA,
526 		.pio_mask	= NV_PIO_MASK,
527 		.mwdma_mask	= NV_MWDMA_MASK,
528 		.udma_mask	= NV_UDMA_MASK,
529 		.port_ops	= &nv_generic_ops,
530 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
531 	},
532 	/* nforce2/3 */
533 	{
534 		.flags		= ATA_FLAG_SATA,
535 		.pio_mask	= NV_PIO_MASK,
536 		.mwdma_mask	= NV_MWDMA_MASK,
537 		.udma_mask	= NV_UDMA_MASK,
538 		.port_ops	= &nv_nf2_ops,
539 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
540 	},
541 	/* ck804 */
542 	{
543 		.flags		= ATA_FLAG_SATA,
544 		.pio_mask	= NV_PIO_MASK,
545 		.mwdma_mask	= NV_MWDMA_MASK,
546 		.udma_mask	= NV_UDMA_MASK,
547 		.port_ops	= &nv_ck804_ops,
548 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
549 	},
550 	/* ADMA */
551 	{
552 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
553 		.pio_mask	= NV_PIO_MASK,
554 		.mwdma_mask	= NV_MWDMA_MASK,
555 		.udma_mask	= NV_UDMA_MASK,
556 		.port_ops	= &nv_adma_ops,
557 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
558 	},
559 	/* MCP5x */
560 	{
561 		.flags		= ATA_FLAG_SATA,
562 		.pio_mask	= NV_PIO_MASK,
563 		.mwdma_mask	= NV_MWDMA_MASK,
564 		.udma_mask	= NV_UDMA_MASK,
565 		.port_ops	= &nv_generic_ops,
566 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
567 	},
568 	/* SWNCQ */
569 	{
570 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
571 		.pio_mask	= NV_PIO_MASK,
572 		.mwdma_mask	= NV_MWDMA_MASK,
573 		.udma_mask	= NV_UDMA_MASK,
574 		.port_ops	= &nv_swncq_ops,
575 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
576 	},
577 };
578 
579 MODULE_AUTHOR("NVIDIA");
580 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
581 MODULE_LICENSE("GPL");
582 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
583 MODULE_VERSION(DRV_VERSION);
584 
585 static bool adma_enabled;
586 static bool swncq_enabled = true;
587 static bool msi_enabled;
588 
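/*
 * Switch the port out of ADMA mode and back to the legacy taskfile
 * (register) interface: wait for the engine to report IDLE, clear the
 * GO bit, then wait for the LEGACY status bit before flagging the port
 * as being in register mode.
 */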
589 static void nv_adma_register_mode(struct ata_port *ap)
590 {
591 	struct nv_adma_port_priv *pp = ap->private_data;
592 	void __iomem *mmio = pp->ctl_block;
593 	u16 tmp, status;
594 	int count = 0;
595 
596 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
597 		return;
598 
599 	status = readw(mmio + NV_ADMA_STAT);
600 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
601 		ndelay(50);
602 		status = readw(mmio + NV_ADMA_STAT);
603 		count++;
604 	}
605 	if (count == 20)
606 		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
607 			      status);
608 
609 	tmp = readw(mmio + NV_ADMA_CTL);
610 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
611 
612 	count = 0;
613 	status = readw(mmio + NV_ADMA_STAT);
614 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
615 		ndelay(50);
616 		status = readw(mmio + NV_ADMA_STAT);
617 		count++;
618 	}
619 	if (count == 20)
620 		ata_port_warn(ap,
621 			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
622 			      status);
623 
624 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
625 }
626 
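/*
 * Switch the port from the legacy register interface into ADMA mode:
 * set the GO bit and wait for the LEGACY flag to clear and IDLE to be
 * reported before clearing the register-mode flag.
 */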
627 static void nv_adma_mode(struct ata_port *ap)
628 {
629 	struct nv_adma_port_priv *pp = ap->private_data;
630 	void __iomem *mmio = pp->ctl_block;
631 	u16 tmp, status;
632 	int count = 0;
633 
634 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
635 		return;
636 
637 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
638 
639 	tmp = readw(mmio + NV_ADMA_CTL);
640 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
641 
642 	status = readw(mmio + NV_ADMA_STAT);
643 	while (((status & NV_ADMA_STAT_LEGACY) ||
644 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
645 		ndelay(50);
646 		status = readw(mmio + NV_ADMA_STAT);
647 		count++;
648 	}
649 	if (count == 20)
650 		ata_port_warn(ap,
651 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
652 			status);
653 
654 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
655 }
656 
657 static int nv_adma_slave_config(struct scsi_device *sdev)
658 {
659 	struct ata_port *ap = ata_shost_to_port(sdev->host);
660 	struct nv_adma_port_priv *pp = ap->private_data;
661 	struct nv_adma_port_priv *port0, *port1;
662 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
663 	unsigned long segment_boundary, flags;
664 	unsigned short sg_tablesize;
665 	int rc;
666 	int adma_enable;
667 	u32 current_reg, new_reg, config_mask;
668 
669 	rc = ata_scsi_slave_config(sdev);
670 
671 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
672 		/* Not a proper libata device, ignore */
673 		return rc;
674 
675 	spin_lock_irqsave(ap->lock, flags);
676 
677 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
678 		/*
679 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
680 		 * Therefore ATAPI commands are sent through the legacy interface.
681 		 * However, the legacy interface only supports 32-bit DMA.
682 		 * Restrict DMA parameters as required by the legacy interface
683 		 * when an ATAPI device is connected.
684 		 */
685 		segment_boundary = ATA_DMA_BOUNDARY;
686 		/* Subtract 1 since an extra entry may be needed for padding, see
687 		   libata-scsi.c */
688 		sg_tablesize = LIBATA_MAX_PRD - 1;
689 
690 		/* Since the legacy DMA engine is in use, we need to disable ADMA
691 		   on the port. */
692 		adma_enable = 0;
693 		nv_adma_register_mode(ap);
694 	} else {
695 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
696 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
697 		adma_enable = 1;
698 	}
699 
700 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
701 
702 	if (ap->port_no == 1)
703 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
704 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
705 	else
706 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
707 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
708 
709 	if (adma_enable) {
710 		new_reg = current_reg | config_mask;
711 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
712 	} else {
713 		new_reg = current_reg & ~config_mask;
714 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
715 	}
716 
717 	if (current_reg != new_reg)
718 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
719 
720 	port0 = ap->host->ports[0]->private_data;
721 	port1 = ap->host->ports[1]->private_data;
722 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
723 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
724 		/*
725 		 * We have to set the DMA mask to 32-bit if either port is in
726 		 * ATAPI mode, since they are on the same PCI device which is
727 		 * used for DMA mapping.  If either SCSI device is not allocated
728 		 * yet, it's OK since that port will discover its correct
729 		 * setting when it does get allocated.
730 		 */
731 		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
732 	} else {
733 		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
734 	}
735 
736 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
737 	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
738 	ata_port_info(ap,
739 		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
740 		      (unsigned long long)*ap->host->dev->dma_mask,
741 		      segment_boundary, sg_tablesize);
742 
743 	spin_unlock_irqrestore(ap->lock, flags);
744 
745 	return rc;
746 }
747 
748 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
749 {
750 	struct nv_adma_port_priv *pp = qc->ap->private_data;
751 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
752 }
753 
754 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
755 {
756 	/* Other than when internal or pass-through commands are executed,
757 	   the only time this function will be called in ADMA mode will be
758 	   if a command fails. In the failure case we don't care about going
759 	   into register mode with ADMA commands pending, as the commands will
760 	   all shortly be aborted anyway. We assume that NCQ commands are not
761 	   issued via passthrough, which is the only way that switching into
762 	   ADMA mode could abort outstanding commands. */
763 	nv_adma_register_mode(ap);
764 
765 	ata_sff_tf_read(ap, tf);
766 }
767 
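/*
 * Encode an ATA taskfile as a sequence of 16-bit CPB entries.  Each entry
 * carries a shadow register index in the high byte and the value to write
 * in the low byte; unused slots are filled with IGN and the command entry
 * is tagged with CMDEND.
 */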
768 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
769 {
770 	unsigned int idx = 0;
771 
772 	if (tf->flags & ATA_TFLAG_ISADDR) {
773 		if (tf->flags & ATA_TFLAG_LBA48) {
774 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
775 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
776 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
777 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
778 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
779 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
780 		} else
781 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
782 
783 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
784 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
785 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
786 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
787 	}
788 
789 	if (tf->flags & ATA_TFLAG_DEVICE)
790 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
791 
792 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
793 
794 	while (idx < 12)
795 		cpb[idx++] = cpu_to_le16(IGN);
796 
797 	return idx;
798 }
799 
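/* Examine the response flags of one CPB.  Returns 1 if the command
   completed, 0 if it is still in flight, and -1 after kicking off EH
   when an error was flagged. */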
800 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
801 {
802 	struct nv_adma_port_priv *pp = ap->private_data;
803 	u8 flags = pp->cpb[cpb_num].resp_flags;
804 
805 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
806 
807 	if (unlikely((force_err ||
808 		     flags & (NV_CPB_RESP_ATA_ERR |
809 			      NV_CPB_RESP_CMD_ERR |
810 			      NV_CPB_RESP_CPB_ERR)))) {
811 		struct ata_eh_info *ehi = &ap->link.eh_info;
812 		int freeze = 0;
813 
814 		ata_ehi_clear_desc(ehi);
815 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
816 		if (flags & NV_CPB_RESP_ATA_ERR) {
817 			ata_ehi_push_desc(ehi, "ATA error");
818 			ehi->err_mask |= AC_ERR_DEV;
819 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
820 			ata_ehi_push_desc(ehi, "CMD error");
821 			ehi->err_mask |= AC_ERR_DEV;
822 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
823 			ata_ehi_push_desc(ehi, "CPB error");
824 			ehi->err_mask |= AC_ERR_SYSTEM;
825 			freeze = 1;
826 		} else {
827 			/* notifier error, but no error in CPB flags? */
828 			ata_ehi_push_desc(ehi, "unknown");
829 			ehi->err_mask |= AC_ERR_OTHER;
830 			freeze = 1;
831 		}
832 		/* Kill all commands. EH will determine what actually failed. */
833 		if (freeze)
834 			ata_port_freeze(ap);
835 		else
836 			ata_port_abort(ap);
837 		return -1;
838 	}
839 
840 	if (likely(flags & NV_CPB_RESP_DONE))
841 		return 1;
842 	return 0;
843 }
844 
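/* Legacy per-port interrupt helper: freeze on hotplug notifications,
   ignore interrupts that are not device interrupts, and otherwise hand
   off to the BMDMA interrupt handler. */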
845 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
846 {
847 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
848 
849 	/* freeze if hotplugged */
850 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
851 		ata_port_freeze(ap);
852 		return 1;
853 	}
854 
855 	/* bail out if not our interrupt */
856 	if (!(irq_stat & NV_INT_DEV))
857 		return 0;
858 
859 	/* DEV interrupt w/ no active qc? */
860 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
861 		ata_sff_check_status(ap);
862 		return 1;
863 	}
864 
865 	/* handle interrupt */
866 	return ata_bmdma_port_intr(ap, qc);
867 }
868 
869 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
870 {
871 	struct ata_host *host = dev_instance;
872 	int i, handled = 0;
873 	u32 notifier_clears[2];
874 
875 	spin_lock(&host->lock);
876 
877 	for (i = 0; i < host->n_ports; i++) {
878 		struct ata_port *ap = host->ports[i];
879 		struct nv_adma_port_priv *pp = ap->private_data;
880 		void __iomem *mmio = pp->ctl_block;
881 		u16 status;
882 		u32 gen_ctl;
883 		u32 notifier, notifier_error;
884 
885 		notifier_clears[i] = 0;
886 
887 		/* if ADMA is disabled, use standard ata interrupt handler */
888 		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
889 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
890 				>> (NV_INT_PORT_SHIFT * i);
891 			handled += nv_host_intr(ap, irq_stat);
892 			continue;
893 		}
894 
895 		/* if in ATA register mode, check for standard interrupts */
896 		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
897 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
898 				>> (NV_INT_PORT_SHIFT * i);
899 			if (ata_tag_valid(ap->link.active_tag))
900 				/** NV_INT_DEV indication seems unreliable
901 				    at times at least in ADMA mode. Force it
902 				    on always when a command is active, to
903 				    prevent losing interrupts. */
904 				irq_stat |= NV_INT_DEV;
905 			handled += nv_host_intr(ap, irq_stat);
906 		}
907 
908 		notifier = readl(mmio + NV_ADMA_NOTIFIER);
909 		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
910 		notifier_clears[i] = notifier | notifier_error;
911 
912 		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
913 
914 		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
915 		    !notifier_error)
916 			/* Nothing to do */
917 			continue;
918 
919 		status = readw(mmio + NV_ADMA_STAT);
920 
921 		/*
922 		 * Clear status. Ensure the controller sees the
923 		 * clearing before we start looking at any of the CPB
924 		 * statuses, so that any CPB completions after this
925 		 * point in the handler will raise another interrupt.
926 		 */
927 		writew(status, mmio + NV_ADMA_STAT);
928 		readw(mmio + NV_ADMA_STAT); /* flush posted write */
929 		rmb();
930 
931 		handled++; /* irq handled if we got here */
932 
933 		/* freeze if hotplugged or controller error */
934 		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
935 				       NV_ADMA_STAT_HOTUNPLUG |
936 				       NV_ADMA_STAT_TIMEOUT |
937 				       NV_ADMA_STAT_SERROR))) {
938 			struct ata_eh_info *ehi = &ap->link.eh_info;
939 
940 			ata_ehi_clear_desc(ehi);
941 			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
942 			if (status & NV_ADMA_STAT_TIMEOUT) {
943 				ehi->err_mask |= AC_ERR_SYSTEM;
944 				ata_ehi_push_desc(ehi, "timeout");
945 			} else if (status & NV_ADMA_STAT_HOTPLUG) {
946 				ata_ehi_hotplugged(ehi);
947 				ata_ehi_push_desc(ehi, "hotplug");
948 			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
949 				ata_ehi_hotplugged(ehi);
950 				ata_ehi_push_desc(ehi, "hot unplug");
951 			} else if (status & NV_ADMA_STAT_SERROR) {
952 				/* let EH analyze SError and figure out cause */
953 				ata_ehi_push_desc(ehi, "SError");
954 			} else
955 				ata_ehi_push_desc(ehi, "unknown");
956 			ata_port_freeze(ap);
957 			continue;
958 		}
959 
960 		if (status & (NV_ADMA_STAT_DONE |
961 			      NV_ADMA_STAT_CPBERR |
962 			      NV_ADMA_STAT_CMD_COMPLETE)) {
963 			u32 check_commands = notifier_clears[i];
964 			u32 done_mask = 0;
965 			int pos, rc;
966 
967 			if (status & NV_ADMA_STAT_CPBERR) {
968 				/* check all active commands */
969 				if (ata_tag_valid(ap->link.active_tag))
970 					check_commands = 1 <<
971 						ap->link.active_tag;
972 				else
973 					check_commands = ap->link.sactive;
974 			}
975 
976 			/* check CPBs for completed commands */
977 			while ((pos = ffs(check_commands))) {
978 				pos--;
979 				rc = nv_adma_check_cpb(ap, pos,
980 						notifier_error & (1 << pos));
981 				if (rc > 0)
982 					done_mask |= 1 << pos;
983 				else if (unlikely(rc < 0))
984 					check_commands = 0;
985 				check_commands &= ~(1 << pos);
986 			}
987 			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
988 		}
989 	}
990 
991 	if (notifier_clears[0] || notifier_clears[1]) {
992 		/* Note: Both notifier clear registers must be written
993 		   if either is set, even if one is zero, according to NVIDIA. */
994 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
995 		writel(notifier_clears[0], pp->notifier_clear_block);
996 		pp = host->ports[1]->private_data;
997 		writel(notifier_clears[1], pp->notifier_clear_block);
998 	}
999 
1000 	spin_unlock(&host->lock);
1001 
1002 	return IRQ_RETVAL(handled);
1003 }
1004 
1005 static void nv_adma_freeze(struct ata_port *ap)
1006 {
1007 	struct nv_adma_port_priv *pp = ap->private_data;
1008 	void __iomem *mmio = pp->ctl_block;
1009 	u16 tmp;
1010 
1011 	nv_ck804_freeze(ap);
1012 
1013 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1014 		return;
1015 
1016 	/* clear any outstanding CK804 notifications */
1017 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1018 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1019 
1020 	/* Disable interrupt */
1021 	tmp = readw(mmio + NV_ADMA_CTL);
1022 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1023 		mmio + NV_ADMA_CTL);
1024 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1025 }
1026 
1027 static void nv_adma_thaw(struct ata_port *ap)
1028 {
1029 	struct nv_adma_port_priv *pp = ap->private_data;
1030 	void __iomem *mmio = pp->ctl_block;
1031 	u16 tmp;
1032 
1033 	nv_ck804_thaw(ap);
1034 
1035 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1036 		return;
1037 
1038 	/* Enable interrupt */
1039 	tmp = readw(mmio + NV_ADMA_CTL);
1040 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1041 		mmio + NV_ADMA_CTL);
1042 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1043 }
1044 
1045 static void nv_adma_irq_clear(struct ata_port *ap)
1046 {
1047 	struct nv_adma_port_priv *pp = ap->private_data;
1048 	void __iomem *mmio = pp->ctl_block;
1049 	u32 notifier_clears[2];
1050 
1051 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1052 		ata_bmdma_irq_clear(ap);
1053 		return;
1054 	}
1055 
1056 	/* clear any outstanding CK804 notifications */
1057 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1058 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1059 
1060 	/* clear ADMA status */
1061 	writew(0xffff, mmio + NV_ADMA_STAT);
1062 
1063 	/* clear notifiers - note both ports need to be written with
1064 	   something even though we are only clearing on one */
1065 	if (ap->port_no == 0) {
1066 		notifier_clears[0] = 0xFFFFFFFF;
1067 		notifier_clears[1] = 0;
1068 	} else {
1069 		notifier_clears[0] = 0;
1070 		notifier_clears[1] = 0xFFFFFFFF;
1071 	}
1072 	pp = ap->host->ports[0]->private_data;
1073 	writel(notifier_clears[0], pp->notifier_clear_block);
1074 	pp = ap->host->ports[1]->private_data;
1075 	writel(notifier_clears[1], pp->notifier_clear_block);
1076 }
1077 
1078 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1079 {
1080 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1081 
1082 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1083 		ata_bmdma_post_internal_cmd(qc);
1084 }
1085 
1086 static int nv_adma_port_start(struct ata_port *ap)
1087 {
1088 	struct device *dev = ap->host->dev;
1089 	struct nv_adma_port_priv *pp;
1090 	int rc;
1091 	void *mem;
1092 	dma_addr_t mem_dma;
1093 	void __iomem *mmio;
1094 	struct pci_dev *pdev = to_pci_dev(dev);
1095 	u16 tmp;
1096 
1097 	VPRINTK("ENTER\n");
1098 
1099 	/*
1100 	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1101 	 * pad buffers.
1102 	 */
1103 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1104 	if (rc)
1105 		return rc;
1106 
1107 	/* we might fallback to bmdma, allocate bmdma resources */
1108 	rc = ata_bmdma_port_start(ap);
1109 	if (rc)
1110 		return rc;
1111 
1112 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1113 	if (!pp)
1114 		return -ENOMEM;
1115 
1116 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1117 	       ap->port_no * NV_ADMA_PORT_SIZE;
1118 	pp->ctl_block = mmio;
1119 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1120 	pp->notifier_clear_block = pp->gen_block +
1121 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1122 
1123 	/*
1124 	 * Now that the legacy PRD and padding buffer are allocated we can
1125 	 * raise the DMA mask to allocate the CPB/APRD table.
1126 	 */
1127 	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1128 
1129 	pp->adma_dma_mask = *dev->dma_mask;
1130 
1131 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1132 				  &mem_dma, GFP_KERNEL);
1133 	if (!mem)
1134 		return -ENOMEM;
1135 
1136 	/*
1137 	 * First item in chunk of DMA memory:
1138 	 * 128-byte command parameter block (CPB)
1139 	 * one for each command tag
1140 	 */
1141 	pp->cpb     = mem;
1142 	pp->cpb_dma = mem_dma;
1143 
1144 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1145 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1146 
1147 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1148 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1149 
1150 	/*
1151 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1152 	 */
1153 	pp->aprd = mem;
1154 	pp->aprd_dma = mem_dma;
1155 
1156 	ap->private_data = pp;
1157 
1158 	/* clear any outstanding interrupt conditions */
1159 	writew(0xffff, mmio + NV_ADMA_STAT);
1160 
1161 	/* initialize port variables */
1162 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1163 
1164 	/* clear CPB fetch count */
1165 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1166 
1167 	/* clear GO for register mode, enable interrupt */
1168 	tmp = readw(mmio + NV_ADMA_CTL);
1169 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1170 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1171 
1172 	tmp = readw(mmio + NV_ADMA_CTL);
1173 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1174 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1175 	udelay(1);
1176 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1177 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1178 
1179 	return 0;
1180 }
1181 
1182 static void nv_adma_port_stop(struct ata_port *ap)
1183 {
1184 	struct nv_adma_port_priv *pp = ap->private_data;
1185 	void __iomem *mmio = pp->ctl_block;
1186 
1187 	VPRINTK("ENTER\n");
1188 	writew(0, mmio + NV_ADMA_CTL);
1189 }
1190 
1191 #ifdef CONFIG_PM
1192 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1193 {
1194 	struct nv_adma_port_priv *pp = ap->private_data;
1195 	void __iomem *mmio = pp->ctl_block;
1196 
1197 	/* Go to register mode - clears GO */
1198 	nv_adma_register_mode(ap);
1199 
1200 	/* clear CPB fetch count */
1201 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1202 
1203 	/* disable interrupt, shut down port */
1204 	writew(0, mmio + NV_ADMA_CTL);
1205 
1206 	return 0;
1207 }
1208 
1209 static int nv_adma_port_resume(struct ata_port *ap)
1210 {
1211 	struct nv_adma_port_priv *pp = ap->private_data;
1212 	void __iomem *mmio = pp->ctl_block;
1213 	u16 tmp;
1214 
1215 	/* set CPB block location */
1216 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1217 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1218 
1219 	/* clear any outstanding interrupt conditions */
1220 	writew(0xffff, mmio + NV_ADMA_STAT);
1221 
1222 	/* initialize port variables */
1223 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1224 
1225 	/* clear CPB fetch count */
1226 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1227 
1228 	/* clear GO for register mode, enable interrupt */
1229 	tmp = readw(mmio + NV_ADMA_CTL);
1230 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1231 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1232 
1233 	tmp = readw(mmio + NV_ADMA_CTL);
1234 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1235 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1236 	udelay(1);
1237 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1238 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1239 
1240 	return 0;
1241 }
1242 #endif
1243 
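/* Point the standard taskfile accessors at the ADMA port register block so
   the SFF helpers keep working while the port is in register mode. */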
1244 static void nv_adma_setup_port(struct ata_port *ap)
1245 {
1246 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1247 	struct ata_ioports *ioport = &ap->ioaddr;
1248 
1249 	VPRINTK("ENTER\n");
1250 
1251 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1252 
1253 	ioport->cmd_addr	= mmio;
1254 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1255 	ioport->error_addr	=
1256 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1257 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1258 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1259 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1260 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1261 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1262 	ioport->status_addr	=
1263 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1264 	ioport->altstatus_addr	=
1265 	ioport->ctl_addr	= mmio + 0x20;
1266 }
1267 
1268 static int nv_adma_host_init(struct ata_host *host)
1269 {
1270 	struct pci_dev *pdev = to_pci_dev(host->dev);
1271 	unsigned int i;
1272 	u32 tmp32;
1273 
1274 	VPRINTK("ENTER\n");
1275 
1276 	/* enable ADMA on the ports */
1277 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1278 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1279 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1280 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1281 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1282 
1283 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1284 
1285 	for (i = 0; i < host->n_ports; i++)
1286 		nv_adma_setup_port(host->ports[i]);
1287 
1288 	return 0;
1289 }
1290 
1291 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1292 			      struct scatterlist *sg,
1293 			      int idx,
1294 			      struct nv_adma_prd *aprd)
1295 {
1296 	u8 flags = 0;
1297 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1298 		flags |= NV_APRD_WRITE;
1299 	if (idx == qc->n_elem - 1)
1300 		flags |= NV_APRD_END;
1301 	else if (idx != 4)
1302 		flags |= NV_APRD_CONT;
1303 
1304 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1305 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1306 	aprd->flags = flags;
1307 	aprd->packet_len = 0;
1308 }
1309 
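/*
 * Build the scatter/gather list for a command: the first five APRD entries
 * live inside the CPB itself, and any remainder goes into this tag's slot
 * of the external APRD table pointed to by next_aprd.
 */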
1310 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1311 {
1312 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1313 	struct nv_adma_prd *aprd;
1314 	struct scatterlist *sg;
1315 	unsigned int si;
1316 
1317 	VPRINTK("ENTER\n");
1318 
1319 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1320 		aprd = (si < 5) ? &cpb->aprd[si] :
1321 			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1322 		nv_adma_fill_aprd(qc, sg, si, aprd);
1323 	}
1324 	if (si > 5)
1325 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1326 	else
1327 		cpb->next_aprd = cpu_to_le64(0);
1328 }
1329 
1330 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1331 {
1332 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1333 
1334 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1335 	   or interrupt-driven no-data commands. */
1336 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1337 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1338 		return 1;
1339 
1340 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1341 	   (qc->tf.protocol == ATA_PROT_NODATA))
1342 		return 0;
1343 
1344 	return 1;
1345 }
1346 
1347 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1348 {
1349 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1350 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1351 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1352 		       NV_CPB_CTL_IEN;
1353 
1354 	if (nv_adma_use_reg_mode(qc)) {
1355 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1356 			(qc->flags & ATA_QCFLAG_DMAMAP));
1357 		nv_adma_register_mode(qc->ap);
1358 		ata_bmdma_qc_prep(qc);
1359 		return;
1360 	}
1361 
1362 	cpb->resp_flags = NV_CPB_RESP_DONE;
1363 	wmb();
1364 	cpb->ctl_flags = 0;
1365 	wmb();
1366 
1367 	cpb->len		= 3;
1368 	cpb->tag		= qc->hw_tag;
1369 	cpb->next_cpb_idx	= 0;
1370 
1371 	/* turn on NCQ flags for NCQ commands */
1372 	if (qc->tf.protocol == ATA_PROT_NCQ)
1373 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1374 
1375 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1376 
1377 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1378 
1379 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1380 		nv_adma_fill_sg(qc, cpb);
1381 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1382 	} else
1383 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1384 
1385 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1386 	   until we are finished filling in all of the contents */
1387 	wmb();
1388 	cpb->ctl_flags = ctl_flags;
1389 	wmb();
1390 	cpb->resp_flags = 0;
1391 }
1392 
1393 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1394 {
1395 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1396 	void __iomem *mmio = pp->ctl_block;
1397 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1398 
1399 	VPRINTK("ENTER\n");
1400 
1401 	/* We can't handle result taskfile with NCQ commands, since
1402 	   retrieving the taskfile switches us out of ADMA mode and would abort
1403 	   existing commands. */
1404 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1405 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1406 		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1407 		return AC_ERR_SYSTEM;
1408 	}
1409 
1410 	if (nv_adma_use_reg_mode(qc)) {
1411 		/* use ATA register mode */
1412 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1413 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1414 			(qc->flags & ATA_QCFLAG_DMAMAP));
1415 		nv_adma_register_mode(qc->ap);
1416 		return ata_bmdma_qc_issue(qc);
1417 	} else
1418 		nv_adma_mode(qc->ap);
1419 
1420 	/* write append register, command tag in lower 8 bits
1421 	   and (number of cpbs to append -1) in top 8 bits */
1422 	wmb();
1423 
1424 	if (curr_ncq != pp->last_issue_ncq) {
1425 		/* Seems to need some delay before switching between NCQ and
1426 		   non-NCQ commands, else we get command timeouts and such. */
1427 		udelay(20);
1428 		pp->last_issue_ncq = curr_ncq;
1429 	}
1430 
1431 	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1432 
1433 	DPRINTK("Issued tag %u\n", qc->hw_tag);
1434 
1435 	return 0;
1436 }
1437 
1438 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1439 {
1440 	struct ata_host *host = dev_instance;
1441 	unsigned int i;
1442 	unsigned int handled = 0;
1443 	unsigned long flags;
1444 
1445 	spin_lock_irqsave(&host->lock, flags);
1446 
1447 	for (i = 0; i < host->n_ports; i++) {
1448 		struct ata_port *ap = host->ports[i];
1449 		struct ata_queued_cmd *qc;
1450 
1451 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1452 		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1453 			handled += ata_bmdma_port_intr(ap, qc);
1454 		} else {
1455 			/*
1456 			 * No request pending?  Clear interrupt status
1457 			 * anyway, in case there's one pending.
1458 			 */
1459 			ap->ops->sff_check_status(ap);
1460 		}
1461 	}
1462 
1463 	spin_unlock_irqrestore(&host->lock, flags);
1464 
1465 	return IRQ_RETVAL(handled);
1466 }
1467 
1468 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1469 {
1470 	int i, handled = 0;
1471 
1472 	for (i = 0; i < host->n_ports; i++) {
1473 		handled += nv_host_intr(host->ports[i], irq_stat);
1474 		irq_stat >>= NV_INT_PORT_SHIFT;
1475 	}
1476 
1477 	return IRQ_RETVAL(handled);
1478 }
1479 
1480 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1481 {
1482 	struct ata_host *host = dev_instance;
1483 	u8 irq_stat;
1484 	irqreturn_t ret;
1485 
1486 	spin_lock(&host->lock);
1487 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1488 	ret = nv_do_interrupt(host, irq_stat);
1489 	spin_unlock(&host->lock);
1490 
1491 	return ret;
1492 }
1493 
1494 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1495 {
1496 	struct ata_host *host = dev_instance;
1497 	u8 irq_stat;
1498 	irqreturn_t ret;
1499 
1500 	spin_lock(&host->lock);
1501 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1502 	ret = nv_do_interrupt(host, irq_stat);
1503 	spin_unlock(&host->lock);
1504 
1505 	return ret;
1506 }
1507 
1508 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1509 {
1510 	if (sc_reg > SCR_CONTROL)
1511 		return -EINVAL;
1512 
1513 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1514 	return 0;
1515 }
1516 
1517 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1518 {
1519 	if (sc_reg > SCR_CONTROL)
1520 		return -EINVAL;
1521 
1522 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1523 	return 0;
1524 }
1525 
1526 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1527 			unsigned long deadline)
1528 {
1529 	struct ata_eh_context *ehc = &link->eh_context;
1530 
1531 	/* Do hardreset iff it's post-boot probing, please read the
1532 	 * comment above port ops for details.
1533 	 */
1534 	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1535 	    !ata_dev_enabled(link->device))
1536 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1537 				    NULL, NULL);
1538 	else {
1539 		const unsigned long *timing = sata_ehc_deb_timing(ehc);
1540 		int rc;
1541 
1542 		if (!(ehc->i.flags & ATA_EHI_QUIET))
1543 			ata_link_info(link,
1544 				      "nv: skipping hardreset on occupied port\n");
1545 
1546 		/* make sure the link is online */
1547 		rc = sata_link_resume(link, timing, deadline);
1548 		/* whine about phy resume failure but proceed */
1549 		if (rc && rc != -EOPNOTSUPP)
1550 			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1551 				      rc);
1552 	}
1553 
1554 	/* device signature acquisition is unreliable */
1555 	return -EAGAIN;
1556 }
1557 
1558 static void nv_nf2_freeze(struct ata_port *ap)
1559 {
1560 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1561 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1562 	u8 mask;
1563 
1564 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1565 	mask &= ~(NV_INT_ALL << shift);
1566 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1567 }
1568 
1569 static void nv_nf2_thaw(struct ata_port *ap)
1570 {
1571 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1572 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1573 	u8 mask;
1574 
1575 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1576 
1577 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1578 	mask |= (NV_INT_MASK << shift);
1579 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1580 }
1581 
1582 static void nv_ck804_freeze(struct ata_port *ap)
1583 {
1584 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1585 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1586 	u8 mask;
1587 
1588 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1589 	mask &= ~(NV_INT_ALL << shift);
1590 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1591 }
1592 
1593 static void nv_ck804_thaw(struct ata_port *ap)
1594 {
1595 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1596 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1597 	u8 mask;
1598 
1599 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1600 
1601 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1602 	mask |= (NV_INT_MASK << shift);
1603 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1604 }
1605 
1606 static void nv_mcp55_freeze(struct ata_port *ap)
1607 {
1608 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1609 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1610 	u32 mask;
1611 
1612 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1613 
1614 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1615 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1616 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1617 }
1618 
1619 static void nv_mcp55_thaw(struct ata_port *ap)
1620 {
1621 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1622 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1623 	u32 mask;
1624 
1625 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1626 
1627 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1628 	mask |= (NV_INT_MASK_MCP55 << shift);
1629 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1630 }
1631 
1632 static void nv_adma_error_handler(struct ata_port *ap)
1633 {
1634 	struct nv_adma_port_priv *pp = ap->private_data;
1635 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1636 		void __iomem *mmio = pp->ctl_block;
1637 		int i;
1638 		u16 tmp;
1639 
1640 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1641 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1642 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1643 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1644 			u32 status = readw(mmio + NV_ADMA_STAT);
1645 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1646 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1647 
1648 			ata_port_err(ap,
1649 				"EH in ADMA mode, notifier 0x%X "
1650 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1651 				"next cpb count 0x%X next cpb idx 0x%x\n",
1652 				notifier, notifier_error, gen_ctl, status,
1653 				cpb_count, next_cpb_idx);
1654 
1655 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1656 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1657 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1658 				    ap->link.sactive & (1 << i))
1659 					ata_port_err(ap,
1660 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1661 						i, cpb->ctl_flags, cpb->resp_flags);
1662 			}
1663 		}
1664 
1665 		/* Push us back into port register mode for error handling. */
1666 		nv_adma_register_mode(ap);
1667 
1668 		/* Mark all of the CPBs as invalid to prevent them from
1669 		   being executed */
1670 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1671 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1672 
1673 		/* clear CPB fetch count */
1674 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1675 
1676 		/* Reset channel */
1677 		tmp = readw(mmio + NV_ADMA_CTL);
1678 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1679 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1680 		udelay(1);
1681 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1682 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1683 	}
1684 
1685 	ata_bmdma_error_handler(ap);
1686 }
1687 
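/* Append a queued command to the tail of the software defer queue so it
 * can be issued once the commands currently in flight complete.
 */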
1688 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1689 {
1690 	struct nv_swncq_port_priv *pp = ap->private_data;
1691 	struct defer_queue *dq = &pp->defer_queue;
1692 
1693 	/* warn if the defer queue is already full */
1694 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1695 	dq->defer_bits |= (1 << qc->hw_tag);
1696 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1697 }
1698 
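/* Remove and return the oldest command from the defer queue, or NULL if
 * the queue is empty.
 */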
1699 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1700 {
1701 	struct nv_swncq_port_priv *pp = ap->private_data;
1702 	struct defer_queue *dq = &pp->defer_queue;
1703 	unsigned int tag;
1704 
1705 	if (dq->head == dq->tail)	/* queue is empty */
1706 		return NULL;
1707 
1708 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1709 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1710 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1711 	dq->defer_bits &= ~(1 << tag);
1712 
1713 	return ata_qc_from_tag(ap, tag);
1714 }
1715 
1716 static void nv_swncq_fis_reinit(struct ata_port *ap)
1717 {
1718 	struct nv_swncq_port_priv *pp = ap->private_data;
1719 
1720 	pp->dhfis_bits = 0;
1721 	pp->dmafis_bits = 0;
1722 	pp->sdbfis_bits = 0;
1723 	pp->ncq_flags = 0;
1724 }
1725 
1726 static void nv_swncq_pp_reinit(struct ata_port *ap)
1727 {
1728 	struct nv_swncq_port_priv *pp = ap->private_data;
1729 	struct defer_queue *dq = &pp->defer_queue;
1730 
1731 	dq->head = 0;
1732 	dq->tail = 0;
1733 	dq->defer_bits = 0;
1734 	pp->qc_active = 0;
1735 	pp->last_issue_tag = ATA_TAG_POISON;
1736 	nv_swncq_fis_reinit(ap);
1737 }
1738 
1739 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1740 {
1741 	struct nv_swncq_port_priv *pp = ap->private_data;
1742 
1743 	writew(fis, pp->irq_block);
1744 }
1745 
1746 static void __ata_bmdma_stop(struct ata_port *ap)
1747 {
1748 	struct ata_queued_cmd qc;
1749 
1750 	qc.ap = ap;
1751 	ata_bmdma_stop(&qc);
1752 }
1753 
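/* Dump the SWNCQ bookkeeping and controller state for debugging, then
 * quiesce the port: reset the per-port state, clear pending interrupts
 * and stop the BMDMA engine.
 */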
1754 static void nv_swncq_ncq_stop(struct ata_port *ap)
1755 {
1756 	struct nv_swncq_port_priv *pp = ap->private_data;
1757 	unsigned int i;
1758 	u32 sactive;
1759 	u32 done_mask;
1760 
1761 	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
1762 		     ap->qc_active, ap->link.sactive);
1763 	ata_port_err(ap,
1764 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1765 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1766 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1767 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1768 
1769 	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1770 		     ap->ops->sff_check_status(ap),
1771 		     ioread8(ap->ioaddr.error_addr));
1772 
1773 	sactive = readl(pp->sactive_block);
1774 	done_mask = pp->qc_active ^ sactive;
1775 
1776 	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1777 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1778 		u8 err = 0;
1779 		if (pp->qc_active & (1 << i))
1780 			err = 0;
1781 		else if (done_mask & (1 << i))
1782 			err = 1;
1783 		else
1784 			continue;
1785 
1786 		ata_port_err(ap,
1787 			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1788 			     (pp->dhfis_bits >> i) & 0x1,
1789 			     (pp->dmafis_bits >> i) & 0x1,
1790 			     (pp->sdbfis_bits >> i) & 0x1,
1791 			     (sactive >> i) & 0x1,
1792 			     (err ? "error! tag doesn't exist" : " "));
1793 	}
1794 
1795 	nv_swncq_pp_reinit(ap);
1796 	ap->ops->sff_irq_clear(ap);
1797 	__ata_bmdma_stop(ap);
1798 	nv_swncq_irq_clear(ap, 0xffff);
1799 }
1800 
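/* SWNCQ error handler: if NCQ commands are outstanding, stop NCQ
 * operation and request a reset, then run the BMDMA error handler.
 */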
1801 static void nv_swncq_error_handler(struct ata_port *ap)
1802 {
1803 	struct ata_eh_context *ehc = &ap->link.eh_context;
1804 
1805 	if (ap->link.sactive) {
1806 		nv_swncq_ncq_stop(ap);
1807 		ehc->i.action |= ATA_EH_RESET;
1808 	}
1809 
1810 	ata_bmdma_error_handler(ap);
1811 }
1812 
1813 #ifdef CONFIG_PM
1814 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1815 {
1816 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1817 	u32 tmp;
1818 
1819 	/* clear irq */
1820 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1821 
1822 	/* disable irq */
1823 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1824 
1825 	/* disable swncq */
1826 	tmp = readl(mmio + NV_CTL_MCP55);
1827 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1828 	writel(tmp, mmio + NV_CTL_MCP55);
1829 
1830 	return 0;
1831 }
1832 
1833 static int nv_swncq_port_resume(struct ata_port *ap)
1834 {
1835 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1836 	u32 tmp;
1837 
1838 	/* clear irq */
1839 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1840 
1841 	/* enable irq */
1842 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1843 
1844 	/* enable swncq */
1845 	tmp = readl(mmio + NV_CTL_MCP55);
1846 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1847 
1848 	return 0;
1849 }
1850 #endif
1851 
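/* One-time SWNCQ host setup: clear the ECO 398 bit in PCI config space,
 * enable SWNCQ on both ports, unmask the MCP55 interrupt sources used by
 * the driver and clear any pending port interrupt status.
 */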
1852 static void nv_swncq_host_init(struct ata_host *host)
1853 {
1854 	u32 tmp;
1855 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1856 	struct pci_dev *pdev = to_pci_dev(host->dev);
1857 	u8 regval;
1858 
1859 	/* disable ECO 398 */
1860 	pci_read_config_byte(pdev, 0x7f, &regval);
1861 	regval &= ~(1 << 7);
1862 	pci_write_config_byte(pdev, 0x7f, regval);
1863 
1864 	/* enable swncq */
1865 	tmp = readl(mmio + NV_CTL_MCP55);
1866 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1867 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1868 
1869 	/* enable irq intr */
1870 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1871 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1872 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1873 
1874 	/* clear port irq */
1875 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1876 }
1877 
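/* Per-device SCSI configuration hook: force the queue depth to 1
 * (effectively disabling NCQ) for Maxtor drives attached to MCP51, or to
 * MCP55 revisions up to A2.
 */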
1878 static int nv_swncq_slave_config(struct scsi_device *sdev)
1879 {
1880 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1881 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1882 	struct ata_device *dev;
1883 	int rc;
1884 	u8 rev;
1885 	u8 check_maxtor = 0;
1886 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1887 
1888 	rc = ata_scsi_slave_config(sdev);
1889 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1890 		/* Not a proper libata device, ignore */
1891 		return rc;
1892 
1893 	dev = &ap->link.device[sdev->id];
1894 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1895 		return rc;
1896 
1897 	/* if MCP51 and Maxtor, then disable ncq */
1898 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1899 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1900 		check_maxtor = 1;
1901 
1902 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1903 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1904 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1905 		pci_read_config_byte(pdev, 0x8, &rev);
1906 		if (rev <= 0xa2)
1907 			check_maxtor = 1;
1908 	}
1909 
1910 	if (!check_maxtor)
1911 		return rc;
1912 
1913 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1914 
1915 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1916 		ata_scsi_change_queue_depth(sdev, 1);
1917 		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1918 			       sdev->queue_depth);
1919 	}
1920 
1921 	return rc;
1922 }
1923 
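/* Allocate per-port SWNCQ state: BMDMA resources for the non-NCQ
 * fallback path, one PRD table per NCQ tag, and the MMIO addresses used
 * for SActive, interrupt status and tag readback.
 */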
1924 static int nv_swncq_port_start(struct ata_port *ap)
1925 {
1926 	struct device *dev = ap->host->dev;
1927 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1928 	struct nv_swncq_port_priv *pp;
1929 	int rc;
1930 
1931 	/* we might fall back to BMDMA, so allocate BMDMA resources */
1932 	rc = ata_bmdma_port_start(ap);
1933 	if (rc)
1934 		return rc;
1935 
1936 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1937 	if (!pp)
1938 		return -ENOMEM;
1939 
1940 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1941 				      &pp->prd_dma, GFP_KERNEL);
1942 	if (!pp->prd)
1943 		return -ENOMEM;
1944 
1945 	ap->private_data = pp;
1946 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1947 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1948 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1949 
1950 	return 0;
1951 }
1952 
1953 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1954 {
1955 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1956 		ata_bmdma_qc_prep(qc);
1957 		return;
1958 	}
1959 
1960 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1961 		return;
1962 
1963 	nv_swncq_fill_sg(qc);
1964 }
1965 
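/* Build the PRD table for an NCQ command in the slot reserved for its
 * tag, splitting segments so that no PRD entry crosses a 64K boundary
 * and marking the final entry with ATA_PRD_EOT.
 */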
1966 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1967 {
1968 	struct ata_port *ap = qc->ap;
1969 	struct scatterlist *sg;
1970 	struct nv_swncq_port_priv *pp = ap->private_data;
1971 	struct ata_bmdma_prd *prd;
1972 	unsigned int si, idx;
1973 
1974 	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1975 
1976 	idx = 0;
1977 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1978 		u32 addr, offset;
1979 		u32 sg_len, len;
1980 
1981 		addr = (u32)sg_dma_address(sg);
1982 		sg_len = sg_dma_len(sg);
1983 
1984 		while (sg_len) {
1985 			offset = addr & 0xffff;
1986 			len = sg_len;
1987 			if ((offset + sg_len) > 0x10000)
1988 				len = 0x10000 - offset;
1989 
1990 			prd[idx].addr = cpu_to_le32(addr);
1991 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1992 
1993 			idx++;
1994 			sg_len -= len;
1995 			addr += len;
1996 		}
1997 	}
1998 
1999 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2000 }
2001 
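/* Issue a single NCQ command: set its bit in SActive, record it as the
 * last issued tag, update the bookkeeping bitmaps and write the taskfile
 * to the device.
 */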
2002 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2003 					  struct ata_queued_cmd *qc)
2004 {
2005 	struct nv_swncq_port_priv *pp = ap->private_data;
2006 
2007 	if (qc == NULL)
2008 		return 0;
2009 
2010 	DPRINTK("Enter\n");
2011 
2012 	writel((1 << qc->hw_tag), pp->sactive_block);
2013 	pp->last_issue_tag = qc->hw_tag;
2014 	pp->dhfis_bits &= ~(1 << qc->hw_tag);
2015 	pp->dmafis_bits &= ~(1 << qc->hw_tag);
2016 	pp->qc_active |= (0x1 << qc->hw_tag);
2017 
2018 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2019 	ap->ops->sff_exec_command(ap, &qc->tf);
2020 
2021 	DPRINTK("Issued tag %u\n", qc->hw_tag);
2022 
2023 	return 0;
2024 }
2025 
2026 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2027 {
2028 	struct ata_port *ap = qc->ap;
2029 	struct nv_swncq_port_priv *pp = ap->private_data;
2030 
2031 	if (qc->tf.protocol != ATA_PROT_NCQ)
2032 		return ata_bmdma_qc_issue(qc);
2033 
2034 	DPRINTK("Enter\n");
2035 
2036 	if (!pp->qc_active)
2037 		nv_swncq_issue_atacmd(ap, qc);
2038 	else
2039 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2040 
2041 	return 0;
2042 }
2043 
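/* Handle a hotplug/unplug interrupt: clear SError, note whether a device
 * was added or removed, mark the link as hotplugged and freeze the port
 * so EH takes over.
 */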
2044 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2045 {
2046 	u32 serror;
2047 	struct ata_eh_info *ehi = &ap->link.eh_info;
2048 
2049 	ata_ehi_clear_desc(ehi);
2050 
2051 	/* AHCI needs SError cleared; otherwise, it might lock up */
2052 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2053 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2054 
2055 	/* analyze @fis */
2056 	if (fis & NV_SWNCQ_IRQ_ADDED)
2057 		ata_ehi_push_desc(ehi, "hot plug");
2058 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2059 		ata_ehi_push_desc(ehi, "hot unplug");
2060 
2061 	ata_ehi_hotplugged(ehi);
2062 
2063 	/* okay, let's hand over to EH */
2064 	ehi->serror |= serror;
2065 
2066 	ata_port_freeze(ap);
2067 }
2068 
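/* Handle a Set Device Bits FIS: complete the commands whose SActive bits
 * the device has cleared, then either reissue the last command if its
 * Register FIS never arrived or issue the next deferred command.
 */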
2069 static int nv_swncq_sdbfis(struct ata_port *ap)
2070 {
2071 	struct ata_queued_cmd *qc;
2072 	struct nv_swncq_port_priv *pp = ap->private_data;
2073 	struct ata_eh_info *ehi = &ap->link.eh_info;
2074 	u32 sactive;
2075 	u32 done_mask;
2076 	u8 host_stat;
2077 	u8 lack_dhfis = 0;
2078 
2079 	host_stat = ap->ops->bmdma_status(ap);
2080 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2081 		/* error when transferring data to/from memory */
2082 		ata_ehi_clear_desc(ehi);
2083 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2084 		ehi->err_mask |= AC_ERR_HOST_BUS;
2085 		ehi->action |= ATA_EH_RESET;
2086 		return -EINVAL;
2087 	}
2088 
2089 	ap->ops->sff_irq_clear(ap);
2090 	__ata_bmdma_stop(ap);
2091 
2092 	sactive = readl(pp->sactive_block);
2093 	done_mask = pp->qc_active ^ sactive;
2094 
2095 	pp->qc_active &= ~done_mask;
2096 	pp->dhfis_bits &= ~done_mask;
2097 	pp->dmafis_bits &= ~done_mask;
2098 	pp->sdbfis_bits |= done_mask;
2099 	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2100 
2101 	if (!ap->qc_active) {
2102 		DPRINTK("over\n");
2103 		nv_swncq_pp_reinit(ap);
2104 		return 0;
2105 	}
2106 
2107 	if (pp->qc_active & pp->dhfis_bits)
2108 		return 0;
2109 
2110 	if ((pp->ncq_flags & ncq_saw_backout) ||
2111 	    (pp->qc_active ^ pp->dhfis_bits))
2112 		/* if the controller can't get a Device-to-Host Register FIS,
2113 		 * the driver needs to reissue the command.
2114 		 */
2115 		lack_dhfis = 1;
2116 
2117 	DPRINTK("id 0x%x QC: qc_active 0x%x,"
2118 		"SWNCQ:qc_active 0x%X defer_bits %X "
2119 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2120 		ap->print_id, ap->qc_active, pp->qc_active,
2121 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2122 		pp->dmafis_bits, pp->last_issue_tag);
2123 
2124 	nv_swncq_fis_reinit(ap);
2125 
2126 	if (lack_dhfis) {
2127 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2128 		nv_swncq_issue_atacmd(ap, qc);
2129 		return 0;
2130 	}
2131 
2132 	if (pp->defer_queue.defer_bits) {
2133 		/* send deferral queue command */
2134 		qc = nv_swncq_qc_from_dq(ap);
2135 		WARN_ON(qc == NULL);
2136 		nv_swncq_issue_atacmd(ap, qc);
2137 	}
2138 
2139 	return 0;
2140 }
2141 
2142 static inline u32 nv_swncq_tag(struct ata_port *ap)
2143 {
2144 	struct nv_swncq_port_priv *pp = ap->private_data;
2145 	u32 tag;
2146 
2147 	tag = readb(pp->tag_block) >> 2;
2148 	return (tag & 0x1f);
2149 }
2150 
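/* Program the BMDMA engine for the command indicated by the DMA Setup
 * FIS: point it at that tag's PRD table, set the transfer direction and
 * start the transfer.
 */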
2151 static void nv_swncq_dmafis(struct ata_port *ap)
2152 {
2153 	struct ata_queued_cmd *qc;
2154 	unsigned int rw;
2155 	u8 dmactl;
2156 	u32 tag;
2157 	struct nv_swncq_port_priv *pp = ap->private_data;
2158 
2159 	__ata_bmdma_stop(ap);
2160 	tag = nv_swncq_tag(ap);
2161 
2162 	DPRINTK("dma setup tag 0x%x\n", tag);
2163 	qc = ata_qc_from_tag(ap, tag);
2164 
2165 	if (unlikely(!qc))
2166 		return;
2167 
2168 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2169 
2170 	/* load PRD table addr. */
2171 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2172 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2173 
2174 	/* specify data direction, triple-check start bit is clear */
2175 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2176 	dmactl &= ~ATA_DMA_WR;
2177 	if (!rw)
2178 		dmactl |= ATA_DMA_WR;
2179 
2180 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2181 }
2182 
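/* Per-port SWNCQ interrupt handling: dispatch hotplug events, device
 * errors, SDB FIS completions, D2H Register FIS acknowledgements and DMA
 * Setup FIS notifications according to the bits set in @fis.
 */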
2183 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2184 {
2185 	struct nv_swncq_port_priv *pp = ap->private_data;
2186 	struct ata_queued_cmd *qc;
2187 	struct ata_eh_info *ehi = &ap->link.eh_info;
2188 	u32 serror;
2189 	u8 ata_stat;
2190 
2191 	ata_stat = ap->ops->sff_check_status(ap);
2192 	nv_swncq_irq_clear(ap, fis);
2193 	if (!fis)
2194 		return;
2195 
2196 	if (ap->pflags & ATA_PFLAG_FROZEN)
2197 		return;
2198 
2199 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2200 		nv_swncq_hotplug(ap, fis);
2201 		return;
2202 	}
2203 
2204 	if (!pp->qc_active)
2205 		return;
2206 
2207 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2208 		return;
2209 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2210 
2211 	if (ata_stat & ATA_ERR) {
2212 		ata_ehi_clear_desc(ehi);
2213 		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2214 		ehi->err_mask |= AC_ERR_DEV;
2215 		ehi->serror |= serror;
2216 		ehi->action |= ATA_EH_RESET;
2217 		ata_port_freeze(ap);
2218 		return;
2219 	}
2220 
2221 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2222 		/* On a backout IRQ, the driver must issue
2223 		 * the command again some time later.
2224 		 */
2225 		pp->ncq_flags |= ncq_saw_backout;
2226 	}
2227 
2228 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2229 		pp->ncq_flags |= ncq_saw_sdb;
2230 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2231 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2232 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2233 			pp->dmafis_bits, readl(pp->sactive_block));
2234 		if (nv_swncq_sdbfis(ap) < 0)
2235 			goto irq_error;
2236 	}
2237 
2238 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2239 		/* The interrupt indicates the new command
2240 		 * was transmitted correctly to the drive.
2241 		 */
2242 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2243 		pp->ncq_flags |= ncq_saw_d2h;
2244 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2245 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2246 			ehi->err_mask |= AC_ERR_HSM;
2247 			ehi->action |= ATA_EH_RESET;
2248 			goto irq_error;
2249 		}
2250 
2251 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2252 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2253 			ata_stat = ap->ops->sff_check_status(ap);
2254 			if (ata_stat & ATA_BUSY)
2255 				goto irq_exit;
2256 
2257 			if (pp->defer_queue.defer_bits) {
2258 				DPRINTK("send next command\n");
2259 				qc = nv_swncq_qc_from_dq(ap);
2260 				nv_swncq_issue_atacmd(ap, qc);
2261 			}
2262 		}
2263 	}
2264 
2265 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2266 		/* program the DMA controller with the appropriate PRD buffers
2267 		 * and start the DMA transfer for the requested command.
2268 		 */
2269 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2270 		pp->ncq_flags |= ncq_saw_dmas;
2271 		nv_swncq_dmafis(ap);
2272 	}
2273 
2274 irq_exit:
2275 	return;
2276 irq_error:
2277 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2278 	ata_port_freeze(ap);
2279 	return;
2280 }
2281 
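/* Top-level SWNCQ interrupt handler: read the shared MCP55 interrupt
 * status and hand each port either to the SWNCQ path (when NCQ commands
 * are active) or to the generic per-port handler.
 */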
2282 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2283 {
2284 	struct ata_host *host = dev_instance;
2285 	unsigned int i;
2286 	unsigned int handled = 0;
2287 	unsigned long flags;
2288 	u32 irq_stat;
2289 
2290 	spin_lock_irqsave(&host->lock, flags);
2291 
2292 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2293 
2294 	for (i = 0; i < host->n_ports; i++) {
2295 		struct ata_port *ap = host->ports[i];
2296 
2297 		if (ap->link.sactive) {
2298 			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2299 			handled = 1;
2300 		} else {
2301 			if (irq_stat)	/* preserve the hotplug bits */
2302 				nv_swncq_irq_clear(ap, 0xfff0);
2303 
2304 			handled += nv_host_intr(ap, (u8)irq_stat);
2305 		}
2306 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2307 	}
2308 
2309 	spin_unlock_irqrestore(&host->lock, flags);
2310 
2311 	return IRQ_RETVAL(handled);
2312 }
2313 
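/* PCI probe: verify the device is a SATA controller (all six BARs
 * populated), choose ADMA or SWNCQ based on chip type and module
 * parameters, map BAR5, enable the SATA register space on CK804 and
 * newer chips, run mode-specific init and activate the host.
 */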
2314 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2315 {
2316 	const struct ata_port_info *ppi[] = { NULL, NULL };
2317 	struct nv_pi_priv *ipriv;
2318 	struct ata_host *host;
2319 	struct nv_host_priv *hpriv;
2320 	int rc;
2321 	u32 bar;
2322 	void __iomem *base;
2323 	unsigned long type = ent->driver_data;
2324 
2325 	/* Make sure this is a SATA controller by checking that all six BARs
2326 	 * are populated (NVIDIA SATA controllers always have six BARs);
2327 	 * otherwise it's an IDE controller and we ignore it. */
2328 	for (bar = 0; bar < 6; bar++)
2329 		if (pci_resource_start(pdev, bar) == 0)
2330 			return -ENODEV;
2331 
2332 	ata_print_version_once(&pdev->dev, DRV_VERSION);
2333 
2334 	rc = pcim_enable_device(pdev);
2335 	if (rc)
2336 		return rc;
2337 
2338 	/* determine type and allocate host */
2339 	if (type == CK804 && adma_enabled) {
2340 		dev_notice(&pdev->dev, "Using ADMA mode\n");
2341 		type = ADMA;
2342 	} else if (type == MCP5x && swncq_enabled) {
2343 		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2344 		type = SWNCQ;
2345 	}
2346 
2347 	ppi[0] = &nv_port_info[type];
2348 	ipriv = ppi[0]->private_data;
2349 	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2350 	if (rc)
2351 		return rc;
2352 
2353 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2354 	if (!hpriv)
2355 		return -ENOMEM;
2356 	hpriv->type = type;
2357 	host->private_data = hpriv;
2358 
2359 	/* request and iomap NV_MMIO_BAR */
2360 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2361 	if (rc)
2362 		return rc;
2363 
2364 	/* configure SCR access */
2365 	base = host->iomap[NV_MMIO_BAR];
2366 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2367 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2368 
2369 	/* enable SATA space for CK804 */
2370 	if (type >= CK804) {
2371 		u8 regval;
2372 
2373 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2374 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2375 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2376 	}
2377 
2378 	/* init ADMA */
2379 	if (type == ADMA) {
2380 		rc = nv_adma_host_init(host);
2381 		if (rc)
2382 			return rc;
2383 	} else if (type == SWNCQ)
2384 		nv_swncq_host_init(host);
2385 
2386 	if (msi_enabled) {
2387 		dev_notice(&pdev->dev, "Using MSI\n");
2388 		pci_enable_msi(pdev);
2389 	}
2390 
2391 	pci_set_master(pdev);
2392 	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2393 }
2394 
2395 #ifdef CONFIG_PM_SLEEP
2396 static int nv_pci_device_resume(struct pci_dev *pdev)
2397 {
2398 	struct ata_host *host = pci_get_drvdata(pdev);
2399 	struct nv_host_priv *hpriv = host->private_data;
2400 	int rc;
2401 
2402 	rc = ata_pci_device_do_resume(pdev);
2403 	if (rc)
2404 		return rc;
2405 
2406 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2407 		if (hpriv->type >= CK804) {
2408 			u8 regval;
2409 
2410 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2411 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2412 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2413 		}
2414 		if (hpriv->type == ADMA) {
2415 			u32 tmp32;
2416 			struct nv_adma_port_priv *pp;
2417 			/* enable/disable ADMA on the ports appropriately */
2418 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2419 
2420 			pp = host->ports[0]->private_data;
2421 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2422 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2423 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2424 			else
2425 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2426 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2427 			pp = host->ports[1]->private_data;
2428 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2429 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2430 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2431 			else
2432 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2433 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2434 
2435 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2436 		}
2437 	}
2438 
2439 	ata_host_resume(host);
2440 
2441 	return 0;
2442 }
2443 #endif
2444 
2445 static void nv_ck804_host_stop(struct ata_host *host)
2446 {
2447 	struct pci_dev *pdev = to_pci_dev(host->dev);
2448 	u8 regval;
2449 
2450 	/* disable SATA space for CK804 */
2451 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2452 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2453 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2454 }
2455 
2456 static void nv_adma_host_stop(struct ata_host *host)
2457 {
2458 	struct pci_dev *pdev = to_pci_dev(host->dev);
2459 	u32 tmp32;
2460 
2461 	/* disable ADMA on the ports */
2462 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2463 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2464 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2465 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2466 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2467 
2468 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2469 
2470 	nv_ck804_host_stop(host);
2471 }
2472 
2473 module_pci_driver(nv_pci_driver);
2474 
2475 module_param_named(adma, adma_enabled, bool, 0444);
2476 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2477 module_param_named(swncq, swncq_enabled, bool, 0444);
2478 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2479 module_param_named(msi, msi_enabled, bool, 0444);
2480 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2481