1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a similar
28  *  fashion as with other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/gfp.h>
42 #include <linux/pci.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.5"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55 
56 enum {
57 	NV_MMIO_BAR			= 5,
58 
59 	NV_PORTS			= 2,
60 	NV_PIO_MASK			= ATA_PIO4,
61 	NV_MWDMA_MASK			= ATA_MWDMA2,
62 	NV_UDMA_MASK			= ATA_UDMA6,
63 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
64 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
65 
66 	/* INT_STATUS/ENABLE */
67 	NV_INT_STATUS			= 0x10,
68 	NV_INT_ENABLE			= 0x11,
69 	NV_INT_STATUS_CK804		= 0x440,
70 	NV_INT_ENABLE_CK804		= 0x441,
71 
72 	/* INT_STATUS/ENABLE bits */
73 	NV_INT_DEV			= 0x01,
74 	NV_INT_PM			= 0x02,
75 	NV_INT_ADDED			= 0x04,
76 	NV_INT_REMOVED			= 0x08,
77 
78 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
79 
80 	NV_INT_ALL			= 0x0f,
81 	NV_INT_MASK			= NV_INT_DEV |
82 					  NV_INT_ADDED | NV_INT_REMOVED,
83 
84 	/* INT_CONFIG */
85 	NV_INT_CONFIG			= 0x12,
86 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
87 
88 	// For PCI config register 20
89 	NV_MCP_SATA_CFG_20		= 0x50,
90 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
92 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
93 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
94 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
95 
96 	NV_ADMA_MAX_CPBS		= 32,
97 	NV_ADMA_CPB_SZ			= 128,
98 	NV_ADMA_APRD_SZ			= 16,
99 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
100 					   NV_ADMA_APRD_SZ,
101 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
102 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
104 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105 
106 	/* BAR5 offset to ADMA general registers */
107 	NV_ADMA_GEN			= 0x400,
108 	NV_ADMA_GEN_CTL			= 0x00,
109 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
110 
111 	/* BAR5 offset to ADMA ports */
112 	NV_ADMA_PORT			= 0x480,
113 
114 	/* size of ADMA port register space  */
115 	NV_ADMA_PORT_SIZE		= 0x100,
116 
117 	/* ADMA port registers */
118 	NV_ADMA_CTL			= 0x40,
119 	NV_ADMA_CPB_COUNT		= 0x42,
120 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
121 	NV_ADMA_STAT			= 0x44,
122 	NV_ADMA_CPB_BASE_LOW		= 0x48,
123 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
124 	NV_ADMA_APPEND			= 0x50,
125 	NV_ADMA_NOTIFIER		= 0x68,
126 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
127 
128 	/* NV_ADMA_CTL register bits */
129 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
130 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
131 	NV_ADMA_CTL_GO			= (1 << 7),
132 	NV_ADMA_CTL_AIEN		= (1 << 8),
133 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
134 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
135 
136 	/* CPB response flag bits */
137 	NV_CPB_RESP_DONE		= (1 << 0),
138 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
139 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
140 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
141 
142 	/* CPB control flag bits */
143 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
144 	NV_CPB_CTL_QUEUE		= (1 << 1),
145 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
146 	NV_CPB_CTL_IEN			= (1 << 3),
147 	NV_CPB_CTL_FPDMA		= (1 << 4),
148 
149 	/* APRD flags */
150 	NV_APRD_WRITE			= (1 << 1),
151 	NV_APRD_END			= (1 << 2),
152 	NV_APRD_CONT			= (1 << 3),
153 
154 	/* NV_ADMA_STAT flags */
155 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
156 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
157 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
158 	NV_ADMA_STAT_CPBERR		= (1 << 4),
159 	NV_ADMA_STAT_SERROR		= (1 << 5),
160 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
161 	NV_ADMA_STAT_IDLE		= (1 << 8),
162 	NV_ADMA_STAT_LEGACY		= (1 << 9),
163 	NV_ADMA_STAT_STOPPED		= (1 << 10),
164 	NV_ADMA_STAT_DONE		= (1 << 12),
165 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
166 					  NV_ADMA_STAT_TIMEOUT,
167 
168 	/* port flags */
169 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
170 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
171 
172 	/* MCP55 reg offset */
173 	NV_CTL_MCP55			= 0x400,
174 	NV_INT_STATUS_MCP55		= 0x440,
175 	NV_INT_ENABLE_MCP55		= 0x444,
176 	NV_NCQ_REG_MCP55		= 0x448,
177 
178 	/* MCP55 */
179 	NV_INT_ALL_MCP55		= 0xffff,
180 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
181 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
182 
183 	/* SWNCQ ENABLE BITS */
184 	NV_CTL_PRI_SWNCQ		= 0x02,
185 	NV_CTL_SEC_SWNCQ		= 0x04,
186 
187 	/* SW NCQ status bits */
188 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
189 	NV_SWNCQ_IRQ_PM			= (1 << 1),
190 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
191 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
192 
193 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
194 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
195 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
196 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
197 
198 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
199 					  NV_SWNCQ_IRQ_REMOVED,
200 
201 };
202 
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 	__le64			addr;
206 	__le32			len;
207 	u8			flags;
208 	u8			packet_len;
209 	__le16			reserved;
210 };
211 
212 enum nv_adma_regbits {
213 	CMDEND	= (1 << 15),		/* end of command list */
214 	WNB	= (1 << 14),		/* wait-not-BSY */
215 	IGN	= (1 << 13),		/* ignore this entry */
216 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
217 	DA2	= (1 << (2 + 8)),
218 	DA1	= (1 << (1 + 8)),
219 	DA0	= (1 << (0 + 8)),
220 };
221 
222 /* ADMA Command Parameter Block
223    The first 5 SG segments are stored inside the Command Parameter Block itself.
224    If there are more than 5 segments, the remainder are stored in a separate
225    memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 	u8			resp_flags;    /* 0 */
228 	u8			reserved1;     /* 1 */
229 	u8			ctl_flags;     /* 2 */
230 	/* len is length of taskfile in 64 bit words */
231 	u8			len;		/* 3  */
232 	u8			tag;           /* 4 */
233 	u8			next_cpb_idx;  /* 5 */
234 	__le16			reserved2;     /* 6-7 */
235 	__le16			tf[12];        /* 8-31 */
236 	struct nv_adma_prd	aprd[5];       /* 32-111 */
237 	__le64			next_aprd;     /* 112-119 */
238 	__le64			reserved3;     /* 120-127 */
239 };
240 
241 
242 struct nv_adma_port_priv {
243 	struct nv_adma_cpb	*cpb;
244 	dma_addr_t		cpb_dma;
245 	struct nv_adma_prd	*aprd;
246 	dma_addr_t		aprd_dma;
247 	void __iomem		*ctl_block;
248 	void __iomem		*gen_block;
249 	void __iomem		*notifier_clear_block;
250 	u64			adma_dma_mask;
251 	u8			flags;
252 	int			last_issue_ncq;
253 };
254 
255 struct nv_host_priv {
256 	unsigned long		type;
257 };
258 
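/* FIFO of command tags deferred by the SWNCQ issue path; defer_bits mirrors
   the queued tags as a bitmask. */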
259 struct defer_queue {
260 	u32		defer_bits;
261 	unsigned int	head;
262 	unsigned int	tail;
263 	unsigned int	tag[ATA_MAX_QUEUE];
264 };
265 
266 enum ncq_saw_flag_list {
267 	ncq_saw_d2h	= (1U << 0),
268 	ncq_saw_dmas	= (1U << 1),
269 	ncq_saw_sdb	= (1U << 2),
270 	ncq_saw_backout	= (1U << 3),
271 };
272 
273 struct nv_swncq_port_priv {
274 	struct ata_bmdma_prd *prd;	 /* our SG list */
275 	dma_addr_t	prd_dma; /* and its DMA mapping */
276 	void __iomem	*sactive_block;
277 	void __iomem	*irq_block;
278 	void __iomem	*tag_block;
279 	u32		qc_active;
280 
281 	unsigned int	last_issue_tag;
282 
283 	/* FIFO circular queue to store deferred commands */
284 	struct defer_queue defer_queue;
285 
286 	/* for NCQ interrupt analysis */
287 	u32		dhfis_bits;
288 	u32		dmafis_bits;
289 	u32		sdbfis_bits;
290 
291 	unsigned int	ncq_flags;
292 };
293 
294 
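/* Per-port ADMA interrupt pending bit in the ADMA general control register:
   bit 19 for port 0, bit 31 for port 1 (19 + 12 * port). */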
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296 
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM_SLEEP
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 
308 static int nv_hardreset(struct ata_link *link, unsigned int *class,
309 			unsigned long deadline);
310 static void nv_nf2_freeze(struct ata_port *ap);
311 static void nv_nf2_thaw(struct ata_port *ap);
312 static void nv_ck804_freeze(struct ata_port *ap);
313 static void nv_ck804_thaw(struct ata_port *ap);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332 
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347 
348 enum nv_host_type
349 {
350 	GENERIC,
351 	NFORCE2,
352 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
353 	CK804,
354 	ADMA,
355 	MCP5x,
356 	SWNCQ,
357 };
358 
359 static const struct pci_device_id nv_pci_tbl[] = {
360 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
373 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
374 
375 	{ } /* terminate list */
376 };
377 
378 static struct pci_driver nv_pci_driver = {
379 	.name			= DRV_NAME,
380 	.id_table		= nv_pci_tbl,
381 	.probe			= nv_init_one,
382 #ifdef CONFIG_PM_SLEEP
383 	.suspend		= ata_pci_device_suspend,
384 	.resume			= nv_pci_device_resume,
385 #endif
386 	.remove			= ata_pci_remove_one,
387 };
388 
389 static struct scsi_host_template nv_sht = {
390 	ATA_BMDMA_SHT(DRV_NAME),
391 };
392 
393 static struct scsi_host_template nv_adma_sht = {
394 	ATA_NCQ_SHT(DRV_NAME),
395 	.can_queue		= NV_ADMA_MAX_CPBS,
396 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
397 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
398 	.slave_configure	= nv_adma_slave_config,
399 };
400 
401 static struct scsi_host_template nv_swncq_sht = {
402 	ATA_NCQ_SHT(DRV_NAME),
403 	.can_queue		= ATA_MAX_QUEUE,
404 	.sg_tablesize		= LIBATA_MAX_PRD,
405 	.dma_boundary		= ATA_DMA_BOUNDARY,
406 	.slave_configure	= nv_swncq_slave_config,
407 };
408 
409 /*
410  * NV SATA controllers have various problems with the hardreset
411  * protocol depending on the specific controller and device.
412  *
413  * GENERIC:
414  *
415  *  bko11195 reports that link doesn't come online after hardreset on
416  *  generic nv's and there have been several other similar reports on
417  *  linux-ide.
418  *
419  *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
420  *  softreset.
421  *
422  * NF2/3:
423  *
424  *  bko3352 reports nf2/3 controllers can't determine device signature
425  *  reliably after hardreset.  The following thread reports detection
426  *  failure on cold boot with the standard debouncing timing.
427  *
428  *  http://thread.gmane.org/gmane.linux.ide/34098
429  *
430  *  bko12176 reports that hardreset fails to bring up the link during
431  *  boot on nf2.
432  *
433  * CK804:
434  *
435  *  For initial probing after boot and hot plugging, hardreset mostly
436  *  works fine on CK804 but curiously, reprobing on the initial port
437  *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
438  *  FIS in a somewhat nondeterministic way.
439  *
440  * SWNCQ:
441  *
442  *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
443  *  hardreset should be used and hardreset can't report proper
444  *  signature, which suggests that mcp5x is closer to nf2 as far as
445  *  reset quirkiness is concerned.
446  *
447  *  bko12703 reports that boot probing fails for an Intel SSD with
448  *  hardreset.  Link fails to come online.  Softreset works fine.
449  *
450  * The failures are varied but the following patterns seem true for
451  * all flavors.
452  *
453  * - Softreset during boot always works.
454  *
455  * - Hardreset during boot sometimes fails to bring up the link on
456  *   certain combinations and device signature acquisition is
457  *   unreliable.
458  *
459  * - Hardreset is often necessary after hotplug.
460  *
461  * So, preferring softreset for boot probing and error handling (as
462  * hardreset might bring down the link) but using hardreset for
463  * post-boot probing should work around the above issues in most
464  * cases.  Define nv_hardreset() which only kicks in for post-boot
465  * probing and use it for all variants.
466  */
467 static struct ata_port_operations nv_generic_ops = {
468 	.inherits		= &ata_bmdma_port_ops,
469 	.lost_interrupt		= ATA_OP_NULL,
470 	.scr_read		= nv_scr_read,
471 	.scr_write		= nv_scr_write,
472 	.hardreset		= nv_hardreset,
473 };
474 
475 static struct ata_port_operations nv_nf2_ops = {
476 	.inherits		= &nv_generic_ops,
477 	.freeze			= nv_nf2_freeze,
478 	.thaw			= nv_nf2_thaw,
479 };
480 
481 static struct ata_port_operations nv_ck804_ops = {
482 	.inherits		= &nv_generic_ops,
483 	.freeze			= nv_ck804_freeze,
484 	.thaw			= nv_ck804_thaw,
485 	.host_stop		= nv_ck804_host_stop,
486 };
487 
488 static struct ata_port_operations nv_adma_ops = {
489 	.inherits		= &nv_ck804_ops,
490 
491 	.check_atapi_dma	= nv_adma_check_atapi_dma,
492 	.sff_tf_read		= nv_adma_tf_read,
493 	.qc_defer		= ata_std_qc_defer,
494 	.qc_prep		= nv_adma_qc_prep,
495 	.qc_issue		= nv_adma_qc_issue,
496 	.sff_irq_clear		= nv_adma_irq_clear,
497 
498 	.freeze			= nv_adma_freeze,
499 	.thaw			= nv_adma_thaw,
500 	.error_handler		= nv_adma_error_handler,
501 	.post_internal_cmd	= nv_adma_post_internal_cmd,
502 
503 	.port_start		= nv_adma_port_start,
504 	.port_stop		= nv_adma_port_stop,
505 #ifdef CONFIG_PM
506 	.port_suspend		= nv_adma_port_suspend,
507 	.port_resume		= nv_adma_port_resume,
508 #endif
509 	.host_stop		= nv_adma_host_stop,
510 };
511 
512 static struct ata_port_operations nv_swncq_ops = {
513 	.inherits		= &nv_generic_ops,
514 
515 	.qc_defer		= ata_std_qc_defer,
516 	.qc_prep		= nv_swncq_qc_prep,
517 	.qc_issue		= nv_swncq_qc_issue,
518 
519 	.freeze			= nv_mcp55_freeze,
520 	.thaw			= nv_mcp55_thaw,
521 	.error_handler		= nv_swncq_error_handler,
522 
523 #ifdef CONFIG_PM
524 	.port_suspend		= nv_swncq_port_suspend,
525 	.port_resume		= nv_swncq_port_resume,
526 #endif
527 	.port_start		= nv_swncq_port_start,
528 };
529 
530 struct nv_pi_priv {
531 	irq_handler_t			irq_handler;
532 	struct scsi_host_template	*sht;
533 };
534 
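/* Per-flavor probe data carried in ata_port_info.private_data: the interrupt
   handler and SCSI host template to use for that controller type. */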
535 #define NV_PI_PRIV(_irq_handler, _sht) \
536 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
537 
538 static const struct ata_port_info nv_port_info[] = {
539 	/* generic */
540 	{
541 		.flags		= ATA_FLAG_SATA,
542 		.pio_mask	= NV_PIO_MASK,
543 		.mwdma_mask	= NV_MWDMA_MASK,
544 		.udma_mask	= NV_UDMA_MASK,
545 		.port_ops	= &nv_generic_ops,
546 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
547 	},
548 	/* nforce2/3 */
549 	{
550 		.flags		= ATA_FLAG_SATA,
551 		.pio_mask	= NV_PIO_MASK,
552 		.mwdma_mask	= NV_MWDMA_MASK,
553 		.udma_mask	= NV_UDMA_MASK,
554 		.port_ops	= &nv_nf2_ops,
555 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
556 	},
557 	/* ck804 */
558 	{
559 		.flags		= ATA_FLAG_SATA,
560 		.pio_mask	= NV_PIO_MASK,
561 		.mwdma_mask	= NV_MWDMA_MASK,
562 		.udma_mask	= NV_UDMA_MASK,
563 		.port_ops	= &nv_ck804_ops,
564 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
565 	},
566 	/* ADMA */
567 	{
568 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
569 		.pio_mask	= NV_PIO_MASK,
570 		.mwdma_mask	= NV_MWDMA_MASK,
571 		.udma_mask	= NV_UDMA_MASK,
572 		.port_ops	= &nv_adma_ops,
573 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
574 	},
575 	/* MCP5x */
576 	{
577 		.flags		= ATA_FLAG_SATA,
578 		.pio_mask	= NV_PIO_MASK,
579 		.mwdma_mask	= NV_MWDMA_MASK,
580 		.udma_mask	= NV_UDMA_MASK,
581 		.port_ops	= &nv_generic_ops,
582 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
583 	},
584 	/* SWNCQ */
585 	{
586 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
587 		.pio_mask	= NV_PIO_MASK,
588 		.mwdma_mask	= NV_MWDMA_MASK,
589 		.udma_mask	= NV_UDMA_MASK,
590 		.port_ops	= &nv_swncq_ops,
591 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
592 	},
593 };
594 
595 MODULE_AUTHOR("NVIDIA");
596 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
597 MODULE_LICENSE("GPL");
598 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
599 MODULE_VERSION(DRV_VERSION);
600 
601 static bool adma_enabled;
602 static bool swncq_enabled = true;
603 static bool msi_enabled;
604 
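/* Switch the port back to legacy register mode: wait for the ADMA engine to go
   idle, clear the GO bit, then wait for the LEGACY status bit to assert. */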
605 static void nv_adma_register_mode(struct ata_port *ap)
606 {
607 	struct nv_adma_port_priv *pp = ap->private_data;
608 	void __iomem *mmio = pp->ctl_block;
609 	u16 tmp, status;
610 	int count = 0;
611 
612 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
613 		return;
614 
615 	status = readw(mmio + NV_ADMA_STAT);
616 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
617 		ndelay(50);
618 		status = readw(mmio + NV_ADMA_STAT);
619 		count++;
620 	}
621 	if (count == 20)
622 		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
623 			      status);
624 
625 	tmp = readw(mmio + NV_ADMA_CTL);
626 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
627 
628 	count = 0;
629 	status = readw(mmio + NV_ADMA_STAT);
630 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
631 		ndelay(50);
632 		status = readw(mmio + NV_ADMA_STAT);
633 		count++;
634 	}
635 	if (count == 20)
636 		ata_port_warn(ap,
637 			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
638 			      status);
639 
640 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
641 }
642 
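/* Switch the port into ADMA mode: set the GO bit and wait for LEGACY to clear
   and IDLE to assert. */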
643 static void nv_adma_mode(struct ata_port *ap)
644 {
645 	struct nv_adma_port_priv *pp = ap->private_data;
646 	void __iomem *mmio = pp->ctl_block;
647 	u16 tmp, status;
648 	int count = 0;
649 
650 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
651 		return;
652 
653 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
654 
655 	tmp = readw(mmio + NV_ADMA_CTL);
656 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
657 
658 	status = readw(mmio + NV_ADMA_STAT);
659 	while (((status & NV_ADMA_STAT_LEGACY) ||
660 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
661 		ndelay(50);
662 		status = readw(mmio + NV_ADMA_STAT);
663 		count++;
664 	}
665 	if (count == 20)
666 		ata_port_warn(ap,
667 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
668 			status);
669 
670 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
671 }
672 
673 static int nv_adma_slave_config(struct scsi_device *sdev)
674 {
675 	struct ata_port *ap = ata_shost_to_port(sdev->host);
676 	struct nv_adma_port_priv *pp = ap->private_data;
677 	struct nv_adma_port_priv *port0, *port1;
678 	struct scsi_device *sdev0, *sdev1;
679 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
680 	unsigned long segment_boundary, flags;
681 	unsigned short sg_tablesize;
682 	int rc;
683 	int adma_enable;
684 	u32 current_reg, new_reg, config_mask;
685 
686 	rc = ata_scsi_slave_config(sdev);
687 
688 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
689 		/* Not a proper libata device, ignore */
690 		return rc;
691 
692 	spin_lock_irqsave(ap->lock, flags);
693 
694 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
695 		/*
696 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
697 		 * Therefore ATAPI commands are sent through the legacy interface.
698 		 * However, the legacy interface only supports 32-bit DMA.
699 		 * Restrict DMA parameters as required by the legacy interface
700 		 * when an ATAPI device is connected.
701 		 */
702 		segment_boundary = ATA_DMA_BOUNDARY;
703 		/* Subtract 1 since an extra entry may be needed for padding, see
704 		   libata-scsi.c */
705 		sg_tablesize = LIBATA_MAX_PRD - 1;
706 
707 		/* Since the legacy DMA engine is in use, we need to disable ADMA
708 		   on the port. */
709 		adma_enable = 0;
710 		nv_adma_register_mode(ap);
711 	} else {
712 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
713 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
714 		adma_enable = 1;
715 	}
716 
717 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
718 
719 	if (ap->port_no == 1)
720 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
721 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
722 	else
723 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
724 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
725 
726 	if (adma_enable) {
727 		new_reg = current_reg | config_mask;
728 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
729 	} else {
730 		new_reg = current_reg & ~config_mask;
731 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
732 	}
733 
734 	if (current_reg != new_reg)
735 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
736 
737 	port0 = ap->host->ports[0]->private_data;
738 	port1 = ap->host->ports[1]->private_data;
739 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
740 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
741 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
742 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
743 		/** We have to set the DMA mask to 32-bit if either port is in
744 		    ATAPI mode, since they are on the same PCI device which is
745 		    used for DMA mapping. If we set the mask we also need to set
746 		    the bounce limit on both ports to ensure that the block
747 		    layer doesn't feed addresses that cause DMA mapping to
748 		    choke. If either SCSI device is not allocated yet, it's OK
749 		    since that port will discover its correct setting when it
750 		    does get allocated.
751 		    Note: Setting 32-bit mask should not fail. */
752 		if (sdev0)
753 			blk_queue_bounce_limit(sdev0->request_queue,
754 					       ATA_DMA_MASK);
755 		if (sdev1)
756 			blk_queue_bounce_limit(sdev1->request_queue,
757 					       ATA_DMA_MASK);
758 
759 		dma_set_mask(&pdev->dev, ATA_DMA_MASK);
760 	} else {
761 		/** This shouldn't fail as it was set to this value before */
762 		dma_set_mask(&pdev->dev, pp->adma_dma_mask);
763 		if (sdev0)
764 			blk_queue_bounce_limit(sdev0->request_queue,
765 					       pp->adma_dma_mask);
766 		if (sdev1)
767 			blk_queue_bounce_limit(sdev1->request_queue,
768 					       pp->adma_dma_mask);
769 	}
770 
771 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
772 	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
773 	ata_port_info(ap,
774 		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
775 		      (unsigned long long)*ap->host->dev->dma_mask,
776 		      segment_boundary, sg_tablesize);
777 
778 	spin_unlock_irqrestore(ap->lock, flags);
779 
780 	return rc;
781 }
782 
783 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
784 {
785 	struct nv_adma_port_priv *pp = qc->ap->private_data;
786 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
787 }
788 
789 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
790 {
791 	/* Other than when internal or pass-through commands are executed,
792 	   the only time this function will be called in ADMA mode will be
793 	   if a command fails. In the failure case we don't care about going
794 	   into register mode with ADMA commands pending, as the commands will
795 	   all shortly be aborted anyway. We assume that NCQ commands are not
796 	   issued via passthrough, which is the only way that switching into
797 	   ADMA mode could abort outstanding commands. */
798 	nv_adma_register_mode(ap);
799 
800 	ata_sff_tf_read(ap, tf);
801 }
802 
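/* Encode a taskfile into CPB entries: each 16-bit word carries a shadow
   register index in the high byte and its value in the low byte, plus
   WNB/CMDEND control bits; unused slots are padded with IGN. */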
803 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
804 {
805 	unsigned int idx = 0;
806 
807 	if (tf->flags & ATA_TFLAG_ISADDR) {
808 		if (tf->flags & ATA_TFLAG_LBA48) {
809 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
810 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
811 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
812 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
813 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
814 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
815 		} else
816 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
817 
818 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
819 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
820 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
821 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
822 	}
823 
824 	if (tf->flags & ATA_TFLAG_DEVICE)
825 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
826 
827 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
828 
829 	while (idx < 12)
830 		cpb[idx++] = cpu_to_le16(IGN);
831 
832 	return idx;
833 }
834 
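/* Check a CPB's response flags: returns 1 if the command completed, 0 if it is
   still outstanding, and -1 after starting EH (freeze or abort) on an error. */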
835 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
836 {
837 	struct nv_adma_port_priv *pp = ap->private_data;
838 	u8 flags = pp->cpb[cpb_num].resp_flags;
839 
840 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
841 
842 	if (unlikely((force_err ||
843 		     flags & (NV_CPB_RESP_ATA_ERR |
844 			      NV_CPB_RESP_CMD_ERR |
845 			      NV_CPB_RESP_CPB_ERR)))) {
846 		struct ata_eh_info *ehi = &ap->link.eh_info;
847 		int freeze = 0;
848 
849 		ata_ehi_clear_desc(ehi);
850 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
851 		if (flags & NV_CPB_RESP_ATA_ERR) {
852 			ata_ehi_push_desc(ehi, "ATA error");
853 			ehi->err_mask |= AC_ERR_DEV;
854 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
855 			ata_ehi_push_desc(ehi, "CMD error");
856 			ehi->err_mask |= AC_ERR_DEV;
857 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
858 			ata_ehi_push_desc(ehi, "CPB error");
859 			ehi->err_mask |= AC_ERR_SYSTEM;
860 			freeze = 1;
861 		} else {
862 			/* notifier error, but no error in CPB flags? */
863 			ata_ehi_push_desc(ehi, "unknown");
864 			ehi->err_mask |= AC_ERR_OTHER;
865 			freeze = 1;
866 		}
867 		/* Kill all commands. EH will determine what actually failed. */
868 		if (freeze)
869 			ata_port_freeze(ap);
870 		else
871 			ata_port_abort(ap);
872 		return -1;
873 	}
874 
875 	if (likely(flags & NV_CPB_RESP_DONE))
876 		return 1;
877 	return 0;
878 }
879 
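/* Legacy-mode per-port interrupt handling: freeze on hotplug events, ignore
   non-device interrupts, and pass active commands to the BMDMA interrupt
   handler. */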
880 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
881 {
882 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
883 
884 	/* freeze if hotplugged */
885 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
886 		ata_port_freeze(ap);
887 		return 1;
888 	}
889 
890 	/* bail out if not our interrupt */
891 	if (!(irq_stat & NV_INT_DEV))
892 		return 0;
893 
894 	/* DEV interrupt w/ no active qc? */
895 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
896 		ata_sff_check_status(ap);
897 		return 1;
898 	}
899 
900 	/* handle interrupt */
901 	return ata_bmdma_port_intr(ap, qc);
902 }
903 
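/* Top-level ADMA interrupt handler: services legacy-mode interrupts per port,
   then reads the notifier registers and completes any finished CPBs. */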
904 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
905 {
906 	struct ata_host *host = dev_instance;
907 	int i, handled = 0;
908 	u32 notifier_clears[2];
909 
910 	spin_lock(&host->lock);
911 
912 	for (i = 0; i < host->n_ports; i++) {
913 		struct ata_port *ap = host->ports[i];
914 		struct nv_adma_port_priv *pp = ap->private_data;
915 		void __iomem *mmio = pp->ctl_block;
916 		u16 status;
917 		u32 gen_ctl;
918 		u32 notifier, notifier_error;
919 
920 		notifier_clears[i] = 0;
921 
922 		/* if ADMA is disabled, use standard ata interrupt handler */
923 		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
924 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
925 				>> (NV_INT_PORT_SHIFT * i);
926 			handled += nv_host_intr(ap, irq_stat);
927 			continue;
928 		}
929 
930 		/* if in ATA register mode, check for standard interrupts */
931 		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
932 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
933 				>> (NV_INT_PORT_SHIFT * i);
934 			if (ata_tag_valid(ap->link.active_tag))
935 				/** NV_INT_DEV indication seems unreliable
936 				    at times, at least in ADMA mode. Always
937 				    force it on when a command is active, to
938 				    prevent losing interrupts. */
939 				irq_stat |= NV_INT_DEV;
940 			handled += nv_host_intr(ap, irq_stat);
941 		}
942 
943 		notifier = readl(mmio + NV_ADMA_NOTIFIER);
944 		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
945 		notifier_clears[i] = notifier | notifier_error;
946 
947 		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
948 
949 		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
950 		    !notifier_error)
951 			/* Nothing to do */
952 			continue;
953 
954 		status = readw(mmio + NV_ADMA_STAT);
955 
956 		/*
957 		 * Clear status. Ensure the controller sees the
958 		 * clearing before we start looking at any of the CPB
959 		 * statuses, so that any CPB completions after this
960 		 * point in the handler will raise another interrupt.
961 		 */
962 		writew(status, mmio + NV_ADMA_STAT);
963 		readw(mmio + NV_ADMA_STAT); /* flush posted write */
964 		rmb();
965 
966 		handled++; /* irq handled if we got here */
967 
968 		/* freeze if hotplugged or controller error */
969 		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
970 				       NV_ADMA_STAT_HOTUNPLUG |
971 				       NV_ADMA_STAT_TIMEOUT |
972 				       NV_ADMA_STAT_SERROR))) {
973 			struct ata_eh_info *ehi = &ap->link.eh_info;
974 
975 			ata_ehi_clear_desc(ehi);
976 			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
977 			if (status & NV_ADMA_STAT_TIMEOUT) {
978 				ehi->err_mask |= AC_ERR_SYSTEM;
979 				ata_ehi_push_desc(ehi, "timeout");
980 			} else if (status & NV_ADMA_STAT_HOTPLUG) {
981 				ata_ehi_hotplugged(ehi);
982 				ata_ehi_push_desc(ehi, "hotplug");
983 			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
984 				ata_ehi_hotplugged(ehi);
985 				ata_ehi_push_desc(ehi, "hot unplug");
986 			} else if (status & NV_ADMA_STAT_SERROR) {
987 				/* let EH analyze SError and figure out cause */
988 				ata_ehi_push_desc(ehi, "SError");
989 			} else
990 				ata_ehi_push_desc(ehi, "unknown");
991 			ata_port_freeze(ap);
992 			continue;
993 		}
994 
995 		if (status & (NV_ADMA_STAT_DONE |
996 			      NV_ADMA_STAT_CPBERR |
997 			      NV_ADMA_STAT_CMD_COMPLETE)) {
998 			u32 check_commands = notifier_clears[i];
999 			u32 done_mask = 0;
1000 			int pos, rc;
1001 
1002 			if (status & NV_ADMA_STAT_CPBERR) {
1003 				/* check all active commands */
1004 				if (ata_tag_valid(ap->link.active_tag))
1005 					check_commands = 1 <<
1006 						ap->link.active_tag;
1007 				else
1008 					check_commands = ap->link.sactive;
1009 			}
1010 
1011 			/* check CPBs for completed commands */
1012 			while ((pos = ffs(check_commands))) {
1013 				pos--;
1014 				rc = nv_adma_check_cpb(ap, pos,
1015 						notifier_error & (1 << pos));
1016 				if (rc > 0)
1017 					done_mask |= 1 << pos;
1018 				else if (unlikely(rc < 0))
1019 					check_commands = 0;
1020 				check_commands &= ~(1 << pos);
1021 			}
1022 			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1023 		}
1024 	}
1025 
1026 	if (notifier_clears[0] || notifier_clears[1]) {
1027 		/* Note: Both notifier clear registers must be written
1028 		   if either is set, even if one is zero, according to NVIDIA. */
1029 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1030 		writel(notifier_clears[0], pp->notifier_clear_block);
1031 		pp = host->ports[1]->private_data;
1032 		writel(notifier_clears[1], pp->notifier_clear_block);
1033 	}
1034 
1035 	spin_unlock(&host->lock);
1036 
1037 	return IRQ_RETVAL(handled);
1038 }
1039 
1040 static void nv_adma_freeze(struct ata_port *ap)
1041 {
1042 	struct nv_adma_port_priv *pp = ap->private_data;
1043 	void __iomem *mmio = pp->ctl_block;
1044 	u16 tmp;
1045 
1046 	nv_ck804_freeze(ap);
1047 
1048 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1049 		return;
1050 
1051 	/* clear any outstanding CK804 notifications */
1052 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1053 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1054 
1055 	/* Disable interrupt */
1056 	tmp = readw(mmio + NV_ADMA_CTL);
1057 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1058 		mmio + NV_ADMA_CTL);
1059 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1060 }
1061 
1062 static void nv_adma_thaw(struct ata_port *ap)
1063 {
1064 	struct nv_adma_port_priv *pp = ap->private_data;
1065 	void __iomem *mmio = pp->ctl_block;
1066 	u16 tmp;
1067 
1068 	nv_ck804_thaw(ap);
1069 
1070 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1071 		return;
1072 
1073 	/* Enable interrupt */
1074 	tmp = readw(mmio + NV_ADMA_CTL);
1075 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1076 		mmio + NV_ADMA_CTL);
1077 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1078 }
1079 
1080 static void nv_adma_irq_clear(struct ata_port *ap)
1081 {
1082 	struct nv_adma_port_priv *pp = ap->private_data;
1083 	void __iomem *mmio = pp->ctl_block;
1084 	u32 notifier_clears[2];
1085 
1086 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1087 		ata_bmdma_irq_clear(ap);
1088 		return;
1089 	}
1090 
1091 	/* clear any outstanding CK804 notifications */
1092 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1093 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1094 
1095 	/* clear ADMA status */
1096 	writew(0xffff, mmio + NV_ADMA_STAT);
1097 
1098 	/* clear notifiers - note both ports need to be written with
1099 	   something even though we are only clearing on one */
1100 	if (ap->port_no == 0) {
1101 		notifier_clears[0] = 0xFFFFFFFF;
1102 		notifier_clears[1] = 0;
1103 	} else {
1104 		notifier_clears[0] = 0;
1105 		notifier_clears[1] = 0xFFFFFFFF;
1106 	}
1107 	pp = ap->host->ports[0]->private_data;
1108 	writel(notifier_clears[0], pp->notifier_clear_block);
1109 	pp = ap->host->ports[1]->private_data;
1110 	writel(notifier_clears[1], pp->notifier_clear_block);
1111 }
1112 
1113 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1114 {
1115 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1116 
1117 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1118 		ata_bmdma_post_internal_cmd(qc);
1119 }
1120 
1121 static int nv_adma_port_start(struct ata_port *ap)
1122 {
1123 	struct device *dev = ap->host->dev;
1124 	struct nv_adma_port_priv *pp;
1125 	int rc;
1126 	void *mem;
1127 	dma_addr_t mem_dma;
1128 	void __iomem *mmio;
1129 	struct pci_dev *pdev = to_pci_dev(dev);
1130 	u16 tmp;
1131 
1132 	VPRINTK("ENTER\n");
1133 
1134 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1135 	   pad buffers */
1136 	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1137 	if (rc)
1138 		return rc;
1139 	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1140 	if (rc)
1141 		return rc;
1142 
1143 	/* we might fall back to bmdma, so allocate bmdma resources */
1144 	rc = ata_bmdma_port_start(ap);
1145 	if (rc)
1146 		return rc;
1147 
1148 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1149 	if (!pp)
1150 		return -ENOMEM;
1151 
1152 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1153 	       ap->port_no * NV_ADMA_PORT_SIZE;
1154 	pp->ctl_block = mmio;
1155 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1156 	pp->notifier_clear_block = pp->gen_block +
1157 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1158 
1159 	/* Now that the legacy PRD and padding buffer are allocated, we can
1160 	   safely raise the DMA mask to allocate the CPB/APRD table.
1161 	   These calls are allowed to fail since we store the value that
1162 	   ends up being used as the bounce limit in slave_config later if
1163 	   needed. */
1164 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1165 	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1166 	pp->adma_dma_mask = *dev->dma_mask;
1167 
1168 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1169 				  &mem_dma, GFP_KERNEL);
1170 	if (!mem)
1171 		return -ENOMEM;
1172 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1173 
1174 	/*
1175 	 * First item in chunk of DMA memory:
1176 	 * 128-byte command parameter block (CPB)
1177 	 * one for each command tag
1178 	 */
1179 	pp->cpb     = mem;
1180 	pp->cpb_dma = mem_dma;
1181 
1182 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1183 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1184 
1185 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1186 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1187 
1188 	/*
1189 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1190 	 */
1191 	pp->aprd = mem;
1192 	pp->aprd_dma = mem_dma;
1193 
1194 	ap->private_data = pp;
1195 
1196 	/* clear any outstanding interrupt conditions */
1197 	writew(0xffff, mmio + NV_ADMA_STAT);
1198 
1199 	/* initialize port variables */
1200 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1201 
1202 	/* clear CPB fetch count */
1203 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1204 
1205 	/* clear GO for register mode, enable interrupt */
1206 	tmp = readw(mmio + NV_ADMA_CTL);
1207 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1208 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1209 
1210 	tmp = readw(mmio + NV_ADMA_CTL);
1211 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1212 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1213 	udelay(1);
1214 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1215 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1216 
1217 	return 0;
1218 }
1219 
1220 static void nv_adma_port_stop(struct ata_port *ap)
1221 {
1222 	struct nv_adma_port_priv *pp = ap->private_data;
1223 	void __iomem *mmio = pp->ctl_block;
1224 
1225 	VPRINTK("ENTER\n");
1226 	writew(0, mmio + NV_ADMA_CTL);
1227 }
1228 
1229 #ifdef CONFIG_PM
1230 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1231 {
1232 	struct nv_adma_port_priv *pp = ap->private_data;
1233 	void __iomem *mmio = pp->ctl_block;
1234 
1235 	/* Go to register mode - clears GO */
1236 	nv_adma_register_mode(ap);
1237 
1238 	/* clear CPB fetch count */
1239 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1240 
1241 	/* disable interrupt, shut down port */
1242 	writew(0, mmio + NV_ADMA_CTL);
1243 
1244 	return 0;
1245 }
1246 
1247 static int nv_adma_port_resume(struct ata_port *ap)
1248 {
1249 	struct nv_adma_port_priv *pp = ap->private_data;
1250 	void __iomem *mmio = pp->ctl_block;
1251 	u16 tmp;
1252 
1253 	/* set CPB block location */
1254 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1255 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1256 
1257 	/* clear any outstanding interrupt conditions */
1258 	writew(0xffff, mmio + NV_ADMA_STAT);
1259 
1260 	/* initialize port variables */
1261 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1262 
1263 	/* clear CPB fetch count */
1264 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1265 
1266 	/* clear GO for register mode, enable interrupt */
1267 	tmp = readw(mmio + NV_ADMA_CTL);
1268 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1269 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1270 
1271 	tmp = readw(mmio + NV_ADMA_CTL);
1272 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1273 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1274 	udelay(1);
1275 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1276 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1277 
1278 	return 0;
1279 }
1280 #endif
1281 
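/* Point the standard SFF taskfile accessors at the ADMA register block, where
   the shadow registers are spaced 4 bytes apart. */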
1282 static void nv_adma_setup_port(struct ata_port *ap)
1283 {
1284 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1285 	struct ata_ioports *ioport = &ap->ioaddr;
1286 
1287 	VPRINTK("ENTER\n");
1288 
1289 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1290 
1291 	ioport->cmd_addr	= mmio;
1292 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1293 	ioport->error_addr	=
1294 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1295 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1296 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1297 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1298 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1299 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1300 	ioport->status_addr	=
1301 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1302 	ioport->altstatus_addr	=
1303 	ioport->ctl_addr	= mmio + 0x20;
1304 }
1305 
1306 static int nv_adma_host_init(struct ata_host *host)
1307 {
1308 	struct pci_dev *pdev = to_pci_dev(host->dev);
1309 	unsigned int i;
1310 	u32 tmp32;
1311 
1312 	VPRINTK("ENTER\n");
1313 
1314 	/* enable ADMA on the ports */
1315 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1316 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1317 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1318 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1319 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1320 
1321 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1322 
1323 	for (i = 0; i < host->n_ports; i++)
1324 		nv_adma_setup_port(host->ports[i]);
1325 
1326 	return 0;
1327 }
1328 
1329 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1330 			      struct scatterlist *sg,
1331 			      int idx,
1332 			      struct nv_adma_prd *aprd)
1333 {
1334 	u8 flags = 0;
1335 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1336 		flags |= NV_APRD_WRITE;
1337 	if (idx == qc->n_elem - 1)
1338 		flags |= NV_APRD_END;
1339 	else if (idx != 4)
1340 		flags |= NV_APRD_CONT;
1341 
1342 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1343 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1344 	aprd->flags = flags;
1345 	aprd->packet_len = 0;
1346 }
1347 
1348 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1349 {
1350 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1351 	struct nv_adma_prd *aprd;
1352 	struct scatterlist *sg;
1353 	unsigned int si;
1354 
1355 	VPRINTK("ENTER\n");
1356 
1357 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1358 		aprd = (si < 5) ? &cpb->aprd[si] :
1359 			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1360 		nv_adma_fill_aprd(qc, sg, si, aprd);
1361 	}
1362 	if (si > 5)
1363 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1364 	else
1365 		cpb->next_aprd = cpu_to_le64(0);
1366 }
1367 
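/* Returns 1 when a command must go through the legacy register interface
   instead of ADMA: ATAPI setup active, polled commands, or anything that is
   neither DMA-mapped nor a no-data protocol command. */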
1368 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1369 {
1370 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1371 
1372 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1373 	   or interrupt-driven no-data commands. */
1374 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1375 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1376 		return 1;
1377 
1378 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1379 	   (qc->tf.protocol == ATA_PROT_NODATA))
1380 		return 0;
1381 
1382 	return 1;
1383 }
1384 
1385 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1386 {
1387 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1388 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1389 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1390 		       NV_CPB_CTL_IEN;
1391 
1392 	if (nv_adma_use_reg_mode(qc)) {
1393 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1394 			(qc->flags & ATA_QCFLAG_DMAMAP));
1395 		nv_adma_register_mode(qc->ap);
1396 		ata_bmdma_qc_prep(qc);
1397 		return AC_ERR_OK;
1398 	}
1399 
1400 	cpb->resp_flags = NV_CPB_RESP_DONE;
1401 	wmb();
1402 	cpb->ctl_flags = 0;
1403 	wmb();
1404 
1405 	cpb->len		= 3;
1406 	cpb->tag		= qc->tag;
1407 	cpb->next_cpb_idx	= 0;
1408 
1409 	/* turn on NCQ flags for NCQ commands */
1410 	if (qc->tf.protocol == ATA_PROT_NCQ)
1411 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1412 
1413 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1414 
1415 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1416 
1417 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1418 		nv_adma_fill_sg(qc, cpb);
1419 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1420 	} else
1421 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1422 
1423 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1424 	   until we are finished filling in all of the contents */
1425 	wmb();
1426 	cpb->ctl_flags = ctl_flags;
1427 	wmb();
1428 	cpb->resp_flags = 0;
1429 
1430 	return AC_ERR_OK;
1431 }
1432 
1433 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1434 {
1435 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1436 	void __iomem *mmio = pp->ctl_block;
1437 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1438 
1439 	VPRINTK("ENTER\n");
1440 
1441 	/* We can't handle result taskfile with NCQ commands, since
1442 	   retrieving the taskfile switches us out of ADMA mode and would abort
1443 	   existing commands. */
1444 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1445 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1446 		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1447 		return AC_ERR_SYSTEM;
1448 	}
1449 
1450 	if (nv_adma_use_reg_mode(qc)) {
1451 		/* use ATA register mode */
1452 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1453 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1454 			(qc->flags & ATA_QCFLAG_DMAMAP));
1455 		nv_adma_register_mode(qc->ap);
1456 		return ata_bmdma_qc_issue(qc);
1457 	} else
1458 		nv_adma_mode(qc->ap);
1459 
1460 	/* write append register, command tag in lower 8 bits
1461 	   and (number of cpbs to append -1) in top 8 bits */
1462 	wmb();
1463 
1464 	if (curr_ncq != pp->last_issue_ncq) {
1465 		/* Seems to need some delay before switching between NCQ and
1466 		   non-NCQ commands, else we get command timeouts and such. */
1467 		udelay(20);
1468 		pp->last_issue_ncq = curr_ncq;
1469 	}
1470 
1471 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1472 
1473 	DPRINTK("Issued tag %u\n", qc->tag);
1474 
1475 	return 0;
1476 }
1477 
1478 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1479 {
1480 	struct ata_host *host = dev_instance;
1481 	unsigned int i;
1482 	unsigned int handled = 0;
1483 	unsigned long flags;
1484 
1485 	spin_lock_irqsave(&host->lock, flags);
1486 
1487 	for (i = 0; i < host->n_ports; i++) {
1488 		struct ata_port *ap = host->ports[i];
1489 		struct ata_queued_cmd *qc;
1490 
1491 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1492 		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1493 			handled += ata_bmdma_port_intr(ap, qc);
1494 		} else {
1495 			/*
1496 			 * No request pending?  Clear interrupt status
1497 			 * anyway, in case there's one pending.
1498 			 */
1499 			ap->ops->sff_check_status(ap);
1500 		}
1501 	}
1502 
1503 	spin_unlock_irqrestore(&host->lock, flags);
1504 
1505 	return IRQ_RETVAL(handled);
1506 }
1507 
1508 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1509 {
1510 	int i, handled = 0;
1511 
1512 	for (i = 0; i < host->n_ports; i++) {
1513 		handled += nv_host_intr(host->ports[i], irq_stat);
1514 		irq_stat >>= NV_INT_PORT_SHIFT;
1515 	}
1516 
1517 	return IRQ_RETVAL(handled);
1518 }
1519 
1520 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1521 {
1522 	struct ata_host *host = dev_instance;
1523 	u8 irq_stat;
1524 	irqreturn_t ret;
1525 
1526 	spin_lock(&host->lock);
1527 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1528 	ret = nv_do_interrupt(host, irq_stat);
1529 	spin_unlock(&host->lock);
1530 
1531 	return ret;
1532 }
1533 
1534 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1535 {
1536 	struct ata_host *host = dev_instance;
1537 	u8 irq_stat;
1538 	irqreturn_t ret;
1539 
1540 	spin_lock(&host->lock);
1541 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1542 	ret = nv_do_interrupt(host, irq_stat);
1543 	spin_unlock(&host->lock);
1544 
1545 	return ret;
1546 }
1547 
1548 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1549 {
1550 	if (sc_reg > SCR_CONTROL)
1551 		return -EINVAL;
1552 
1553 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1554 	return 0;
1555 }
1556 
1557 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1558 {
1559 	if (sc_reg > SCR_CONTROL)
1560 		return -EINVAL;
1561 
1562 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1563 	return 0;
1564 }
1565 
1566 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1567 			unsigned long deadline)
1568 {
1569 	struct ata_eh_context *ehc = &link->eh_context;
1570 
1571 	/* Do hardreset iff it's post-boot probing; please read the
1572 	 * comment above port ops for details.
1573 	 */
1574 	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1575 	    !ata_dev_enabled(link->device))
1576 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1577 				    NULL, NULL);
1578 	else {
1579 		const unsigned long *timing = sata_ehc_deb_timing(ehc);
1580 		int rc;
1581 
1582 		if (!(ehc->i.flags & ATA_EHI_QUIET))
1583 			ata_link_info(link,
1584 				      "nv: skipping hardreset on occupied port\n");
1585 
1586 		/* make sure the link is online */
1587 		rc = sata_link_resume(link, timing, deadline);
1588 		/* whine about phy resume failure but proceed */
1589 		if (rc && rc != -EOPNOTSUPP)
1590 			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1591 				      rc);
1592 	}
1593 
1594 	/* device signature acquisition is unreliable */
1595 	return -EAGAIN;
1596 }
1597 
1598 static void nv_nf2_freeze(struct ata_port *ap)
1599 {
1600 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1601 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1602 	u8 mask;
1603 
1604 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1605 	mask &= ~(NV_INT_ALL << shift);
1606 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1607 }
1608 
1609 static void nv_nf2_thaw(struct ata_port *ap)
1610 {
1611 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1612 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1613 	u8 mask;
1614 
1615 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1616 
1617 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1618 	mask |= (NV_INT_MASK << shift);
1619 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1620 }
1621 
1622 static void nv_ck804_freeze(struct ata_port *ap)
1623 {
1624 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1625 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1626 	u8 mask;
1627 
1628 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1629 	mask &= ~(NV_INT_ALL << shift);
1630 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1631 }
1632 
1633 static void nv_ck804_thaw(struct ata_port *ap)
1634 {
1635 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1636 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1637 	u8 mask;
1638 
1639 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1640 
1641 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1642 	mask |= (NV_INT_MASK << shift);
1643 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1644 }
1645 
1646 static void nv_mcp55_freeze(struct ata_port *ap)
1647 {
1648 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1649 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1650 	u32 mask;
1651 
1652 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1653 
1654 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1655 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1656 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1657 }
1658 
1659 static void nv_mcp55_thaw(struct ata_port *ap)
1660 {
1661 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1662 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1663 	u32 mask;
1664 
1665 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1666 
1667 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1668 	mask |= (NV_INT_MASK_MCP55 << shift);
1669 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1670 }
1671 
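/*
 * ADMA error handling: if commands were outstanding, dump the notifier,
 * general-control and per-CPB state for debugging, then force the port
 * back into register mode, invalidate every CPB so nothing half-issued
 * can execute, clear the CPB fetch count and pulse
 * NV_ADMA_CTL_CHANNEL_RESET before handing off to the stock BMDMA error
 * handler.
 */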
1672 static void nv_adma_error_handler(struct ata_port *ap)
1673 {
1674 	struct nv_adma_port_priv *pp = ap->private_data;
1675 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1676 		void __iomem *mmio = pp->ctl_block;
1677 		int i;
1678 		u16 tmp;
1679 
1680 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1681 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1682 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1683 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1684 			u32 status = readw(mmio + NV_ADMA_STAT);
1685 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1686 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1687 
1688 			ata_port_err(ap,
1689 				"EH in ADMA mode, notifier 0x%X "
1690 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1691 				"next cpb count 0x%X next cpb idx 0x%x\n",
1692 				notifier, notifier_error, gen_ctl, status,
1693 				cpb_count, next_cpb_idx);
1694 
1695 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1696 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1697 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1698 				    ap->link.sactive & (1 << i))
1699 					ata_port_err(ap,
1700 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1701 						i, cpb->ctl_flags, cpb->resp_flags);
1702 			}
1703 		}
1704 
1705 		/* Push us back into port register mode for error handling. */
1706 		nv_adma_register_mode(ap);
1707 
1708 		/* Mark all of the CPBs as invalid to prevent them from
1709 		   being executed */
1710 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1711 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1712 
1713 		/* clear CPB fetch count */
1714 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1715 
1716 		/* Reset channel */
1717 		tmp = readw(mmio + NV_ADMA_CTL);
1718 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1719 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1720 		udelay(1);
1721 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1722 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1723 	}
1724 
1725 	ata_bmdma_error_handler(ap);
1726 }
1727 
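/*
 * SWNCQ defer queue: a small FIFO of tags used when a new NCQ command
 * arrives while others are still in flight.  head/tail are free-running
 * counters masked with (ATA_MAX_QUEUE - 1) on access, and defer_bits
 * mirrors queue membership so stale entries can be caught.  For example
 * (hypothetical state), after queueing tags 3 and 7:
 *
 *	dq->defer_bits == 0x88, dq->tag[] holds {3, 7}, tail - head == 2
 *
 * nv_swncq_qc_from_dq() pops them back in issue order.
 */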
1728 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1729 {
1730 	struct nv_swncq_port_priv *pp = ap->private_data;
1731 	struct defer_queue *dq = &pp->defer_queue;
1732 
1733 	/* queue is full */
1734 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1735 	dq->defer_bits |= (1 << qc->tag);
1736 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1737 }
1738 
1739 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1740 {
1741 	struct nv_swncq_port_priv *pp = ap->private_data;
1742 	struct defer_queue *dq = &pp->defer_queue;
1743 	unsigned int tag;
1744 
1745 	if (dq->head == dq->tail)	/* null queue */
1746 		return NULL;
1747 
1748 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1749 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1750 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1751 	dq->defer_bits &= ~(1 << tag);
1752 
1753 	return ata_qc_from_tag(ap, tag);
1754 }
1755 
1756 static void nv_swncq_fis_reinit(struct ata_port *ap)
1757 {
1758 	struct nv_swncq_port_priv *pp = ap->private_data;
1759 
1760 	pp->dhfis_bits = 0;
1761 	pp->dmafis_bits = 0;
1762 	pp->sdbfis_bits = 0;
1763 	pp->ncq_flags = 0;
1764 }
1765 
1766 static void nv_swncq_pp_reinit(struct ata_port *ap)
1767 {
1768 	struct nv_swncq_port_priv *pp = ap->private_data;
1769 	struct defer_queue *dq = &pp->defer_queue;
1770 
1771 	dq->head = 0;
1772 	dq->tail = 0;
1773 	dq->defer_bits = 0;
1774 	pp->qc_active = 0;
1775 	pp->last_issue_tag = ATA_TAG_POISON;
1776 	nv_swncq_fis_reinit(ap);
1777 }
1778 
1779 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1780 {
1781 	struct nv_swncq_port_priv *pp = ap->private_data;
1782 
1783 	writew(fis, pp->irq_block);
1784 }
1785 
1786 static void __ata_bmdma_stop(struct ata_port *ap)
1787 {
1788 	struct ata_queued_cmd qc;
1789 
1790 	qc.ap = ap;
1791 	ata_bmdma_stop(&qc);
1792 }
1793 
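/*
 * Called from the SWNCQ error handler while NCQ commands are still
 * outstanding: dump the driver's per-tag FIS bookkeeping against the
 * hardware SActive register, then quiesce the port (reset the software
 * state, clear SFF and SWNCQ interrupt status, stop BMDMA) so that
 * libata EH can take over from a clean slate.
 */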
1794 static void nv_swncq_ncq_stop(struct ata_port *ap)
1795 {
1796 	struct nv_swncq_port_priv *pp = ap->private_data;
1797 	unsigned int i;
1798 	u32 sactive;
1799 	u32 done_mask;
1800 
1801 	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1802 		     ap->qc_active, ap->link.sactive);
1803 	ata_port_err(ap,
1804 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1805 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1806 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1807 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1808 
1809 	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1810 		     ap->ops->sff_check_status(ap),
1811 		     ioread8(ap->ioaddr.error_addr));
1812 
1813 	sactive = readl(pp->sactive_block);
1814 	done_mask = pp->qc_active ^ sactive;
1815 
1816 	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1817 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1818 		u8 err = 0;
1819 		if (pp->qc_active & (1 << i))
1820 			err = 0;
1821 		else if (done_mask & (1 << i))
1822 			err = 1;
1823 		else
1824 			continue;
1825 
1826 		ata_port_err(ap,
1827 			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1828 			     (pp->dhfis_bits >> i) & 0x1,
1829 			     (pp->dmafis_bits >> i) & 0x1,
1830 			     (pp->sdbfis_bits >> i) & 0x1,
1831 			     (sactive >> i) & 0x1,
1832 			     (err ? "error! tag doesn't exist" : " "));
1833 	}
1834 
1835 	nv_swncq_pp_reinit(ap);
1836 	ap->ops->sff_irq_clear(ap);
1837 	__ata_bmdma_stop(ap);
1838 	nv_swncq_irq_clear(ap, 0xffff);
1839 }
1840 
1841 static void nv_swncq_error_handler(struct ata_port *ap)
1842 {
1843 	struct ata_eh_context *ehc = &ap->link.eh_context;
1844 
1845 	if (ap->link.sactive) {
1846 		nv_swncq_ncq_stop(ap);
1847 		ehc->i.action |= ATA_EH_RESET;
1848 	}
1849 
1850 	ata_bmdma_error_handler(ap);
1851 }
1852 
1853 #ifdef CONFIG_PM
1854 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1855 {
1856 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1857 	u32 tmp;
1858 
1859 	/* clear irq */
1860 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1861 
1862 	/* disable irq */
1863 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1864 
1865 	/* disable swncq */
1866 	tmp = readl(mmio + NV_CTL_MCP55);
1867 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1868 	writel(tmp, mmio + NV_CTL_MCP55);
1869 
1870 	return 0;
1871 }
1872 
1873 static int nv_swncq_port_resume(struct ata_port *ap)
1874 {
1875 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1876 	u32 tmp;
1877 
1878 	/* clear irq */
1879 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1880 
1881 	/* enable irq */
1882 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1883 
1884 	/* enable swncq */
1885 	tmp = readl(mmio + NV_CTL_MCP55);
1886 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1887 
1888 	return 0;
1889 }
1890 #endif
1891 
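/*
 * One-time SWNCQ setup at probe time: clear an undocumented config bit
 * (the "ECO 398" workaround below), set the SWNCQ enable bits for both
 * ports in the MCP55 control register, and unmask the per-port interrupt
 * sources.  The 0x00fd00fd enable mask is taken as-is from the original
 * driver; it presumably unmasks the wanted FIS/hotplug sources in each
 * port's 16-bit field.
 */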
1892 static void nv_swncq_host_init(struct ata_host *host)
1893 {
1894 	u32 tmp;
1895 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1896 	struct pci_dev *pdev = to_pci_dev(host->dev);
1897 	u8 regval;
1898 
1899 	/* disable ECO 398 */
1900 	pci_read_config_byte(pdev, 0x7f, &regval);
1901 	regval &= ~(1 << 7);
1902 	pci_write_config_byte(pdev, 0x7f, regval);
1903 
1904 	/* enable swncq */
1905 	tmp = readl(mmio + NV_CTL_MCP55);
1906 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1907 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1908 
1909 	/* enable irq intr */
1910 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1911 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1912 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1913 
1914 	/*  clear port irq */
1915 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1916 }
1917 
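/*
 * Per-device workaround hook: Maxtor drives are known to misbehave with
 * SWNCQ on MCP51, and on MCP55 up to revision A2, so for those
 * combinations the queue depth is forced to 1, which effectively disables
 * NCQ for that device while leaving the rest of the setup untouched.
 */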
1918 static int nv_swncq_slave_config(struct scsi_device *sdev)
1919 {
1920 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1921 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1922 	struct ata_device *dev;
1923 	int rc;
1924 	u8 rev;
1925 	u8 check_maxtor = 0;
1926 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1927 
1928 	rc = ata_scsi_slave_config(sdev);
1929 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1930 		/* Not a proper libata device, ignore */
1931 		return rc;
1932 
1933 	dev = &ap->link.device[sdev->id];
1934 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1935 		return rc;
1936 
1937 	/* if MCP51 and Maxtor, then disable ncq */
1938 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1939 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1940 		check_maxtor = 1;
1941 
1942 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1943 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1944 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1945 		pci_read_config_byte(pdev, 0x8, &rev);
1946 		if (rev <= 0xa2)
1947 			check_maxtor = 1;
1948 	}
1949 
1950 	if (!check_maxtor)
1951 		return rc;
1952 
1953 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1954 
1955 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1956 		ata_scsi_change_queue_depth(sdev, 1);
1957 		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1958 			       sdev->queue_depth);
1959 	}
1960 
1961 	return rc;
1962 }
1963 
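/*
 * Port init for SWNCQ: besides the normal BMDMA resources (kept so the
 * driver can fall back to plain BMDMA for non-NCQ commands), allocate one
 * PRD table per possible tag in a single coherent buffer
 * (ATA_MAX_QUEUE * ATA_PRD_TBL_SZ) and cache pointers to the port's
 * SActive, interrupt-status and tag registers.
 */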
1964 static int nv_swncq_port_start(struct ata_port *ap)
1965 {
1966 	struct device *dev = ap->host->dev;
1967 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1968 	struct nv_swncq_port_priv *pp;
1969 	int rc;
1970 
1971 	/* we might fallback to bmdma, allocate bmdma resources */
1972 	rc = ata_bmdma_port_start(ap);
1973 	if (rc)
1974 		return rc;
1975 
1976 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1977 	if (!pp)
1978 		return -ENOMEM;
1979 
1980 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1981 				      &pp->prd_dma, GFP_KERNEL);
1982 	if (!pp->prd)
1983 		return -ENOMEM;
1984 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1985 
1986 	ap->private_data = pp;
1987 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1988 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1989 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1990 
1991 	return 0;
1992 }
1993 
1994 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1995 {
1996 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1997 		ata_bmdma_qc_prep(qc);
1998 		return AC_ERR_OK;
1999 	}
2000 
2001 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2002 		return AC_ERR_OK;
2003 
2004 	nv_swncq_fill_sg(qc);
2005 
2006 	return AC_ERR_OK;
2007 }
2008 
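/*
 * Build the legacy BMDMA PRD table for this tag.  Each scatterlist entry
 * is chopped so that no PRD crosses a 64K boundary, and the last PRD is
 * marked with ATA_PRD_EOT.  Worked example (made-up addresses): a 10 KiB
 * segment at DMA address 0x1234fc00 becomes two PRDs, 0x400 bytes at
 * 0x1234fc00 and 0x2400 bytes at 0x12350000.
 */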
2009 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2010 {
2011 	struct ata_port *ap = qc->ap;
2012 	struct scatterlist *sg;
2013 	struct nv_swncq_port_priv *pp = ap->private_data;
2014 	struct ata_bmdma_prd *prd;
2015 	unsigned int si, idx;
2016 
2017 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
2018 
2019 	idx = 0;
2020 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2021 		u32 addr, offset;
2022 		u32 sg_len, len;
2023 
2024 		addr = (u32)sg_dma_address(sg);
2025 		sg_len = sg_dma_len(sg);
2026 
2027 		while (sg_len) {
2028 			offset = addr & 0xffff;
2029 			len = sg_len;
2030 			if ((offset + sg_len) > 0x10000)
2031 				len = 0x10000 - offset;
2032 
2033 			prd[idx].addr = cpu_to_le32(addr);
2034 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2035 
2036 			idx++;
2037 			sg_len -= len;
2038 			addr += len;
2039 		}
2040 	}
2041 
2042 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2043 }
2044 
2045 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2046 					  struct ata_queued_cmd *qc)
2047 {
2048 	struct nv_swncq_port_priv *pp = ap->private_data;
2049 
2050 	if (qc == NULL)
2051 		return 0;
2052 
2053 	DPRINTK("Enter\n");
2054 
2055 	writel((1 << qc->tag), pp->sactive_block);
2056 	pp->last_issue_tag = qc->tag;
2057 	pp->dhfis_bits &= ~(1 << qc->tag);
2058 	pp->dmafis_bits &= ~(1 << qc->tag);
2059 	pp->qc_active |= (0x1 << qc->tag);
2060 
2061 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2062 	ap->ops->sff_exec_command(ap, &qc->tf);
2063 
2064 	DPRINTK("Issued tag %u\n", qc->tag);
2065 
2066 	return 0;
2067 }
2068 
2069 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2070 {
2071 	struct ata_port *ap = qc->ap;
2072 	struct nv_swncq_port_priv *pp = ap->private_data;
2073 
2074 	if (qc->tf.protocol != ATA_PROT_NCQ)
2075 		return ata_bmdma_qc_issue(qc);
2076 
2077 	DPRINTK("Enter\n");
2078 
2079 	if (!pp->qc_active)
2080 		nv_swncq_issue_atacmd(ap, qc);
2081 	else
2082 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2083 
2084 	return 0;
2085 }
2086 
2087 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2088 {
2089 	u32 serror;
2090 	struct ata_eh_info *ehi = &ap->link.eh_info;
2091 
2092 	ata_ehi_clear_desc(ehi);
2093 
2094 	/* SError needs to be cleared; otherwise, the port might lock up */
2095 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2096 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2097 
2098 	/* analyze @fis */
2099 	if (fis & NV_SWNCQ_IRQ_ADDED)
2100 		ata_ehi_push_desc(ehi, "hot plug");
2101 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2102 		ata_ehi_push_desc(ehi, "hot unplug");
2103 
2104 	ata_ehi_hotplugged(ehi);
2105 
2106 	/* okay, let's hand over to EH */
2107 	ehi->serror |= serror;
2108 
2109 	ata_port_freeze(ap);
2110 }
2111 
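/*
 * Handle a Set Device Bits FIS: tags present in our qc_active bookkeeping
 * but no longer in the hardware SActive register have completed, so
 * compute that difference, retire those commands with
 * ata_qc_complete_multiple() and clear their per-tag FIS bits.  If an
 * active command never produced its D2H Register FIS (or a backout was
 * seen), reissue the last-issued command; otherwise pull the next
 * deferred command, if any, off the defer queue.
 */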
2112 static int nv_swncq_sdbfis(struct ata_port *ap)
2113 {
2114 	struct ata_queued_cmd *qc;
2115 	struct nv_swncq_port_priv *pp = ap->private_data;
2116 	struct ata_eh_info *ehi = &ap->link.eh_info;
2117 	u32 sactive;
2118 	u32 done_mask;
2119 	u8 host_stat;
2120 	u8 lack_dhfis = 0;
2121 
2122 	host_stat = ap->ops->bmdma_status(ap);
2123 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2124 		/* error when transferring data to/from memory */
2125 		ata_ehi_clear_desc(ehi);
2126 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2127 		ehi->err_mask |= AC_ERR_HOST_BUS;
2128 		ehi->action |= ATA_EH_RESET;
2129 		return -EINVAL;
2130 	}
2131 
2132 	ap->ops->sff_irq_clear(ap);
2133 	__ata_bmdma_stop(ap);
2134 
2135 	sactive = readl(pp->sactive_block);
2136 	done_mask = pp->qc_active ^ sactive;
2137 
2138 	pp->qc_active &= ~done_mask;
2139 	pp->dhfis_bits &= ~done_mask;
2140 	pp->dmafis_bits &= ~done_mask;
2141 	pp->sdbfis_bits |= done_mask;
2142 	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2143 
2144 	if (!ap->qc_active) {
2145 		DPRINTK("over\n");
2146 		nv_swncq_pp_reinit(ap);
2147 		return 0;
2148 	}
2149 
2150 	if (pp->qc_active & pp->dhfis_bits)
2151 		return 0;
2152 
2153 	if ((pp->ncq_flags & ncq_saw_backout) ||
2154 	    (pp->qc_active ^ pp->dhfis_bits))
2155 		/* if the controller didn't get a Device-to-Host Register FIS
2156 		 * for a command, the driver needs to reissue that command.
2157 		 */
2158 		lack_dhfis = 1;
2159 
2160 	DPRINTK("id 0x%x QC: qc_active 0x%x, "
2161 		"SWNCQ:qc_active 0x%X defer_bits %X "
2162 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2163 		ap->print_id, ap->qc_active, pp->qc_active,
2164 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2165 		pp->dmafis_bits, pp->last_issue_tag);
2166 
2167 	nv_swncq_fis_reinit(ap);
2168 
2169 	if (lack_dhfis) {
2170 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2171 		nv_swncq_issue_atacmd(ap, qc);
2172 		return 0;
2173 	}
2174 
2175 	if (pp->defer_queue.defer_bits) {
2176 		/* send deferral queue command */
2177 		qc = nv_swncq_qc_from_dq(ap);
2178 		WARN_ON(qc == NULL);
2179 		nv_swncq_issue_atacmd(ap, qc);
2180 	}
2181 
2182 	return 0;
2183 }
2184 
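/*
 * When a DMA Setup FIS arrives, the controller latches the tag of the
 * command it belongs to in the per-port tag register; bits 2-6 of that
 * byte appear to carry the tag value, hence the shift and the 0x1f mask.
 */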
2185 static inline u32 nv_swncq_tag(struct ata_port *ap)
2186 {
2187 	struct nv_swncq_port_priv *pp = ap->private_data;
2188 	u32 tag;
2189 
2190 	tag = readb(pp->tag_block) >> 2;
2191 	return (tag & 0x1f);
2192 }
2193 
2194 static void nv_swncq_dmafis(struct ata_port *ap)
2195 {
2196 	struct ata_queued_cmd *qc;
2197 	unsigned int rw;
2198 	u8 dmactl;
2199 	u32 tag;
2200 	struct nv_swncq_port_priv *pp = ap->private_data;
2201 
2202 	__ata_bmdma_stop(ap);
2203 	tag = nv_swncq_tag(ap);
2204 
2205 	DPRINTK("dma setup tag 0x%x\n", tag);
2206 	qc = ata_qc_from_tag(ap, tag);
2207 
2208 	if (unlikely(!qc))
2209 		return;
2210 
2211 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2212 
2213 	/* load PRD table addr. */
2214 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2215 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2216 
2217 	/* specify data direction, triple-check start bit is clear */
2218 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2219 	dmactl &= ~ATA_DMA_WR;
2220 	if (!rw)
2221 		dmactl |= ATA_DMA_WR;
2222 
2223 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2224 }
2225 
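/*
 * Per-port SWNCQ interrupt handling, in the order the checks are made
 * below: ack the FIS bits, bail out if the port is frozen, freeze the
 * port for EH on hotplug events or device errors, note backout
 * notifications, process Set Device Bits FISes (command completions),
 * record D2H Register FISes (command accepted, possibly issue the next
 * deferred command) and react to DMA Setup FISes by programming and
 * starting the BMDMA engine.
 */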
2226 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2227 {
2228 	struct nv_swncq_port_priv *pp = ap->private_data;
2229 	struct ata_queued_cmd *qc;
2230 	struct ata_eh_info *ehi = &ap->link.eh_info;
2231 	u32 serror;
2232 	u8 ata_stat;
2233 
2234 	ata_stat = ap->ops->sff_check_status(ap);
2235 	nv_swncq_irq_clear(ap, fis);
2236 	if (!fis)
2237 		return;
2238 
2239 	if (ap->pflags & ATA_PFLAG_FROZEN)
2240 		return;
2241 
2242 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2243 		nv_swncq_hotplug(ap, fis);
2244 		return;
2245 	}
2246 
2247 	if (!pp->qc_active)
2248 		return;
2249 
2250 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2251 		return;
2252 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2253 
2254 	if (ata_stat & ATA_ERR) {
2255 		ata_ehi_clear_desc(ehi);
2256 		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2257 		ehi->err_mask |= AC_ERR_DEV;
2258 		ehi->serror |= serror;
2259 		ehi->action |= ATA_EH_RESET;
2260 		ata_port_freeze(ap);
2261 		return;
2262 	}
2263 
2264 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2265 		/* On a backout notification, the driver must issue
2266 		 * the command again some time later.
2267 		 */
2268 		pp->ncq_flags |= ncq_saw_backout;
2269 	}
2270 
2271 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2272 		pp->ncq_flags |= ncq_saw_sdb;
2273 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2274 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2275 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2276 			pp->dmafis_bits, readl(pp->sactive_block));
2277 		if (nv_swncq_sdbfis(ap) < 0)
2278 			goto irq_error;
2279 	}
2280 
2281 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2282 		/* The interrupt indicates the new command
2283 		 * was transmitted correctly to the drive.
2284 		 */
2285 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2286 		pp->ncq_flags |= ncq_saw_d2h;
2287 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2288 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2289 			ehi->err_mask |= AC_ERR_HSM;
2290 			ehi->action |= ATA_EH_RESET;
2291 			goto irq_error;
2292 		}
2293 
2294 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2295 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2296 			ata_stat = ap->ops->sff_check_status(ap);
2297 			if (ata_stat & ATA_BUSY)
2298 				goto irq_exit;
2299 
2300 			if (pp->defer_queue.defer_bits) {
2301 				DPRINTK("send next command\n");
2302 				qc = nv_swncq_qc_from_dq(ap);
2303 				nv_swncq_issue_atacmd(ap, qc);
2304 			}
2305 		}
2306 	}
2307 
2308 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2309 		/* program the dma controller with appropriate PRD buffers
2310 		 * and start the DMA transfer for the requested command.
2311 		 */
2312 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2313 		pp->ncq_flags |= ncq_saw_dmas;
2314 		nv_swncq_dmafis(ap);
2315 	}
2316 
2317 irq_exit:
2318 	return;
2319 irq_error:
2320 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2321 	ata_port_freeze(ap);
2322 	return;
2323 }
2324 
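/*
 * Top-level MCP55 interrupt handler for SWNCQ mode: read the shared
 * 32-bit status word once and give each port its own slice, shifting by
 * NV_INT_PORT_SHIFT_MCP55 per port.  Ports with NCQ commands in flight go
 * through the SWNCQ state machine above; idle ports have their
 * non-hotplug status bits cleared and are serviced by the generic
 * nv_host_intr() path.
 */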
2325 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2326 {
2327 	struct ata_host *host = dev_instance;
2328 	unsigned int i;
2329 	unsigned int handled = 0;
2330 	unsigned long flags;
2331 	u32 irq_stat;
2332 
2333 	spin_lock_irqsave(&host->lock, flags);
2334 
2335 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2336 
2337 	for (i = 0; i < host->n_ports; i++) {
2338 		struct ata_port *ap = host->ports[i];
2339 
2340 		if (ap->link.sactive) {
2341 			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2342 			handled = 1;
2343 		} else {
2344 			if (irq_stat)	/* reserve Hotplug */
2345 				nv_swncq_irq_clear(ap, 0xfff0);
2346 
2347 			handled += nv_host_intr(ap, (u8)irq_stat);
2348 		}
2349 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2350 	}
2351 
2352 	spin_unlock_irqrestore(&host->lock, flags);
2353 
2354 	return IRQ_RETVAL(handled);
2355 }
2356 
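/*
 * Probe: check that all six BARs are populated (otherwise this is the
 * IDE-mode personality of the chip and is left alone), pick the port_info
 * based on chip type and the adma/swncq module parameters, map MMIO BAR 5
 * for the SCR and control registers, enable the extended SATA register
 * space on CK804 and newer, run the mode-specific host init, optionally
 * enable MSI and finally activate the host with the interrupt handler
 * chosen for that mode.
 */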
2357 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2358 {
2359 	const struct ata_port_info *ppi[] = { NULL, NULL };
2360 	struct nv_pi_priv *ipriv;
2361 	struct ata_host *host;
2362 	struct nv_host_priv *hpriv;
2363 	int rc;
2364 	u32 bar;
2365 	void __iomem *base;
2366 	unsigned long type = ent->driver_data;
2367 
2368 	// Make sure this is a SATA controller by counting the number of BARs
2369 	// (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2370 	// it's an IDE controller and we ignore it.
2371 	for (bar = 0; bar < 6; bar++)
2372 		if (pci_resource_start(pdev, bar) == 0)
2373 			return -ENODEV;
2374 
2375 	ata_print_version_once(&pdev->dev, DRV_VERSION);
2376 
2377 	rc = pcim_enable_device(pdev);
2378 	if (rc)
2379 		return rc;
2380 
2381 	/* determine type and allocate host */
2382 	if (type == CK804 && adma_enabled) {
2383 		dev_notice(&pdev->dev, "Using ADMA mode\n");
2384 		type = ADMA;
2385 	} else if (type == MCP5x && swncq_enabled) {
2386 		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2387 		type = SWNCQ;
2388 	}
2389 
2390 	ppi[0] = &nv_port_info[type];
2391 	ipriv = ppi[0]->private_data;
2392 	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2393 	if (rc)
2394 		return rc;
2395 
2396 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2397 	if (!hpriv)
2398 		return -ENOMEM;
2399 	hpriv->type = type;
2400 	host->private_data = hpriv;
2401 
2402 	/* request and iomap NV_MMIO_BAR */
2403 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2404 	if (rc)
2405 		return rc;
2406 
2407 	/* configure SCR access */
2408 	base = host->iomap[NV_MMIO_BAR];
2409 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2410 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2411 
2412 	/* enable SATA space for CK804 */
2413 	if (type >= CK804) {
2414 		u8 regval;
2415 
2416 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2417 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2418 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2419 	}
2420 
2421 	/* init ADMA */
2422 	if (type == ADMA) {
2423 		rc = nv_adma_host_init(host);
2424 		if (rc)
2425 			return rc;
2426 	} else if (type == SWNCQ)
2427 		nv_swncq_host_init(host);
2428 
2429 	if (msi_enabled) {
2430 		dev_notice(&pdev->dev, "Using MSI\n");
2431 		pci_enable_msi(pdev);
2432 	}
2433 
2434 	pci_set_master(pdev);
2435 	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2436 }
2437 
2438 #ifdef CONFIG_PM_SLEEP
2439 static int nv_pci_device_resume(struct pci_dev *pdev)
2440 {
2441 	struct ata_host *host = pci_get_drvdata(pdev);
2442 	struct nv_host_priv *hpriv = host->private_data;
2443 	int rc;
2444 
2445 	rc = ata_pci_device_do_resume(pdev);
2446 	if (rc)
2447 		return rc;
2448 
2449 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2450 		if (hpriv->type >= CK804) {
2451 			u8 regval;
2452 
2453 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2454 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2455 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2456 		}
2457 		if (hpriv->type == ADMA) {
2458 			u32 tmp32;
2459 			struct nv_adma_port_priv *pp;
2460 			/* enable/disable ADMA on the ports appropriately */
2461 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2462 
2463 			pp = host->ports[0]->private_data;
2464 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2465 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2466 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2467 			else
2468 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2469 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2470 			pp = host->ports[1]->private_data;
2471 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2472 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2473 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2474 			else
2475 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2476 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2477 
2478 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2479 		}
2480 	}
2481 
2482 	ata_host_resume(host);
2483 
2484 	return 0;
2485 }
2486 #endif
2487 
2488 static void nv_ck804_host_stop(struct ata_host *host)
2489 {
2490 	struct pci_dev *pdev = to_pci_dev(host->dev);
2491 	u8 regval;
2492 
2493 	/* disable SATA space for CK804 */
2494 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2495 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2496 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2497 }
2498 
2499 static void nv_adma_host_stop(struct ata_host *host)
2500 {
2501 	struct pci_dev *pdev = to_pci_dev(host->dev);
2502 	u32 tmp32;
2503 
2504 	/* disable ADMA on the ports */
2505 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2506 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2507 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2508 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2509 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2510 
2511 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2512 
2513 	nv_ck804_host_stop(host);
2514 }
2515 
2516 module_pci_driver(nv_pci_driver);
2517 
2518 module_param_named(adma, adma_enabled, bool, 0444);
2519 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2520 module_param_named(swncq, swncq_enabled, bool, 0444);
2521 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2522 module_param_named(msi, msi_enabled, bool, 0444);
2523 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2524