1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/driver-api/libata.rst
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a similar
28  *  fashion to other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/gfp.h>
42 #include <linux/pci.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.5"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55 
56 enum {
57 	NV_MMIO_BAR			= 5,
58 
59 	NV_PORTS			= 2,
60 	NV_PIO_MASK			= ATA_PIO4,
61 	NV_MWDMA_MASK			= ATA_MWDMA2,
62 	NV_UDMA_MASK			= ATA_UDMA6,
63 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
64 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
65 
66 	/* INT_STATUS/ENABLE */
67 	NV_INT_STATUS			= 0x10,
68 	NV_INT_ENABLE			= 0x11,
69 	NV_INT_STATUS_CK804		= 0x440,
70 	NV_INT_ENABLE_CK804		= 0x441,
71 
72 	/* INT_STATUS/ENABLE bits */
73 	NV_INT_DEV			= 0x01,
74 	NV_INT_PM			= 0x02,
75 	NV_INT_ADDED			= 0x04,
76 	NV_INT_REMOVED			= 0x08,
77 
78 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
79 
80 	NV_INT_ALL			= 0x0f,
81 	NV_INT_MASK			= NV_INT_DEV |
82 					  NV_INT_ADDED | NV_INT_REMOVED,
83 
84 	/* INT_CONFIG */
85 	NV_INT_CONFIG			= 0x12,
86 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
87 
88 	// For PCI config register 20
89 	NV_MCP_SATA_CFG_20		= 0x50,
90 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
92 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
93 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
94 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
95 
96 	NV_ADMA_MAX_CPBS		= 32,
97 	NV_ADMA_CPB_SZ			= 128,
98 	NV_ADMA_APRD_SZ			= 16,
99 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
100 					   NV_ADMA_APRD_SZ,
101 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
102 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
104 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105 
106 	/* BAR5 offset to ADMA general registers */
107 	NV_ADMA_GEN			= 0x400,
108 	NV_ADMA_GEN_CTL			= 0x00,
109 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
110 
111 	/* BAR5 offset to ADMA ports */
112 	NV_ADMA_PORT			= 0x480,
113 
114 	/* size of ADMA port register space  */
115 	NV_ADMA_PORT_SIZE		= 0x100,
116 
117 	/* ADMA port registers */
118 	NV_ADMA_CTL			= 0x40,
119 	NV_ADMA_CPB_COUNT		= 0x42,
120 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
121 	NV_ADMA_STAT			= 0x44,
122 	NV_ADMA_CPB_BASE_LOW		= 0x48,
123 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
124 	NV_ADMA_APPEND			= 0x50,
125 	NV_ADMA_NOTIFIER		= 0x68,
126 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
127 
128 	/* NV_ADMA_CTL register bits */
129 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
130 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
131 	NV_ADMA_CTL_GO			= (1 << 7),
132 	NV_ADMA_CTL_AIEN		= (1 << 8),
133 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
134 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
135 
136 	/* CPB response flag bits */
137 	NV_CPB_RESP_DONE		= (1 << 0),
138 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
139 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
140 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
141 
142 	/* CPB control flag bits */
143 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
144 	NV_CPB_CTL_QUEUE		= (1 << 1),
145 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
146 	NV_CPB_CTL_IEN			= (1 << 3),
147 	NV_CPB_CTL_FPDMA		= (1 << 4),
148 
149 	/* APRD flags */
150 	NV_APRD_WRITE			= (1 << 1),
151 	NV_APRD_END			= (1 << 2),
152 	NV_APRD_CONT			= (1 << 3),
153 
154 	/* NV_ADMA_STAT flags */
155 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
156 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
157 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
158 	NV_ADMA_STAT_CPBERR		= (1 << 4),
159 	NV_ADMA_STAT_SERROR		= (1 << 5),
160 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
161 	NV_ADMA_STAT_IDLE		= (1 << 8),
162 	NV_ADMA_STAT_LEGACY		= (1 << 9),
163 	NV_ADMA_STAT_STOPPED		= (1 << 10),
164 	NV_ADMA_STAT_DONE		= (1 << 12),
165 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
166 					  NV_ADMA_STAT_TIMEOUT,
167 
168 	/* port flags */
169 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
170 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
171 
172 	/* MCP55 reg offset */
173 	NV_CTL_MCP55			= 0x400,
174 	NV_INT_STATUS_MCP55		= 0x440,
175 	NV_INT_ENABLE_MCP55		= 0x444,
176 	NV_NCQ_REG_MCP55		= 0x448,
177 
178 	/* MCP55 */
179 	NV_INT_ALL_MCP55		= 0xffff,
180 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
181 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
182 
183 	/* SWNCQ ENABLE BITS */
184 	NV_CTL_PRI_SWNCQ		= 0x02,
185 	NV_CTL_SEC_SWNCQ		= 0x04,
186 
187 	/* SW NCQ status bits */
188 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
189 	NV_SWNCQ_IRQ_PM			= (1 << 1),
190 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
191 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
192 
193 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
194 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
195 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
196 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
197 
198 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
199 					  NV_SWNCQ_IRQ_REMOVED,
200 
201 };
202 
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 	__le64			addr;
206 	__le32			len;
207 	u8			flags;
208 	u8			packet_len;
209 	__le16			reserved;
210 };
211 
212 enum nv_adma_regbits {
213 	CMDEND	= (1 << 15),		/* end of command list */
214 	WNB	= (1 << 14),		/* wait-not-BSY */
215 	IGN	= (1 << 13),		/* ignore this entry */
216 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
217 	DA2	= (1 << (2 + 8)),
218 	DA1	= (1 << (1 + 8)),
219 	DA0	= (1 << (0 + 8)),
220 };
221 
222 /* ADMA Command Parameter Block
223    The first 5 SG segments are stored inside the Command Parameter Block itself.
224    If there are more than 5 segments, the remainder are stored in a separate
225    memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 	u8			resp_flags;    /* 0 */
228 	u8			reserved1;     /* 1 */
229 	u8			ctl_flags;     /* 2 */
230 	/* len is length of taskfile in 64 bit words */
231 	u8			len;		/* 3  */
232 	u8			tag;           /* 4 */
233 	u8			next_cpb_idx;  /* 5 */
234 	__le16			reserved2;     /* 6-7 */
235 	__le16			tf[12];        /* 8-31 */
236 	struct nv_adma_prd	aprd[5];       /* 32-111 */
237 	__le64			next_aprd;     /* 112-119 */
238 	__le64			reserved3;     /* 120-127 */
239 };
240 
241 
242 struct nv_adma_port_priv {
243 	struct nv_adma_cpb	*cpb;
244 	dma_addr_t		cpb_dma;
245 	struct nv_adma_prd	*aprd;
246 	dma_addr_t		aprd_dma;
247 	void __iomem		*ctl_block;
248 	void __iomem		*gen_block;
249 	void __iomem		*notifier_clear_block;
250 	u64			adma_dma_mask;
251 	u8			flags;
252 	int			last_issue_ncq;
253 };
254 
255 struct nv_host_priv {
256 	unsigned long		type;
257 };
258 
259 struct defer_queue {
260 	u32		defer_bits;
261 	unsigned int	head;
262 	unsigned int	tail;
263 	unsigned int	tag[ATA_MAX_QUEUE];
264 };
265 
266 enum ncq_saw_flag_list {
267 	ncq_saw_d2h	= (1U << 0),
268 	ncq_saw_dmas	= (1U << 1),
269 	ncq_saw_sdb	= (1U << 2),
270 	ncq_saw_backout	= (1U << 3),
271 };
272 
273 struct nv_swncq_port_priv {
274 	struct ata_bmdma_prd *prd;	 /* our SG list */
275 	dma_addr_t	prd_dma; /* and its DMA mapping */
276 	void __iomem	*sactive_block;
277 	void __iomem	*irq_block;
278 	void __iomem	*tag_block;
279 	u32		qc_active;
280 
281 	unsigned int	last_issue_tag;
282 
283 	/* FIFO circular queue to store deferred commands */
284 	struct defer_queue defer_queue;
285 
286 	/* for NCQ interrupt analysis */
287 	u32		dhfis_bits;
288 	u32		dmafis_bits;
289 	u32		sdbfis_bits;
290 
291 	unsigned int	ncq_flags;
292 };
293 
294 
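/* Per-port ADMA interrupt bit in the general control register:
   bit 19 for port 0, bit 31 for port 1 (12 bits apart). */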
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296 
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM_SLEEP
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 
308 static int nv_hardreset(struct ata_link *link, unsigned int *class,
309 			unsigned long deadline);
310 static void nv_nf2_freeze(struct ata_port *ap);
311 static void nv_nf2_thaw(struct ata_port *ap);
312 static void nv_ck804_freeze(struct ata_port *ap);
313 static void nv_ck804_thaw(struct ata_port *ap);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332 
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347 
348 enum nv_host_type
349 {
350 	GENERIC,
351 	NFORCE2,
352 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
353 	CK804,
354 	ADMA,
355 	MCP5x,
356 	SWNCQ,
357 };
358 
359 static const struct pci_device_id nv_pci_tbl[] = {
360 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
373 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
374 
375 	{ } /* terminate list */
376 };
377 
378 static struct pci_driver nv_pci_driver = {
379 	.name			= DRV_NAME,
380 	.id_table		= nv_pci_tbl,
381 	.probe			= nv_init_one,
382 #ifdef CONFIG_PM_SLEEP
383 	.suspend		= ata_pci_device_suspend,
384 	.resume			= nv_pci_device_resume,
385 #endif
386 	.remove			= ata_pci_remove_one,
387 };
388 
389 static struct scsi_host_template nv_sht = {
390 	ATA_BMDMA_SHT(DRV_NAME),
391 };
392 
393 static struct scsi_host_template nv_adma_sht = {
394 	ATA_NCQ_SHT(DRV_NAME),
395 	.can_queue		= NV_ADMA_MAX_CPBS,
396 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
397 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
398 	.slave_configure	= nv_adma_slave_config,
399 };
400 
401 static struct scsi_host_template nv_swncq_sht = {
402 	ATA_NCQ_SHT(DRV_NAME),
403 	.can_queue		= ATA_MAX_QUEUE - 1,
404 	.sg_tablesize		= LIBATA_MAX_PRD,
405 	.dma_boundary		= ATA_DMA_BOUNDARY,
406 	.slave_configure	= nv_swncq_slave_config,
407 };
408 
409 /*
410  * NV SATA controllers have various problems with the hardreset
411  * protocol, depending on the specific controller and device.
412  *
413  * GENERIC:
414  *
415  *  bko11195 reports that the link doesn't come online after hardreset
416  *  on generic nv's, and there have been several other similar reports
417  *  on linux-ide.
418  *
419  *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
420  *  softreset.
421  *
422  * NF2/3:
423  *
424  *  bko3352 reports nf2/3 controllers can't determine device signature
425  *  reliably after hardreset.  The following thread reports detection
426  *  failure on cold boot with the standard debouncing timing.
427  *
428  *  http://thread.gmane.org/gmane.linux.ide/34098
429  *
430  *  bko12176 reports that hardreset fails to bring up the link during
431  *  boot on nf2.
432  *
433  * CK804:
434  *
435  *  For initial probing after boot and hot plugging, hardreset mostly
436  *  works fine on CK804, but curiously, reprobing an occupied port via
437  *  rescan or rmmod/insmod fails to acquire the initial D2H Reg FIS in
438  *  a somewhat nondeterministic way.
439  *
440  * SWNCQ:
441  *
442  *  bko12351 reports that when SWNCQ is enabled, hardreset is needed
443  *  for hotplug to work, yet it can't report the proper signature,
444  *  which suggests that mcp5x is closer to nf2 as far as reset
445  *  quirkiness is concerned.
446  *
447  *  bko12703 reports that boot probing fails for intel SSD with
448  *  hardreset.  Link fails to come online.  Softreset works fine.
449  *
450  * The failures are varied but the following patterns seem true for
451  * all flavors.
452  *
453  * - Softreset during boot always works.
454  *
455  * - Hardreset during boot sometimes fails to bring up the link on
456  *   certain combinations, and device signature acquisition is
457  *   unreliable.
458  *
459  * - Hardreset is often necessary after hotplug.
460  *
461  * So, preferring softreset for boot probing and error handling (as
462  * hardreset might bring down the link) but using hardreset for
463  * post-boot probing should work around the above issues in most
464  * cases.  Define nv_hardreset() which only kicks in for post-boot
465  * probing and use it for all variants.
466  */
467 static struct ata_port_operations nv_generic_ops = {
468 	.inherits		= &ata_bmdma_port_ops,
469 	.lost_interrupt		= ATA_OP_NULL,
470 	.scr_read		= nv_scr_read,
471 	.scr_write		= nv_scr_write,
472 	.hardreset		= nv_hardreset,
473 };
474 
475 static struct ata_port_operations nv_nf2_ops = {
476 	.inherits		= &nv_generic_ops,
477 	.freeze			= nv_nf2_freeze,
478 	.thaw			= nv_nf2_thaw,
479 };
480 
481 static struct ata_port_operations nv_ck804_ops = {
482 	.inherits		= &nv_generic_ops,
483 	.freeze			= nv_ck804_freeze,
484 	.thaw			= nv_ck804_thaw,
485 	.host_stop		= nv_ck804_host_stop,
486 };
487 
488 static struct ata_port_operations nv_adma_ops = {
489 	.inherits		= &nv_ck804_ops,
490 
491 	.check_atapi_dma	= nv_adma_check_atapi_dma,
492 	.sff_tf_read		= nv_adma_tf_read,
493 	.qc_defer		= ata_std_qc_defer,
494 	.qc_prep		= nv_adma_qc_prep,
495 	.qc_issue		= nv_adma_qc_issue,
496 	.sff_irq_clear		= nv_adma_irq_clear,
497 
498 	.freeze			= nv_adma_freeze,
499 	.thaw			= nv_adma_thaw,
500 	.error_handler		= nv_adma_error_handler,
501 	.post_internal_cmd	= nv_adma_post_internal_cmd,
502 
503 	.port_start		= nv_adma_port_start,
504 	.port_stop		= nv_adma_port_stop,
505 #ifdef CONFIG_PM
506 	.port_suspend		= nv_adma_port_suspend,
507 	.port_resume		= nv_adma_port_resume,
508 #endif
509 	.host_stop		= nv_adma_host_stop,
510 };
511 
512 static struct ata_port_operations nv_swncq_ops = {
513 	.inherits		= &nv_generic_ops,
514 
515 	.qc_defer		= ata_std_qc_defer,
516 	.qc_prep		= nv_swncq_qc_prep,
517 	.qc_issue		= nv_swncq_qc_issue,
518 
519 	.freeze			= nv_mcp55_freeze,
520 	.thaw			= nv_mcp55_thaw,
521 	.error_handler		= nv_swncq_error_handler,
522 
523 #ifdef CONFIG_PM
524 	.port_suspend		= nv_swncq_port_suspend,
525 	.port_resume		= nv_swncq_port_resume,
526 #endif
527 	.port_start		= nv_swncq_port_start,
528 };
529 
530 struct nv_pi_priv {
531 	irq_handler_t			irq_handler;
532 	struct scsi_host_template	*sht;
533 };
534 
535 #define NV_PI_PRIV(_irq_handler, _sht) \
536 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
537 
538 static const struct ata_port_info nv_port_info[] = {
539 	/* generic */
540 	{
541 		.flags		= ATA_FLAG_SATA,
542 		.pio_mask	= NV_PIO_MASK,
543 		.mwdma_mask	= NV_MWDMA_MASK,
544 		.udma_mask	= NV_UDMA_MASK,
545 		.port_ops	= &nv_generic_ops,
546 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
547 	},
548 	/* nforce2/3 */
549 	{
550 		.flags		= ATA_FLAG_SATA,
551 		.pio_mask	= NV_PIO_MASK,
552 		.mwdma_mask	= NV_MWDMA_MASK,
553 		.udma_mask	= NV_UDMA_MASK,
554 		.port_ops	= &nv_nf2_ops,
555 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
556 	},
557 	/* ck804 */
558 	{
559 		.flags		= ATA_FLAG_SATA,
560 		.pio_mask	= NV_PIO_MASK,
561 		.mwdma_mask	= NV_MWDMA_MASK,
562 		.udma_mask	= NV_UDMA_MASK,
563 		.port_ops	= &nv_ck804_ops,
564 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
565 	},
566 	/* ADMA */
567 	{
568 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
569 		.pio_mask	= NV_PIO_MASK,
570 		.mwdma_mask	= NV_MWDMA_MASK,
571 		.udma_mask	= NV_UDMA_MASK,
572 		.port_ops	= &nv_adma_ops,
573 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
574 	},
575 	/* MCP5x */
576 	{
577 		.flags		= ATA_FLAG_SATA,
578 		.pio_mask	= NV_PIO_MASK,
579 		.mwdma_mask	= NV_MWDMA_MASK,
580 		.udma_mask	= NV_UDMA_MASK,
581 		.port_ops	= &nv_generic_ops,
582 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
583 	},
584 	/* SWNCQ */
585 	{
586 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
587 		.pio_mask	= NV_PIO_MASK,
588 		.mwdma_mask	= NV_MWDMA_MASK,
589 		.udma_mask	= NV_UDMA_MASK,
590 		.port_ops	= &nv_swncq_ops,
591 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
592 	},
593 };
594 
595 MODULE_AUTHOR("NVIDIA");
596 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
597 MODULE_LICENSE("GPL");
598 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
599 MODULE_VERSION(DRV_VERSION);
600 
601 static bool adma_enabled;
602 static bool swncq_enabled = true;
603 static bool msi_enabled;
604 
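/*
 * Drop the port out of ADMA mode and back into legacy register mode:
 * clear the GO bit and briefly poll NV_ADMA_STAT until the engine
 * reports IDLE and then LEGACY, warning if either wait times out.
 */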
605 static void nv_adma_register_mode(struct ata_port *ap)
606 {
607 	struct nv_adma_port_priv *pp = ap->private_data;
608 	void __iomem *mmio = pp->ctl_block;
609 	u16 tmp, status;
610 	int count = 0;
611 
612 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
613 		return;
614 
615 	status = readw(mmio + NV_ADMA_STAT);
616 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
617 		ndelay(50);
618 		status = readw(mmio + NV_ADMA_STAT);
619 		count++;
620 	}
621 	if (count == 20)
622 		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
623 			      status);
624 
625 	tmp = readw(mmio + NV_ADMA_CTL);
626 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
627 
628 	count = 0;
629 	status = readw(mmio + NV_ADMA_STAT);
630 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
631 		ndelay(50);
632 		status = readw(mmio + NV_ADMA_STAT);
633 		count++;
634 	}
635 	if (count == 20)
636 		ata_port_warn(ap,
637 			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
638 			      status);
639 
640 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
641 }
642 
643 static void nv_adma_mode(struct ata_port *ap)
644 {
645 	struct nv_adma_port_priv *pp = ap->private_data;
646 	void __iomem *mmio = pp->ctl_block;
647 	u16 tmp, status;
648 	int count = 0;
649 
650 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
651 		return;
652 
653 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
654 
655 	tmp = readw(mmio + NV_ADMA_CTL);
656 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
657 
658 	status = readw(mmio + NV_ADMA_STAT);
659 	while (((status & NV_ADMA_STAT_LEGACY) ||
660 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
661 		ndelay(50);
662 		status = readw(mmio + NV_ADMA_STAT);
663 		count++;
664 	}
665 	if (count == 20)
666 		ata_port_warn(ap,
667 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
668 			status);
669 
670 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
671 }
672 
673 static int nv_adma_slave_config(struct scsi_device *sdev)
674 {
675 	struct ata_port *ap = ata_shost_to_port(sdev->host);
676 	struct nv_adma_port_priv *pp = ap->private_data;
677 	struct nv_adma_port_priv *port0, *port1;
678 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
679 	unsigned long segment_boundary, flags;
680 	unsigned short sg_tablesize;
681 	int rc;
682 	int adma_enable;
683 	u32 current_reg, new_reg, config_mask;
684 
685 	rc = ata_scsi_slave_config(sdev);
686 
687 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
688 		/* Not a proper libata device, ignore */
689 		return rc;
690 
691 	spin_lock_irqsave(ap->lock, flags);
692 
693 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
694 		/*
695 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
696 		 * Therefore ATAPI commands are sent through the legacy interface.
697 		 * However, the legacy interface only supports 32-bit DMA.
698 		 * Restrict DMA parameters as required by the legacy interface
699 		 * when an ATAPI device is connected.
700 		 */
701 		segment_boundary = ATA_DMA_BOUNDARY;
702 		/* Subtract 1 since an extra entry may be needed for padding, see
703 		   libata-scsi.c */
704 		sg_tablesize = LIBATA_MAX_PRD - 1;
705 
706 		/* Since the legacy DMA engine is in use, we need to disable ADMA
707 		   on the port. */
708 		adma_enable = 0;
709 		nv_adma_register_mode(ap);
710 	} else {
711 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
712 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
713 		adma_enable = 1;
714 	}
715 
716 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
717 
718 	if (ap->port_no == 1)
719 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
720 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
721 	else
722 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
723 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
724 
725 	if (adma_enable) {
726 		new_reg = current_reg | config_mask;
727 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
728 	} else {
729 		new_reg = current_reg & ~config_mask;
730 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
731 	}
732 
733 	if (current_reg != new_reg)
734 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
735 
736 	port0 = ap->host->ports[0]->private_data;
737 	port1 = ap->host->ports[1]->private_data;
738 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
739 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
740 		/*
741 		 * We have to set the DMA mask to 32-bit if either port is in
742 		 * ATAPI mode, since they are on the same PCI device which is
743 		 * used for DMA mapping.  If either SCSI device is not allocated
744 		 * yet, it's OK since that port will discover its correct
745 		 * setting when it does get allocated.
746 		 */
747 		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
748 	} else {
749 		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
750 	}
751 
752 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
753 	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
754 	ata_port_info(ap,
755 		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
756 		      (unsigned long long)*ap->host->dev->dma_mask,
757 		      segment_boundary, sg_tablesize);
758 
759 	spin_unlock_irqrestore(ap->lock, flags);
760 
761 	return rc;
762 }
763 
764 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
765 {
766 	struct nv_adma_port_priv *pp = qc->ap->private_data;
767 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
768 }
769 
770 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
771 {
772 	/* Other than when internal or pass-through commands are executed,
773 	   the only time this function will be called in ADMA mode will be
774 	   if a command fails. In the failure case we don't care about going
775 	   into register mode with ADMA commands pending, as the commands will
776 	   all shortly be aborted anyway. We assume that NCQ commands are not
777 	   issued via passthrough, which is the only way that switching into
778 	   ADMA mode could abort outstanding commands. */
779 	nv_adma_register_mode(ap);
780 
781 	ata_sff_tf_read(ap, tf);
782 }
783 
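/*
 * Encode a taskfile as CPB entries: each 16-bit word carries the shadow
 * register address in its upper byte and the data byte in its lower byte,
 * OR'd with control flags (WNB on the first entry, CMDEND on the command
 * entry); unused slots are padded with IGN.
 */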
784 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
785 {
786 	unsigned int idx = 0;
787 
788 	if (tf->flags & ATA_TFLAG_ISADDR) {
789 		if (tf->flags & ATA_TFLAG_LBA48) {
790 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
791 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
792 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
793 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
794 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
795 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
796 		} else
797 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
798 
799 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
800 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
801 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
802 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
803 	}
804 
805 	if (tf->flags & ATA_TFLAG_DEVICE)
806 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
807 
808 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
809 
810 	while (idx < 12)
811 		cpb[idx++] = cpu_to_le16(IGN);
812 
813 	return idx;
814 }
815 
816 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
817 {
818 	struct nv_adma_port_priv *pp = ap->private_data;
819 	u8 flags = pp->cpb[cpb_num].resp_flags;
820 
821 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
822 
823 	if (unlikely((force_err ||
824 		     flags & (NV_CPB_RESP_ATA_ERR |
825 			      NV_CPB_RESP_CMD_ERR |
826 			      NV_CPB_RESP_CPB_ERR)))) {
827 		struct ata_eh_info *ehi = &ap->link.eh_info;
828 		int freeze = 0;
829 
830 		ata_ehi_clear_desc(ehi);
831 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
832 		if (flags & NV_CPB_RESP_ATA_ERR) {
833 			ata_ehi_push_desc(ehi, "ATA error");
834 			ehi->err_mask |= AC_ERR_DEV;
835 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
836 			ata_ehi_push_desc(ehi, "CMD error");
837 			ehi->err_mask |= AC_ERR_DEV;
838 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
839 			ata_ehi_push_desc(ehi, "CPB error");
840 			ehi->err_mask |= AC_ERR_SYSTEM;
841 			freeze = 1;
842 		} else {
843 			/* notifier error, but no error in CPB flags? */
844 			ata_ehi_push_desc(ehi, "unknown");
845 			ehi->err_mask |= AC_ERR_OTHER;
846 			freeze = 1;
847 		}
848 		/* Kill all commands. EH will determine what actually failed. */
849 		if (freeze)
850 			ata_port_freeze(ap);
851 		else
852 			ata_port_abort(ap);
853 		return -1;
854 	}
855 
856 	if (likely(flags & NV_CPB_RESP_DONE))
857 		return 1;
858 	return 0;
859 }
860 
861 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
862 {
863 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
864 
865 	/* freeze if hotplugged */
866 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
867 		ata_port_freeze(ap);
868 		return 1;
869 	}
870 
871 	/* bail out if not our interrupt */
872 	if (!(irq_stat & NV_INT_DEV))
873 		return 0;
874 
875 	/* DEV interrupt w/ no active qc? */
876 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
877 		ata_sff_check_status(ap);
878 		return 1;
879 	}
880 
881 	/* handle interrupt */
882 	return ata_bmdma_port_intr(ap, qc);
883 }
884 
885 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
886 {
887 	struct ata_host *host = dev_instance;
888 	int i, handled = 0;
889 	u32 notifier_clears[2];
890 
891 	spin_lock(&host->lock);
892 
893 	for (i = 0; i < host->n_ports; i++) {
894 		struct ata_port *ap = host->ports[i];
895 		struct nv_adma_port_priv *pp = ap->private_data;
896 		void __iomem *mmio = pp->ctl_block;
897 		u16 status;
898 		u32 gen_ctl;
899 		u32 notifier, notifier_error;
900 
901 		notifier_clears[i] = 0;
902 
903 		/* if ADMA is disabled, use standard ata interrupt handler */
904 		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
905 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
906 				>> (NV_INT_PORT_SHIFT * i);
907 			handled += nv_host_intr(ap, irq_stat);
908 			continue;
909 		}
910 
911 		/* if in ATA register mode, check for standard interrupts */
912 		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
913 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
914 				>> (NV_INT_PORT_SHIFT * i);
915 			if (ata_tag_valid(ap->link.active_tag))
916 				/** NV_INT_DEV indication seems unreliable
917 				    at times, at least in ADMA mode. Force it
918 				    on always when a command is active, to
919 				    prevent losing interrupts. */
920 				irq_stat |= NV_INT_DEV;
921 			handled += nv_host_intr(ap, irq_stat);
922 		}
923 
924 		notifier = readl(mmio + NV_ADMA_NOTIFIER);
925 		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
926 		notifier_clears[i] = notifier | notifier_error;
927 
928 		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
929 
930 		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
931 		    !notifier_error)
932 			/* Nothing to do */
933 			continue;
934 
935 		status = readw(mmio + NV_ADMA_STAT);
936 
937 		/*
938 		 * Clear status. Ensure the controller sees the
939 		 * clearing before we start looking at any of the CPB
940 		 * statuses, so that any CPB completions after this
941 		 * point in the handler will raise another interrupt.
942 		 */
943 		writew(status, mmio + NV_ADMA_STAT);
944 		readw(mmio + NV_ADMA_STAT); /* flush posted write */
945 		rmb();
946 
947 		handled++; /* irq handled if we got here */
948 
949 		/* freeze if hotplugged or controller error */
950 		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
951 				       NV_ADMA_STAT_HOTUNPLUG |
952 				       NV_ADMA_STAT_TIMEOUT |
953 				       NV_ADMA_STAT_SERROR))) {
954 			struct ata_eh_info *ehi = &ap->link.eh_info;
955 
956 			ata_ehi_clear_desc(ehi);
957 			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
958 			if (status & NV_ADMA_STAT_TIMEOUT) {
959 				ehi->err_mask |= AC_ERR_SYSTEM;
960 				ata_ehi_push_desc(ehi, "timeout");
961 			} else if (status & NV_ADMA_STAT_HOTPLUG) {
962 				ata_ehi_hotplugged(ehi);
963 				ata_ehi_push_desc(ehi, "hotplug");
964 			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
965 				ata_ehi_hotplugged(ehi);
966 				ata_ehi_push_desc(ehi, "hot unplug");
967 			} else if (status & NV_ADMA_STAT_SERROR) {
968 				/* let EH analyze SError and figure out cause */
969 				ata_ehi_push_desc(ehi, "SError");
970 			} else
971 				ata_ehi_push_desc(ehi, "unknown");
972 			ata_port_freeze(ap);
973 			continue;
974 		}
975 
976 		if (status & (NV_ADMA_STAT_DONE |
977 			      NV_ADMA_STAT_CPBERR |
978 			      NV_ADMA_STAT_CMD_COMPLETE)) {
979 			u32 check_commands = notifier_clears[i];
980 			u32 done_mask = 0;
981 			int pos, rc;
982 
983 			if (status & NV_ADMA_STAT_CPBERR) {
984 				/* check all active commands */
985 				if (ata_tag_valid(ap->link.active_tag))
986 					check_commands = 1 <<
987 						ap->link.active_tag;
988 				else
989 					check_commands = ap->link.sactive;
990 			}
991 
992 			/* check CPBs for completed commands */
993 			while ((pos = ffs(check_commands))) {
994 				pos--;
995 				rc = nv_adma_check_cpb(ap, pos,
996 						notifier_error & (1 << pos));
997 				if (rc > 0)
998 					done_mask |= 1 << pos;
999 				else if (unlikely(rc < 0))
1000 					check_commands = 0;
1001 				check_commands &= ~(1 << pos);
1002 			}
1003 			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
1004 		}
1005 	}
1006 
1007 	if (notifier_clears[0] || notifier_clears[1]) {
1008 		/* Note: Both notifier clear registers must be written
1009 		   if either is set, even if one is zero, according to NVIDIA. */
1010 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1011 		writel(notifier_clears[0], pp->notifier_clear_block);
1012 		pp = host->ports[1]->private_data;
1013 		writel(notifier_clears[1], pp->notifier_clear_block);
1014 	}
1015 
1016 	spin_unlock(&host->lock);
1017 
1018 	return IRQ_RETVAL(handled);
1019 }
1020 
1021 static void nv_adma_freeze(struct ata_port *ap)
1022 {
1023 	struct nv_adma_port_priv *pp = ap->private_data;
1024 	void __iomem *mmio = pp->ctl_block;
1025 	u16 tmp;
1026 
1027 	nv_ck804_freeze(ap);
1028 
1029 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1030 		return;
1031 
1032 	/* clear any outstanding CK804 notifications */
1033 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1034 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1035 
1036 	/* Disable interrupt */
1037 	tmp = readw(mmio + NV_ADMA_CTL);
1038 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1039 		mmio + NV_ADMA_CTL);
1040 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1041 }
1042 
1043 static void nv_adma_thaw(struct ata_port *ap)
1044 {
1045 	struct nv_adma_port_priv *pp = ap->private_data;
1046 	void __iomem *mmio = pp->ctl_block;
1047 	u16 tmp;
1048 
1049 	nv_ck804_thaw(ap);
1050 
1051 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1052 		return;
1053 
1054 	/* Enable interrupt */
1055 	tmp = readw(mmio + NV_ADMA_CTL);
1056 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1057 		mmio + NV_ADMA_CTL);
1058 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1059 }
1060 
1061 static void nv_adma_irq_clear(struct ata_port *ap)
1062 {
1063 	struct nv_adma_port_priv *pp = ap->private_data;
1064 	void __iomem *mmio = pp->ctl_block;
1065 	u32 notifier_clears[2];
1066 
1067 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1068 		ata_bmdma_irq_clear(ap);
1069 		return;
1070 	}
1071 
1072 	/* clear any outstanding CK804 notifications */
1073 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1074 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1075 
1076 	/* clear ADMA status */
1077 	writew(0xffff, mmio + NV_ADMA_STAT);
1078 
1079 	/* clear notifiers - note both ports need to be written with
1080 	   something even though we are only clearing on one */
1081 	if (ap->port_no == 0) {
1082 		notifier_clears[0] = 0xFFFFFFFF;
1083 		notifier_clears[1] = 0;
1084 	} else {
1085 		notifier_clears[0] = 0;
1086 		notifier_clears[1] = 0xFFFFFFFF;
1087 	}
1088 	pp = ap->host->ports[0]->private_data;
1089 	writel(notifier_clears[0], pp->notifier_clear_block);
1090 	pp = ap->host->ports[1]->private_data;
1091 	writel(notifier_clears[1], pp->notifier_clear_block);
1092 }
1093 
1094 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1095 {
1096 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1097 
1098 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1099 		ata_bmdma_post_internal_cmd(qc);
1100 }
1101 
1102 static int nv_adma_port_start(struct ata_port *ap)
1103 {
1104 	struct device *dev = ap->host->dev;
1105 	struct nv_adma_port_priv *pp;
1106 	int rc;
1107 	void *mem;
1108 	dma_addr_t mem_dma;
1109 	void __iomem *mmio;
1110 	struct pci_dev *pdev = to_pci_dev(dev);
1111 	u16 tmp;
1112 
1113 	VPRINTK("ENTER\n");
1114 
1115 	/*
1116 	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1117 	 * pad buffers.
1118 	 */
1119 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1120 	if (rc)
1121 		return rc;
1122 
1123 	/* we might fall back to bmdma, so allocate bmdma resources */
1124 	rc = ata_bmdma_port_start(ap);
1125 	if (rc)
1126 		return rc;
1127 
1128 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1129 	if (!pp)
1130 		return -ENOMEM;
1131 
1132 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1133 	       ap->port_no * NV_ADMA_PORT_SIZE;
1134 	pp->ctl_block = mmio;
1135 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1136 	pp->notifier_clear_block = pp->gen_block +
1137 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1138 
1139 	/*
1140 	 * Now that the legacy PRD and padding buffer are allocated we can
1141 	 * try to raise the DMA mask to allocate the CPB/APRD table.
1142 	 */
1143 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1144 	if (rc) {
1145 		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1146 		if (rc)
1147 			return rc;
1148 	}
1149 	pp->adma_dma_mask = *dev->dma_mask;
1150 
1151 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1152 				  &mem_dma, GFP_KERNEL);
1153 	if (!mem)
1154 		return -ENOMEM;
1155 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1156 
1157 	/*
1158 	 * First item in chunk of DMA memory:
1159 	 * 128-byte command parameter block (CPB)
1160 	 * one for each command tag
1161 	 */
1162 	pp->cpb     = mem;
1163 	pp->cpb_dma = mem_dma;
1164 
1165 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1166 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1167 
1168 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1169 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1170 
1171 	/*
1172 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1173 	 */
1174 	pp->aprd = mem;
1175 	pp->aprd_dma = mem_dma;
1176 
1177 	ap->private_data = pp;
1178 
1179 	/* clear any outstanding interrupt conditions */
1180 	writew(0xffff, mmio + NV_ADMA_STAT);
1181 
1182 	/* initialize port variables */
1183 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1184 
1185 	/* clear CPB fetch count */
1186 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1187 
1188 	/* clear GO for register mode, enable interrupt */
1189 	tmp = readw(mmio + NV_ADMA_CTL);
1190 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1191 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1192 
1193 	tmp = readw(mmio + NV_ADMA_CTL);
1194 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1195 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1196 	udelay(1);
1197 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1198 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1199 
1200 	return 0;
1201 }
1202 
1203 static void nv_adma_port_stop(struct ata_port *ap)
1204 {
1205 	struct nv_adma_port_priv *pp = ap->private_data;
1206 	void __iomem *mmio = pp->ctl_block;
1207 
1208 	VPRINTK("ENTER\n");
1209 	writew(0, mmio + NV_ADMA_CTL);
1210 }
1211 
1212 #ifdef CONFIG_PM
1213 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1214 {
1215 	struct nv_adma_port_priv *pp = ap->private_data;
1216 	void __iomem *mmio = pp->ctl_block;
1217 
1218 	/* Go to register mode - clears GO */
1219 	nv_adma_register_mode(ap);
1220 
1221 	/* clear CPB fetch count */
1222 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1223 
1224 	/* disable interrupt, shut down port */
1225 	writew(0, mmio + NV_ADMA_CTL);
1226 
1227 	return 0;
1228 }
1229 
1230 static int nv_adma_port_resume(struct ata_port *ap)
1231 {
1232 	struct nv_adma_port_priv *pp = ap->private_data;
1233 	void __iomem *mmio = pp->ctl_block;
1234 	u16 tmp;
1235 
1236 	/* set CPB block location */
1237 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1238 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1239 
1240 	/* clear any outstanding interrupt conditions */
1241 	writew(0xffff, mmio + NV_ADMA_STAT);
1242 
1243 	/* initialize port variables */
1244 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1245 
1246 	/* clear CPB fetch count */
1247 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1248 
1249 	/* clear GO for register mode, enable interrupt */
1250 	tmp = readw(mmio + NV_ADMA_CTL);
1251 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1252 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1253 
1254 	tmp = readw(mmio + NV_ADMA_CTL);
1255 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1256 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1257 	udelay(1);
1258 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1259 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1260 
1261 	return 0;
1262 }
1263 #endif
1264 
1265 static void nv_adma_setup_port(struct ata_port *ap)
1266 {
1267 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1268 	struct ata_ioports *ioport = &ap->ioaddr;
1269 
1270 	VPRINTK("ENTER\n");
1271 
1272 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1273 
1274 	ioport->cmd_addr	= mmio;
1275 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1276 	ioport->error_addr	=
1277 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1278 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1279 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1280 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1281 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1282 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1283 	ioport->status_addr	=
1284 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1285 	ioport->altstatus_addr	=
1286 	ioport->ctl_addr	= mmio + 0x20;
1287 }
1288 
1289 static int nv_adma_host_init(struct ata_host *host)
1290 {
1291 	struct pci_dev *pdev = to_pci_dev(host->dev);
1292 	unsigned int i;
1293 	u32 tmp32;
1294 
1295 	VPRINTK("ENTER\n");
1296 
1297 	/* enable ADMA on the ports */
1298 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1299 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1300 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1301 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1302 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1303 
1304 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1305 
1306 	for (i = 0; i < host->n_ports; i++)
1307 		nv_adma_setup_port(host->ports[i]);
1308 
1309 	return 0;
1310 }
1311 
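/*
 * Fill one APRD entry from an SG element: DMA address, length in bytes,
 * and flags - NV_APRD_WRITE for writes, NV_APRD_END on the final segment,
 * NV_APRD_CONT on intermediate segments other than inline entry 4.
 */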
1312 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1313 			      struct scatterlist *sg,
1314 			      int idx,
1315 			      struct nv_adma_prd *aprd)
1316 {
1317 	u8 flags = 0;
1318 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1319 		flags |= NV_APRD_WRITE;
1320 	if (idx == qc->n_elem - 1)
1321 		flags |= NV_APRD_END;
1322 	else if (idx != 4)
1323 		flags |= NV_APRD_CONT;
1324 
1325 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1326 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1327 	aprd->flags = flags;
1328 	aprd->packet_len = 0;
1329 }
1330 
1331 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1332 {
1333 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1334 	struct nv_adma_prd *aprd;
1335 	struct scatterlist *sg;
1336 	unsigned int si;
1337 
1338 	VPRINTK("ENTER\n");
1339 
1340 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1341 		aprd = (si < 5) ? &cpb->aprd[si] :
1342 			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1343 		nv_adma_fill_aprd(qc, sg, si, aprd);
1344 	}
1345 	if (si > 5)
1346 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1347 	else
1348 		cpb->next_aprd = cpu_to_le64(0);
1349 }
1350 
1351 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1352 {
1353 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1354 
1355 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1356 	   or interrupt-driven no-data commands. */
1357 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1358 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1359 		return 1;
1360 
1361 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1362 	   (qc->tf.protocol == ATA_PROT_NODATA))
1363 		return 0;
1364 
1365 	return 1;
1366 }
1367 
1368 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1369 {
1370 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1371 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1372 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1373 		       NV_CPB_CTL_IEN;
1374 
1375 	if (nv_adma_use_reg_mode(qc)) {
1376 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1377 			(qc->flags & ATA_QCFLAG_DMAMAP));
1378 		nv_adma_register_mode(qc->ap);
1379 		ata_bmdma_qc_prep(qc);
1380 		return AC_ERR_OK;
1381 	}
1382 
1383 	cpb->resp_flags = NV_CPB_RESP_DONE;
1384 	wmb();
1385 	cpb->ctl_flags = 0;
1386 	wmb();
1387 
1388 	cpb->len		= 3;
1389 	cpb->tag		= qc->hw_tag;
1390 	cpb->next_cpb_idx	= 0;
1391 
1392 	/* turn on NCQ flags for NCQ commands */
1393 	if (qc->tf.protocol == ATA_PROT_NCQ)
1394 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1395 
1396 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1397 
1398 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1399 
1400 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1401 		nv_adma_fill_sg(qc, cpb);
1402 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1403 	} else
1404 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1405 
1406 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1407 	   until we are finished filling in all of the contents */
1408 	wmb();
1409 	cpb->ctl_flags = ctl_flags;
1410 	wmb();
1411 	cpb->resp_flags = 0;
1412 
1413 	return AC_ERR_OK;
1414 }
1415 
1416 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1417 {
1418 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1419 	void __iomem *mmio = pp->ctl_block;
1420 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1421 
1422 	VPRINTK("ENTER\n");
1423 
1424 	/* We can't handle result taskfile with NCQ commands, since
1425 	   retrieving the taskfile switches us out of ADMA mode and would abort
1426 	   existing commands. */
1427 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1428 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1429 		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1430 		return AC_ERR_SYSTEM;
1431 	}
1432 
1433 	if (nv_adma_use_reg_mode(qc)) {
1434 		/* use ATA register mode */
1435 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1436 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1437 			(qc->flags & ATA_QCFLAG_DMAMAP));
1438 		nv_adma_register_mode(qc->ap);
1439 		return ata_bmdma_qc_issue(qc);
1440 	} else
1441 		nv_adma_mode(qc->ap);
1442 
1443 	/* write append register, command tag in lower 8 bits
1444 	   and (number of cpbs to append -1) in top 8 bits */
1445 	wmb();
1446 
1447 	if (curr_ncq != pp->last_issue_ncq) {
1448 		/* Seems to need some delay before switching between NCQ and
1449 		   non-NCQ commands, else we get command timeouts and such. */
1450 		udelay(20);
1451 		pp->last_issue_ncq = curr_ncq;
1452 	}
1453 
1454 	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1455 
1456 	DPRINTK("Issued tag %u\n", qc->hw_tag);
1457 
1458 	return 0;
1459 }
1460 
1461 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1462 {
1463 	struct ata_host *host = dev_instance;
1464 	unsigned int i;
1465 	unsigned int handled = 0;
1466 	unsigned long flags;
1467 
1468 	spin_lock_irqsave(&host->lock, flags);
1469 
1470 	for (i = 0; i < host->n_ports; i++) {
1471 		struct ata_port *ap = host->ports[i];
1472 		struct ata_queued_cmd *qc;
1473 
1474 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1475 		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1476 			handled += ata_bmdma_port_intr(ap, qc);
1477 		} else {
1478 			/*
1479 			 * No request pending?  Clear interrupt status
1480 			 * anyway, in case there's one pending.
1481 			 */
1482 			ap->ops->sff_check_status(ap);
1483 		}
1484 	}
1485 
1486 	spin_unlock_irqrestore(&host->lock, flags);
1487 
1488 	return IRQ_RETVAL(handled);
1489 }
1490 
1491 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1492 {
1493 	int i, handled = 0;
1494 
1495 	for (i = 0; i < host->n_ports; i++) {
1496 		handled += nv_host_intr(host->ports[i], irq_stat);
1497 		irq_stat >>= NV_INT_PORT_SHIFT;
1498 	}
1499 
1500 	return IRQ_RETVAL(handled);
1501 }
1502 
1503 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1504 {
1505 	struct ata_host *host = dev_instance;
1506 	u8 irq_stat;
1507 	irqreturn_t ret;
1508 
1509 	spin_lock(&host->lock);
1510 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1511 	ret = nv_do_interrupt(host, irq_stat);
1512 	spin_unlock(&host->lock);
1513 
1514 	return ret;
1515 }
1516 
1517 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1518 {
1519 	struct ata_host *host = dev_instance;
1520 	u8 irq_stat;
1521 	irqreturn_t ret;
1522 
1523 	spin_lock(&host->lock);
1524 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1525 	ret = nv_do_interrupt(host, irq_stat);
1526 	spin_unlock(&host->lock);
1527 
1528 	return ret;
1529 }
1530 
1531 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1532 {
1533 	if (sc_reg > SCR_CONTROL)
1534 		return -EINVAL;
1535 
1536 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1537 	return 0;
1538 }
1539 
1540 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1541 {
1542 	if (sc_reg > SCR_CONTROL)
1543 		return -EINVAL;
1544 
1545 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1546 	return 0;
1547 }
1548 
1549 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1550 			unsigned long deadline)
1551 {
1552 	struct ata_eh_context *ehc = &link->eh_context;
1553 
1554 	/* Do hardreset iff it's post-boot probing, please read the
1555 	 * comment above port ops for details.
1556 	 */
1557 	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1558 	    !ata_dev_enabled(link->device))
1559 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1560 				    NULL, NULL);
1561 	else {
1562 		const unsigned long *timing = sata_ehc_deb_timing(ehc);
1563 		int rc;
1564 
1565 		if (!(ehc->i.flags & ATA_EHI_QUIET))
1566 			ata_link_info(link,
1567 				      "nv: skipping hardreset on occupied port\n");
1568 
1569 		/* make sure the link is online */
1570 		rc = sata_link_resume(link, timing, deadline);
1571 		/* whine about phy resume failure but proceed */
1572 		if (rc && rc != -EOPNOTSUPP)
1573 			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1574 				      rc);
1575 	}
1576 
1577 	/* device signature acquisition is unreliable */
1578 	return -EAGAIN;
1579 }
1580 
1581 static void nv_nf2_freeze(struct ata_port *ap)
1582 {
1583 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1584 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1585 	u8 mask;
1586 
1587 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1588 	mask &= ~(NV_INT_ALL << shift);
1589 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1590 }
1591 
1592 static void nv_nf2_thaw(struct ata_port *ap)
1593 {
1594 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1595 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1596 	u8 mask;
1597 
1598 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1599 
1600 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1601 	mask |= (NV_INT_MASK << shift);
1602 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1603 }
1604 
1605 static void nv_ck804_freeze(struct ata_port *ap)
1606 {
1607 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1608 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1609 	u8 mask;
1610 
1611 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1612 	mask &= ~(NV_INT_ALL << shift);
1613 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1614 }
1615 
1616 static void nv_ck804_thaw(struct ata_port *ap)
1617 {
1618 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1619 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1620 	u8 mask;
1621 
1622 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1623 
1624 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1625 	mask |= (NV_INT_MASK << shift);
1626 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1627 }
1628 
1629 static void nv_mcp55_freeze(struct ata_port *ap)
1630 {
1631 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1632 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1633 	u32 mask;
1634 
1635 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1636 
1637 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1638 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1639 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1640 }
1641 
1642 static void nv_mcp55_thaw(struct ata_port *ap)
1643 {
1644 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1645 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1646 	u32 mask;
1647 
1648 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1649 
1650 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1651 	mask |= (NV_INT_MASK_MCP55 << shift);
1652 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1653 }
1654 
1655 static void nv_adma_error_handler(struct ata_port *ap)
1656 {
1657 	struct nv_adma_port_priv *pp = ap->private_data;
1658 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1659 		void __iomem *mmio = pp->ctl_block;
1660 		int i;
1661 		u16 tmp;
1662 
1663 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1664 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1665 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1666 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1667 			u32 status = readw(mmio + NV_ADMA_STAT);
1668 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1669 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1670 
1671 			ata_port_err(ap,
1672 				"EH in ADMA mode, notifier 0x%X "
1673 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1674 				"next cpb count 0x%X next cpb idx 0x%x\n",
1675 				notifier, notifier_error, gen_ctl, status,
1676 				cpb_count, next_cpb_idx);
1677 
1678 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1679 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1680 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1681 				    ap->link.sactive & (1 << i))
1682 					ata_port_err(ap,
1683 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1684 						i, cpb->ctl_flags, cpb->resp_flags);
1685 			}
1686 		}
1687 
1688 		/* Push us back into port register mode for error handling. */
1689 		nv_adma_register_mode(ap);
1690 
1691 		/* Mark all of the CPBs as invalid to prevent them from
1692 		   being executed */
1693 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1694 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1695 
1696 		/* clear CPB fetch count */
1697 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1698 
1699 		/* Reset channel */
1700 		tmp = readw(mmio + NV_ADMA_CTL);
1701 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1702 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1703 		udelay(1);
1704 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1705 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1706 	}
1707 
1708 	ata_bmdma_error_handler(ap);
1709 }
1710 
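/*
 * SWNCQ defer queue: a small ring of tags (ATA_MAX_QUEUE entries) plus the
 * defer_bits bitmap.  NCQ commands that cannot be issued immediately are
 * parked here by nv_swncq_qc_issue() and re-issued one at a time as earlier
 * commands complete.
 */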
1711 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1712 {
1713 	struct nv_swncq_port_priv *pp = ap->private_data;
1714 	struct defer_queue *dq = &pp->defer_queue;
1715 
1716 	/* queue is full */
1717 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1718 	dq->defer_bits |= (1 << qc->hw_tag);
1719 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1720 }
1721 
1722 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1723 {
1724 	struct nv_swncq_port_priv *pp = ap->private_data;
1725 	struct defer_queue *dq = &pp->defer_queue;
1726 	unsigned int tag;
1727 
1728 	if (dq->head == dq->tail)	/* null queue */
1729 		return NULL;
1730 
1731 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1732 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1733 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1734 	dq->defer_bits &= ~(1 << tag);
1735 
1736 	return ata_qc_from_tag(ap, tag);
1737 }
1738 
1739 static void nv_swncq_fis_reinit(struct ata_port *ap)
1740 {
1741 	struct nv_swncq_port_priv *pp = ap->private_data;
1742 
1743 	pp->dhfis_bits = 0;
1744 	pp->dmafis_bits = 0;
1745 	pp->sdbfis_bits = 0;
1746 	pp->ncq_flags = 0;
1747 }
1748 
1749 static void nv_swncq_pp_reinit(struct ata_port *ap)
1750 {
1751 	struct nv_swncq_port_priv *pp = ap->private_data;
1752 	struct defer_queue *dq = &pp->defer_queue;
1753 
1754 	dq->head = 0;
1755 	dq->tail = 0;
1756 	dq->defer_bits = 0;
1757 	pp->qc_active = 0;
1758 	pp->last_issue_tag = ATA_TAG_POISON;
1759 	nv_swncq_fis_reinit(ap);
1760 }
1761 
1762 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1763 {
1764 	struct nv_swncq_port_priv *pp = ap->private_data;
1765 
1766 	writew(fis, pp->irq_block);
1767 }
1768 
1769 static void __ata_bmdma_stop(struct ata_port *ap)
1770 {
1771 	struct ata_queued_cmd qc;
1772 
1773 	qc.ap = ap;
1774 	ata_bmdma_stop(&qc);
1775 }
1776 
1777 static void nv_swncq_ncq_stop(struct ata_port *ap)
1778 {
1779 	struct nv_swncq_port_priv *pp = ap->private_data;
1780 	unsigned int i;
1781 	u32 sactive;
1782 	u32 done_mask;
1783 
1784 	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
1785 		     ap->qc_active, ap->link.sactive);
1786 	ata_port_err(ap,
1787 		"SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1788 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1789 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1790 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1791 
1792 	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1793 		     ap->ops->sff_check_status(ap),
1794 		     ioread8(ap->ioaddr.error_addr));
1795 
1796 	sactive = readl(pp->sactive_block);
1797 	done_mask = pp->qc_active ^ sactive;
1798 
1799 	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1800 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1801 		u8 err = 0;
1802 		if (pp->qc_active & (1 << i))
1803 			err = 0;
1804 		else if (done_mask & (1 << i))
1805 			err = 1;
1806 		else
1807 			continue;
1808 
1809 		ata_port_err(ap,
1810 			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1811 			     (pp->dhfis_bits >> i) & 0x1,
1812 			     (pp->dmafis_bits >> i) & 0x1,
1813 			     (pp->sdbfis_bits >> i) & 0x1,
1814 			     (sactive >> i) & 0x1,
1815 			     (err ? "error! tag doesn't exist" : " "));
1816 	}
1817 
1818 	nv_swncq_pp_reinit(ap);
1819 	ap->ops->sff_irq_clear(ap);
1820 	__ata_bmdma_stop(ap);
1821 	nv_swncq_irq_clear(ap, 0xffff);
1822 }
1823 
1824 static void nv_swncq_error_handler(struct ata_port *ap)
1825 {
1826 	struct ata_eh_context *ehc = &ap->link.eh_context;
1827 
1828 	if (ap->link.sactive) {
1829 		nv_swncq_ncq_stop(ap);
1830 		ehc->i.action |= ATA_EH_RESET;
1831 	}
1832 
1833 	ata_bmdma_error_handler(ap);
1834 }
1835 
1836 #ifdef CONFIG_PM
1837 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1838 {
1839 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1840 	u32 tmp;
1841 
1842 	/* clear irq */
1843 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1844 
1845 	/* disable irq */
1846 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1847 
1848 	/* disable swncq */
1849 	tmp = readl(mmio + NV_CTL_MCP55);
1850 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1851 	writel(tmp, mmio + NV_CTL_MCP55);
1852 
1853 	return 0;
1854 }
1855 
1856 static int nv_swncq_port_resume(struct ata_port *ap)
1857 {
1858 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1859 	u32 tmp;
1860 
1861 	/* clear irq */
1862 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1863 
1864 	/* enable irq */
1865 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1866 
1867 	/* enable swncq */
1868 	tmp = readl(mmio + NV_CTL_MCP55);
1869 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1870 
1871 	return 0;
1872 }
1873 #endif
1874 
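/*
 * Host-wide SWNCQ setup: clear a bit in PCI config register 0x7f (the
 * "ECO 398" workaround), enable SWNCQ on both ports via NV_CTL_MCP55,
 * unmask the interrupt sources the SWNCQ path needs (the 0x00fd00fd mask
 * is not publicly documented), and clear any stale port interrupts.
 */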
1875 static void nv_swncq_host_init(struct ata_host *host)
1876 {
1877 	u32 tmp;
1878 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1879 	struct pci_dev *pdev = to_pci_dev(host->dev);
1880 	u8 regval;
1881 
1882 	/* disable ECO 398 */
1883 	pci_read_config_byte(pdev, 0x7f, &regval);
1884 	regval &= ~(1 << 7);
1885 	pci_write_config_byte(pdev, 0x7f, regval);
1886 
1887 	/* enable swncq */
1888 	tmp = readl(mmio + NV_CTL_MCP55);
1889 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1890 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1891 
1892 	/* enable irq intr */
1893 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1894 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1895 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1896 
1897 	/* clear port irq */
1898 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1899 }
1900 
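/*
 * Per-device quirk: Maxtor drives are reportedly problematic with SWNCQ on
 * MCP51, and on MCP55 up to revision A2, so the queue depth is forced to 1
 * (effectively disabling NCQ) when such a combination is detected.
 */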
1901 static int nv_swncq_slave_config(struct scsi_device *sdev)
1902 {
1903 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1904 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1905 	struct ata_device *dev;
1906 	int rc;
1907 	u8 rev;
1908 	u8 check_maxtor = 0;
1909 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1910 
1911 	rc = ata_scsi_slave_config(sdev);
1912 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1913 		/* Not a proper libata device, ignore */
1914 		return rc;
1915 
1916 	dev = &ap->link.device[sdev->id];
1917 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1918 		return rc;
1919 
1920 	/* if MCP51 and Maxtor, then disable ncq */
1921 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1922 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1923 		check_maxtor = 1;
1924 
1925 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1926 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1927 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1928 		pci_read_config_byte(pdev, 0x8, &rev);
1929 		if (rev <= 0xa2)
1930 			check_maxtor = 1;
1931 	}
1932 
1933 	if (!check_maxtor)
1934 		return rc;
1935 
1936 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1937 
1938 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1939 		ata_scsi_change_queue_depth(sdev, 1);
1940 		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1941 			       sdev->queue_depth);
1942 	}
1943 
1944 	return rc;
1945 }
1946 
1947 static int nv_swncq_port_start(struct ata_port *ap)
1948 {
1949 	struct device *dev = ap->host->dev;
1950 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1951 	struct nv_swncq_port_priv *pp;
1952 	int rc;
1953 
1954 	/* we might fall back to bmdma, so allocate bmdma resources */
1955 	rc = ata_bmdma_port_start(ap);
1956 	if (rc)
1957 		return rc;
1958 
1959 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1960 	if (!pp)
1961 		return -ENOMEM;
1962 
1963 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1964 				      &pp->prd_dma, GFP_KERNEL);
1965 	if (!pp->prd)
1966 		return -ENOMEM;
1967 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1968 
1969 	ap->private_data = pp;
1970 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1971 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1972 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1973 
1974 	return 0;
1975 }
1976 
1977 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1978 {
1979 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1980 		ata_bmdma_qc_prep(qc);
1981 		return AC_ERR_OK;
1982 	}
1983 
1984 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1985 		return AC_ERR_OK;
1986 
1987 	nv_swncq_fill_sg(qc);
1988 
1989 	return AC_ERR_OK;
1990 }
1991 
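/*
 * Build the BMDMA PRD table for an NCQ command.  Each queued command gets
 * its own table (pp->prd + ATA_MAX_PRD * hw_tag), and scatterlist segments
 * are split so that no PRD entry crosses a 64KiB boundary; only the low
 * 16 bits of the length are stored, with 0 meaning 64KiB.
 */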
1992 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1993 {
1994 	struct ata_port *ap = qc->ap;
1995 	struct scatterlist *sg;
1996 	struct nv_swncq_port_priv *pp = ap->private_data;
1997 	struct ata_bmdma_prd *prd;
1998 	unsigned int si, idx;
1999 
2000 	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
2001 
2002 	idx = 0;
2003 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2004 		u32 addr, offset;
2005 		u32 sg_len, len;
2006 
2007 		addr = (u32)sg_dma_address(sg);
2008 		sg_len = sg_dma_len(sg);
2009 
2010 		while (sg_len) {
2011 			offset = addr & 0xffff;
2012 			len = sg_len;
2013 			if ((offset + sg_len) > 0x10000)
2014 				len = 0x10000 - offset;
2015 
2016 			prd[idx].addr = cpu_to_le32(addr);
2017 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2018 
2019 			idx++;
2020 			sg_len -= len;
2021 			addr += len;
2022 		}
2023 	}
2024 
2025 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2026 }
2027 
2028 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2029 					  struct ata_queued_cmd *qc)
2030 {
2031 	struct nv_swncq_port_priv *pp = ap->private_data;
2032 
2033 	if (qc == NULL)
2034 		return 0;
2035 
2036 	DPRINTK("Enter\n");
2037 
2038 	writel((1 << qc->hw_tag), pp->sactive_block);
2039 	pp->last_issue_tag = qc->hw_tag;
2040 	pp->dhfis_bits &= ~(1 << qc->hw_tag);
2041 	pp->dmafis_bits &= ~(1 << qc->hw_tag);
2042 	pp->qc_active |= (0x1 << qc->hw_tag);
2043 
2044 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2045 	ap->ops->sff_exec_command(ap, &qc->tf);
2046 
2047 	DPRINTK("Issued tag %u\n", qc->hw_tag);
2048 
2049 	return 0;
2050 }
2051 
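/*
 * NCQ issue path: non-NCQ commands take the normal BMDMA route.  NCQ
 * commands are issued immediately when the port is idle; otherwise they go
 * to the defer queue and are issued later from the interrupt path.
 */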
2052 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2053 {
2054 	struct ata_port *ap = qc->ap;
2055 	struct nv_swncq_port_priv *pp = ap->private_data;
2056 
2057 	if (qc->tf.protocol != ATA_PROT_NCQ)
2058 		return ata_bmdma_qc_issue(qc);
2059 
2060 	DPRINTK("Enter\n");
2061 
2062 	if (!pp->qc_active)
2063 		nv_swncq_issue_atacmd(ap, qc);
2064 	else
2065 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2066 
2067 	return 0;
2068 }
2069 
2070 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2071 {
2072 	u32 serror;
2073 	struct ata_eh_info *ehi = &ap->link.eh_info;
2074 
2075 	ata_ehi_clear_desc(ehi);
2076 
2077 	/* AHCI needs SError cleared; otherwise, it might lock up */
2078 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2079 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2080 
2081 	/* analyze @fis */
2082 	if (fis & NV_SWNCQ_IRQ_ADDED)
2083 		ata_ehi_push_desc(ehi, "hot plug");
2084 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2085 		ata_ehi_push_desc(ehi, "hot unplug");
2086 
2087 	ata_ehi_hotplugged(ehi);
2088 
2089 	/* okay, let's hand over to EH */
2090 	ehi->serror |= serror;
2091 
2092 	ata_port_freeze(ap);
2093 }
2094 
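/*
 * Handle a Set Device Bits FIS: tags that have dropped out of SActive
 * relative to the driver's qc_active shadow are complete, so fold them into
 * done_mask and complete those qcs, then either reissue a command that
 * never received its D2H Register FIS or kick off the next deferred
 * command.
 */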
2095 static int nv_swncq_sdbfis(struct ata_port *ap)
2096 {
2097 	struct ata_queued_cmd *qc;
2098 	struct nv_swncq_port_priv *pp = ap->private_data;
2099 	struct ata_eh_info *ehi = &ap->link.eh_info;
2100 	u32 sactive;
2101 	u32 done_mask;
2102 	u8 host_stat;
2103 	u8 lack_dhfis = 0;
2104 
2105 	host_stat = ap->ops->bmdma_status(ap);
2106 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2107 		/* error when transferring data to/from memory */
2108 		ata_ehi_clear_desc(ehi);
2109 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2110 		ehi->err_mask |= AC_ERR_HOST_BUS;
2111 		ehi->action |= ATA_EH_RESET;
2112 		return -EINVAL;
2113 	}
2114 
2115 	ap->ops->sff_irq_clear(ap);
2116 	__ata_bmdma_stop(ap);
2117 
2118 	sactive = readl(pp->sactive_block);
2119 	done_mask = pp->qc_active ^ sactive;
2120 
2121 	pp->qc_active &= ~done_mask;
2122 	pp->dhfis_bits &= ~done_mask;
2123 	pp->dmafis_bits &= ~done_mask;
2124 	pp->sdbfis_bits |= done_mask;
2125 	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2126 
2127 	if (!ap->qc_active) {
2128 		DPRINTK("over\n");
2129 		nv_swncq_pp_reinit(ap);
2130 		return 0;
2131 	}
2132 
2133 	if (pp->qc_active & pp->dhfis_bits)
2134 		return 0;
2135 
2136 	if ((pp->ncq_flags & ncq_saw_backout) ||
2137 	    (pp->qc_active ^ pp->dhfis_bits))
2138 		/* if the controller cannot get a Device-to-Host Register FIS,
2139 		 * the driver needs to reissue the command.
2140 		 */
2141 		lack_dhfis = 1;
2142 
2143 	DPRINTK("id 0x%x QC: qc_active 0x%llX, "
2144 		"SWNCQ: qc_active 0x%X defer_bits 0x%X "
2145 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2146 		ap->print_id, ap->qc_active, pp->qc_active,
2147 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2148 		pp->dmafis_bits, pp->last_issue_tag);
2149 
2150 	nv_swncq_fis_reinit(ap);
2151 
2152 	if (lack_dhfis) {
2153 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2154 		nv_swncq_issue_atacmd(ap, qc);
2155 		return 0;
2156 	}
2157 
2158 	if (pp->defer_queue.defer_bits) {
2159 		/* send deferral queue command */
2160 		qc = nv_swncq_qc_from_dq(ap);
2161 		WARN_ON(qc == NULL);
2162 		nv_swncq_issue_atacmd(ap, qc);
2163 	}
2164 
2165 	return 0;
2166 }
2167 
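/*
 * The tag of the command the controller wants to DMA for is reported in the
 * per-port tag register; bits 2-6 appear to hold the tag value.
 */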
2168 static inline u32 nv_swncq_tag(struct ata_port *ap)
2169 {
2170 	struct nv_swncq_port_priv *pp = ap->private_data;
2171 	u32 tag;
2172 
2173 	tag = readb(pp->tag_block) >> 2;
2174 	return (tag & 0x1f);
2175 }
2176 
2177 static void nv_swncq_dmafis(struct ata_port *ap)
2178 {
2179 	struct ata_queued_cmd *qc;
2180 	unsigned int rw;
2181 	u8 dmactl;
2182 	u32 tag;
2183 	struct nv_swncq_port_priv *pp = ap->private_data;
2184 
2185 	__ata_bmdma_stop(ap);
2186 	tag = nv_swncq_tag(ap);
2187 
2188 	DPRINTK("dma setup tag 0x%x\n", tag);
2189 	qc = ata_qc_from_tag(ap, tag);
2190 
2191 	if (unlikely(!qc))
2192 		return;
2193 
2194 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2195 
2196 	/* load PRD table addr. */
2197 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2198 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2199 
2200 	/* specify data direction, triple-check start bit is clear */
2201 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2202 	dmactl &= ~ATA_DMA_WR;
2203 	if (!rw)
2204 		dmactl |= ATA_DMA_WR;
2205 
2206 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2207 }
2208 
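/*
 * Per-port SWNCQ interrupt handling.  The fis word encodes which FIS types
 * the controller has seen: hotplug events freeze the port, device errors go
 * straight to EH, a backout is remembered so the command can be reissued,
 * an SDB FIS triggers completion processing, a D2H Register FIS marks the
 * last issued tag as accepted (possibly releasing the next deferred
 * command), and a DMA Setup FIS starts the BMDMA engine for the tag the
 * hardware selected.
 */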
2209 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2210 {
2211 	struct nv_swncq_port_priv *pp = ap->private_data;
2212 	struct ata_queued_cmd *qc;
2213 	struct ata_eh_info *ehi = &ap->link.eh_info;
2214 	u32 serror;
2215 	u8 ata_stat;
2216 
2217 	ata_stat = ap->ops->sff_check_status(ap);
2218 	nv_swncq_irq_clear(ap, fis);
2219 	if (!fis)
2220 		return;
2221 
2222 	if (ap->pflags & ATA_PFLAG_FROZEN)
2223 		return;
2224 
2225 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2226 		nv_swncq_hotplug(ap, fis);
2227 		return;
2228 	}
2229 
2230 	if (!pp->qc_active)
2231 		return;
2232 
2233 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2234 		return;
2235 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2236 
2237 	if (ata_stat & ATA_ERR) {
2238 		ata_ehi_clear_desc(ehi);
2239 		ata_ehi_push_desc(ehi, "ATA error, fis:0x%X", fis);
2240 		ehi->err_mask |= AC_ERR_DEV;
2241 		ehi->serror |= serror;
2242 		ehi->action |= ATA_EH_RESET;
2243 		ata_port_freeze(ap);
2244 		return;
2245 	}
2246 
2247 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2248 		/* On a backout indication the driver must reissue
2249 		 * the command again some time later.
2250 		 */
2251 		pp->ncq_flags |= ncq_saw_backout;
2252 	}
2253 
2254 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2255 		pp->ncq_flags |= ncq_saw_sdb;
2256 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2257 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2258 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2259 			pp->dmafis_bits, readl(pp->sactive_block));
2260 		if (nv_swncq_sdbfis(ap) < 0)
2261 			goto irq_error;
2262 	}
2263 
2264 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2265 		/* The interrupt indicates the new command
2266 		 * was transmitted correctly to the drive.
2267 		 */
2268 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2269 		pp->ncq_flags |= ncq_saw_d2h;
2270 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2271 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2272 			ehi->err_mask |= AC_ERR_HSM;
2273 			ehi->action |= ATA_EH_RESET;
2274 			goto irq_error;
2275 		}
2276 
2277 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2278 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2279 			ata_stat = ap->ops->sff_check_status(ap);
2280 			if (ata_stat & ATA_BUSY)
2281 				goto irq_exit;
2282 
2283 			if (pp->defer_queue.defer_bits) {
2284 				DPRINTK("send next command\n");
2285 				qc = nv_swncq_qc_from_dq(ap);
2286 				nv_swncq_issue_atacmd(ap, qc);
2287 			}
2288 		}
2289 	}
2290 
2291 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2292 		/* program the dma controller with appropriate PRD buffers
2293 		 * and start the DMA transfer for the requested command.
2294 		 */
2295 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2296 		pp->ncq_flags |= ncq_saw_dmas;
2297 		nv_swncq_dmafis(ap);
2298 	}
2299 
2300 irq_exit:
2301 	return;
2302 irq_error:
2303 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2304 	ata_port_freeze(ap);
2305 	return;
2306 }
2307 
2308 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2309 {
2310 	struct ata_host *host = dev_instance;
2311 	unsigned int i;
2312 	unsigned int handled = 0;
2313 	unsigned long flags;
2314 	u32 irq_stat;
2315 
2316 	spin_lock_irqsave(&host->lock, flags);
2317 
2318 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2319 
2320 	for (i = 0; i < host->n_ports; i++) {
2321 		struct ata_port *ap = host->ports[i];
2322 
2323 		if (ap->link.sactive) {
2324 			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2325 			handled = 1;
2326 		} else {
2327 			if (irq_stat)	/* preserve the hotplug status bits */
2328 				nv_swncq_irq_clear(ap, 0xfff0);
2329 
2330 			handled += nv_host_intr(ap, (u8)irq_stat);
2331 		}
2332 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2333 	}
2334 
2335 	spin_unlock_irqrestore(&host->lock, flags);
2336 
2337 	return IRQ_RETVAL(handled);
2338 }
2339 
2340 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2341 {
2342 	const struct ata_port_info *ppi[] = { NULL, NULL };
2343 	struct nv_pi_priv *ipriv;
2344 	struct ata_host *host;
2345 	struct nv_host_priv *hpriv;
2346 	int rc;
2347 	u32 bar;
2348 	void __iomem *base;
2349 	unsigned long type = ent->driver_data;
2350 
2351 	// Make sure this is a SATA controller by counting the number of BARs
2352 	// (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2353 	// it's an IDE controller and we ignore it.
2354 	for (bar = 0; bar < 6; bar++)
2355 		if (pci_resource_start(pdev, bar) == 0)
2356 			return -ENODEV;
2357 
2358 	ata_print_version_once(&pdev->dev, DRV_VERSION);
2359 
2360 	rc = pcim_enable_device(pdev);
2361 	if (rc)
2362 		return rc;
2363 
2364 	/* determine type and allocate host */
2365 	if (type == CK804 && adma_enabled) {
2366 		dev_notice(&pdev->dev, "Using ADMA mode\n");
2367 		type = ADMA;
2368 	} else if (type == MCP5x && swncq_enabled) {
2369 		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2370 		type = SWNCQ;
2371 	}
2372 
2373 	ppi[0] = &nv_port_info[type];
2374 	ipriv = ppi[0]->private_data;
2375 	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2376 	if (rc)
2377 		return rc;
2378 
2379 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2380 	if (!hpriv)
2381 		return -ENOMEM;
2382 	hpriv->type = type;
2383 	host->private_data = hpriv;
2384 
2385 	/* request and iomap NV_MMIO_BAR */
2386 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2387 	if (rc)
2388 		return rc;
2389 
2390 	/* configure SCR access */
2391 	base = host->iomap[NV_MMIO_BAR];
2392 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2393 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2394 
2395 	/* enable SATA space for CK804 */
2396 	if (type >= CK804) {
2397 		u8 regval;
2398 
2399 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2400 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2401 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2402 	}
2403 
2404 	/* init ADMA */
2405 	if (type == ADMA) {
2406 		rc = nv_adma_host_init(host);
2407 		if (rc)
2408 			return rc;
2409 	} else if (type == SWNCQ)
2410 		nv_swncq_host_init(host);
2411 
2412 	if (msi_enabled) {
2413 		dev_notice(&pdev->dev, "Using MSI\n");
2414 		pci_enable_msi(pdev);
2415 	}
2416 
2417 	pci_set_master(pdev);
2418 	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2419 }
2420 
2421 #ifdef CONFIG_PM_SLEEP
2422 static int nv_pci_device_resume(struct pci_dev *pdev)
2423 {
2424 	struct ata_host *host = pci_get_drvdata(pdev);
2425 	struct nv_host_priv *hpriv = host->private_data;
2426 	int rc;
2427 
2428 	rc = ata_pci_device_do_resume(pdev);
2429 	if (rc)
2430 		return rc;
2431 
2432 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2433 		if (hpriv->type >= CK804) {
2434 			u8 regval;
2435 
2436 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2437 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2438 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2439 		}
2440 		if (hpriv->type == ADMA) {
2441 			u32 tmp32;
2442 			struct nv_adma_port_priv *pp;
2443 			/* enable/disable ADMA on the ports appropriately */
2444 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2445 
2446 			pp = host->ports[0]->private_data;
2447 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2448 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2449 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2450 			else
2451 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2452 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2453 			pp = host->ports[1]->private_data;
2454 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2455 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2456 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2457 			else
2458 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2459 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2460 
2461 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2462 		}
2463 	}
2464 
2465 	ata_host_resume(host);
2466 
2467 	return 0;
2468 }
2469 #endif
2470 
2471 static void nv_ck804_host_stop(struct ata_host *host)
2472 {
2473 	struct pci_dev *pdev = to_pci_dev(host->dev);
2474 	u8 regval;
2475 
2476 	/* disable SATA space for CK804 */
2477 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2478 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2479 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2480 }
2481 
2482 static void nv_adma_host_stop(struct ata_host *host)
2483 {
2484 	struct pci_dev *pdev = to_pci_dev(host->dev);
2485 	u32 tmp32;
2486 
2487 	/* disable ADMA on the ports */
2488 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2489 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2490 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2491 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2492 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2493 
2494 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2495 
2496 	nv_ck804_host_stop(host);
2497 }
2498 
2499 module_pci_driver(nv_pci_driver);
2500 
2501 module_param_named(adma, adma_enabled, bool, 0444);
2502 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2503 module_param_named(swncq, swncq_enabled, bool, 0444);
2504 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2505 module_param_named(msi, msi_enabled, bool, 0444);
2506 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
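/*
 * Example usage (hypothetical shell commands, not part of this file):
 *
 *   modprobe sata_nv adma=1 msi=1    # prefer ADMA mode on CK804/MCP04, use MSI
 *   modprobe sata_nv swncq=0         # fall back to plain BMDMA on MCP5x
 */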
2507