/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise  (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

/*
 * module options
 */

#ifdef CONFIG_PCI
static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	COAL_REG_BASE		= 0x18000,
	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE	= 0x20000,
	FLASH_CTL		= 0x1046c,
	GPIO_PORT_CTL		= 0x104f0,
	RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND		= 0xc00,
	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE		= 0x1d58,
	PCI_IRQ_MASK		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE		= 0x1900,
	PCIE_IRQ_MASK		= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR			= (1 << 18),
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG			= 0x00,

	HC_IRQ_CAUSE		= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,

	SOC_LED_CTRL		= 0x2c,
	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
						/*  with dev activity LED */

	/* Shadow block registers */
	SHD_BLK			= 0x100,
	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */

	/* SATA registers */
	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE		= 0x350,
	FIS_IRQ_CAUSE		= 0x364,
	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */

	LTMODE			= 0x30c,	/* requires read-after-write */
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE2		= 0x330,
	PHY_MODE3		= 0x310,

	PHY_MODE4		= 0x314,	/* requires read-after-write */
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	SATA_IFCTL		= 0x344,
	SATA_TESTCTL		= 0x348,
	SATA_IFSTAT		= 0x34c,
	VENDOR_UNIQUE_FIS	= 0x35c,

	FISCFG			= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	PHY_MODE9_GEN2		= 0x398,
	PHY_MODE9_GEN1		= 0x39c,
	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_IFCFG		= 0x050,
	LP_PHY_CTL		= 0x058,
	LP_PHY_CTL_PIN_PU_PLL   = (1 << 0),
	LP_PHY_CTL_PIN_PU_RX    = (1 << 1),
	LP_PHY_CTL_PIN_PU_TX    = (1 << 2),
	LP_PHY_CTL_GEN_TX_3G    = (1 << 5),
	LP_PHY_CTL_GEN_RX_3G    = (1 << 9),

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE	= 0x8,
	EDMA_ERR_IRQ_MASK	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI	= 0x10,
	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI	= 0x1c,
	EDMA_RSP_Q_IN_PTR	= 0x20,
	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD		= 0x224,	/* bmdma command register */
	BMDMA_STATUS		= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */
	MV_HP_FIX_LP_PHY_CTL	= (1 << 13),	/* fix speed in LP_PHY_CTL ? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};
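
/*
 * Editor's sketch (not part of the original driver): the queue sizing
 * constants above encode the alignment rules from the comment block,
 * e.g. 32 CRQBs of 32 bytes each fill exactly one 1KB-aligned request
 * queue.  C11 static assertions like these could verify that
 * arithmetic at compile time:
 */
_Static_assert(MV_CRQB_Q_SZ == 1024, "CRQB queue: 32 x 32B == 1KB");
_Static_assert(MV_CRPB_Q_SZ == 256, "CRPB queue: 32 x 8B == 256B");
_Static_assert(MV_SG_TBL_SZ == 4096, "ePRD table: 256 x 16B == 4KB");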

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	unsigned int		board_idx;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;

	/*
	 * Needed on some devices that require their clocks to be enabled.
	 * These are optional: if the platform device does not have any
	 * clocks, they won't be used.  Also, if the underlying hardware
	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
	 * all the clock operations become no-ops (see clk.h).
	 */
	struct clk		*clk;
	struct clk              **port_clks;
	/*
	 * Some devices have a SATA PHY which can be enabled/disabled
	 * in order to save power. These are optional: if the platform
	 * device does not have any PHYs, they won't be used.
	 */
	struct phy		**port_phys;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8   mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
#ifdef CONFIG_PCI
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
#endif
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
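
/*
 * Editor's sketch (assumption, not from the original source): the
 * halved .sg_tablesize above exists because mv_fill_sg() may split
 * any scatterlist entry crossing a 64KB boundary into two ePRDs, so
 * the worst case doubles the entry count:
 */
static inline unsigned int mv_worst_case_eprds(unsigned int n_sg)
{
	/* each S/G entry may become two ePRDs after 64K splitting */
	return n_sg * 2;	/* hence sg_tablesize = MV_MAX_SG_CT / 2 */
}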

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.dev_config             = mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata		= mv_soc_65n_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
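
/*
 * Editor's example (hypothetical helper, not in the original driver):
 * expanding the macro for port 6 gives hardport 2 on HC1 and an IRQ
 * shift of 1 * HC_SHIFT + 2 * 2 == 13, i.e. port 6's ERR_IRQ/DONE_IRQ
 * pair occupies bits 13-14 of the main cause/mask registers.
 */
static inline void mv_shift_example(void)
{
	unsigned int shift, hardport;

	MV_PORT_TO_SHIFT_AND_HARDPORT(6, shift, hardport);
	/* now: hardport == 2, shift == 13 */
}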

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
				return;
			}
		}
		writel(new, addr); /* unaffected by the errata */
	}
}
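
/*
 * Editor's note (illustrative, not from the original source): of the
 * registers cached by mv_save_cached_regs(), LTMODE (0x30c) and
 * PHY_MODE4 (0x314) fall inside the 0x300-0x33c window with offsets
 * ending in 0x4/0xc and so take the flushed writelfl() path above,
 * while FISCFG (0x360) and EDMA_HALTCOND (0x60) use the plain writel().
 */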

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
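
/*
 * Editor's sketch (hypothetical wrapper, not in the original driver):
 * feeding the module parameters from the top of this file into the
 * function above.  For example, irq_coalescing_usecs = 100 becomes
 * 100 * COAL_CLOCKS_PER_USEC == 15000 internal clocks, well under
 * MAX_COAL_TIME_THRESHOLD.
 */
static void mv_apply_coal_params(struct ata_host *host)
{
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
}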

/**
 *      mv_start_edma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the queued command
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_err(ap, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
#endif
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
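
/*
 * Editor's note (illustrative, not part of the original source): the
 * SCR_STATUS/SCR_CONTROL/SCR_ERROR indices map linearly from
 * SATA_STATUS (0x300, 4 bytes per register), SCR_ACTIVE is
 * special-cased to 0x350, and any other index yields the 0xffffffffU
 * sentinel that mv_scr_read()/mv_scr_write() below turn into -EINVAL.
 */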

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		struct mv_host_priv *hpriv = link->ap->host->private_data;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 *
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;

			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
				void __iomem *lp_phy_addr =
					mv_ap_base(link->ap) + LP_PHY_CTL;
				/*
				 * Set PHY speed according to SControl speed.
				 */
				u32 lp_phy_val =
					LP_PHY_CTL_PIN_PU_PLL |
					LP_PHY_CTL_PIN_PU_RX  |
					LP_PHY_CTL_PIN_PU_TX;

				if ((val & 0xf0) != 0x10)
					lp_phy_val |=
						LP_PHY_CTL_GEN_TX_3G |
						LP_PHY_CTL_GEN_RX_3G;

				writelfl(lp_phy_val, lp_phy_addr);
			}
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_info(adev,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	/* PIO commands need exclusive link: no other commands [DMA or PIO]
	 * can run concurrently.
	 * Set excl_link when we want to send a PIO command in DMA mode
	 * or a non-NCQ command in NCQ mode.
	 * When we receive a command from that link, and there are no
	 * outstanding commands, mark a flag to clear excl_link and let
	 * the command go through.
	 */
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return 0;
		} else
			return ATA_DEFER_PORT;
	}

	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;
		else {
			ap->excl_link = link;
			return ATA_DEFER_PORT;
		}
	}

	return ATA_DEFER_PORT;
}
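
/*
 * Editor's summary (illustrative, not from the original source) of the
 * rules above, assuming EDMA is enabled with NCQ and the queue is
 * non-empty: a new NCQ command issues immediately, while a PIO or
 * non-NCQ command marks the link as ap->excl_link and is deferred
 * until all outstanding commands drain.
 */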
1490 
mv_config_fbs(struct ata_port * ap,int want_ncq,int want_fbs)1491 static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1492 {
1493 	struct mv_port_priv *pp = ap->private_data;
1494 	void __iomem *port_mmio;
1495 
1496 	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
1497 	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
1498 	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1499 
1500 	ltmode   = *old_ltmode & ~LTMODE_BIT8;
1501 	haltcond = *old_haltcond | EDMA_ERR_DEV;
1502 
1503 	if (want_fbs) {
1504 		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1505 		ltmode = *old_ltmode | LTMODE_BIT8;
1506 		if (want_ncq)
1507 			haltcond &= ~EDMA_ERR_DEV;
1508 		else
1509 			fiscfg |=  FISCFG_WAIT_DEV_ERR;
1510 	} else {
1511 		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1512 	}
1513 
1514 	port_mmio = mv_ap_base(ap);
1515 	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1516 	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1517 	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1518 }
1519 
mv_60x1_errata_sata25(struct ata_port * ap,int want_ncq)1520 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1521 {
1522 	struct mv_host_priv *hpriv = ap->host->private_data;
1523 	u32 old, new;
1524 
1525 	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1526 	old = readl(hpriv->base + GPIO_PORT_CTL);
1527 	if (want_ncq)
1528 		new = old | (1 << 22);
1529 	else
1530 		new = old & ~(1 << 22);
1531 	if (new != old)
1532 		writel(new, hpriv->base + GPIO_PORT_CTL);
1533 }
1534 
1535 /**
1536  *	mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
1537  *	@ap: Port being initialized
1538  *
1539  *	There are two DMA modes on these chips:  basic DMA, and EDMA.
1540  *
1541  *	Bit-0 of the "EDMA RESERVED" register enables/disables use
1542  *	of basic DMA on the GEN_IIE versions of the chips.
1543  *
1544  *	This bit survives EDMA resets, and must be set for basic DMA
1545  *	to function, and should be cleared when EDMA is active.
1546  */
mv_bmdma_enable_iie(struct ata_port * ap,int enable_bmdma)1547 static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1548 {
1549 	struct mv_port_priv *pp = ap->private_data;
1550 	u32 new, *old = &pp->cached.unknown_rsvd;
1551 
1552 	if (enable_bmdma)
1553 		new = *old | 1;
1554 	else
1555 		new = *old & ~1;
1556 	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1557 }
1558 
1559 /*
1560  * SOC chips have an issue whereby the HDD LEDs don't always blink
1561  * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1562  * of the SOC takes care of it, generating a steady blink rate when
1563  * any drive on the chip is active.
1564  *
1565  * Unfortunately, the blink mode is a global hardware setting for the SOC,
1566  * so we must use it whenever at least one port on the SOC has NCQ enabled.
1567  *
1568  * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1569  * LED operation works then, and provides better (more accurate) feedback.
1570  *
1571  * Note that this code assumes that an SOC never has more than one HC onboard.
1572  */
mv_soc_led_blink_enable(struct ata_port * ap)1573 static void mv_soc_led_blink_enable(struct ata_port *ap)
1574 {
1575 	struct ata_host *host = ap->host;
1576 	struct mv_host_priv *hpriv = host->private_data;
1577 	void __iomem *hc_mmio;
1578 	u32 led_ctrl;
1579 
1580 	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1581 		return;
1582 	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1583 	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1584 	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1585 	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1586 }
1587 
mv_soc_led_blink_disable(struct ata_port * ap)1588 static void mv_soc_led_blink_disable(struct ata_port *ap)
1589 {
1590 	struct ata_host *host = ap->host;
1591 	struct mv_host_priv *hpriv = host->private_data;
1592 	void __iomem *hc_mmio;
1593 	u32 led_ctrl;
1594 	unsigned int port;
1595 
1596 	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1597 		return;
1598 
1599 	/* disable led-blink only if no ports are using NCQ */
1600 	for (port = 0; port < hpriv->n_ports; port++) {
1601 		struct ata_port *this_ap = host->ports[port];
1602 		struct mv_port_priv *pp = this_ap->private_data;
1603 
1604 		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1605 			return;
1606 	}
1607 
1608 	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1609 	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1610 	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1611 	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1612 }
1613 
1614 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1615 {
1616 	u32 cfg;
1617 	struct mv_port_priv *pp    = ap->private_data;
1618 	struct mv_host_priv *hpriv = ap->host->private_data;
1619 	void __iomem *port_mmio    = mv_ap_base(ap);
1620 
1621 	/* set up non-NCQ EDMA configuration */
1622 	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1623 	pp->pp_flags &=
1624 	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1625 
1626 	if (IS_GEN_I(hpriv))
1627 		cfg |= (1 << 8);	/* enab config burst size mask */
1628 
1629 	else if (IS_GEN_II(hpriv)) {
1630 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1631 		mv_60x1_errata_sata25(ap, want_ncq);
1632 
1633 	} else if (IS_GEN_IIE(hpriv)) {
1634 		int want_fbs = sata_pmp_attached(ap);
1635 		/*
1636 		 * Possible future enhancement:
1637 		 *
1638 		 * The chip can use FBS with non-NCQ, if we allow it,
1639 		 * But first we need to have the error handling in place
1640 		 * for this mode (datasheet section 7.3.15.4.2.3).
1641 		 * So disallow non-NCQ FBS for now.
1642 		 */
1643 		want_fbs &= want_ncq;
1644 
1645 		mv_config_fbs(ap, want_ncq, want_fbs);
1646 
1647 		if (want_fbs) {
1648 			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1649 			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1650 		}
1651 
1652 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1653 		if (want_edma) {
1654 			cfg |= (1 << 22); /* enab 4-entry host queue cache */
1655 			if (!IS_SOC(hpriv))
1656 				cfg |= (1 << 18); /* enab early completion */
1657 		}
1658 		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1659 			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1660 		mv_bmdma_enable_iie(ap, !want_edma);
1661 
1662 		if (IS_SOC(hpriv)) {
1663 			if (want_ncq)
1664 				mv_soc_led_blink_enable(ap);
1665 			else
1666 				mv_soc_led_blink_disable(ap);
1667 		}
1668 	}
1669 
1670 	if (want_ncq) {
1671 		cfg |= EDMA_CFG_NCQ;
1672 		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1673 	}
1674 
1675 	writelfl(cfg, port_mmio + EDMA_CFG);
1676 }
1677 
1678 static void mv_port_free_dma_mem(struct ata_port *ap)
1679 {
1680 	struct mv_host_priv *hpriv = ap->host->private_data;
1681 	struct mv_port_priv *pp = ap->private_data;
1682 	int tag;
1683 
1684 	if (pp->crqb) {
1685 		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1686 		pp->crqb = NULL;
1687 	}
1688 	if (pp->crpb) {
1689 		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1690 		pp->crpb = NULL;
1691 	}
1692 	/*
1693 	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1694 	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1695 	 */
1696 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1697 		if (pp->sg_tbl[tag]) {
1698 			if (tag == 0 || !IS_GEN_I(hpriv))
1699 				dma_pool_free(hpriv->sg_tbl_pool,
1700 					      pp->sg_tbl[tag],
1701 					      pp->sg_tbl_dma[tag]);
1702 			pp->sg_tbl[tag] = NULL;
1703 		}
1704 	}
1705 }
1706 
1707 /**
1708  *      mv_port_start - Port specific init/start routine.
1709  *      @ap: ATA channel to manipulate
1710  *
1711  *      Allocate and point to DMA memory, init port private memory,
1712  *      zero indices.
1713  *
1714  *      LOCKING:
1715  *      Inherited from caller.
1716  */
1717 static int mv_port_start(struct ata_port *ap)
1718 {
1719 	struct device *dev = ap->host->dev;
1720 	struct mv_host_priv *hpriv = ap->host->private_data;
1721 	struct mv_port_priv *pp;
1722 	unsigned long flags;
1723 	int tag;
1724 
1725 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1726 	if (!pp)
1727 		return -ENOMEM;
1728 	ap->private_data = pp;
1729 
1730 	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1731 	if (!pp->crqb)
1732 		return -ENOMEM;
1733 	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1734 
1735 	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1736 	if (!pp->crpb)
1737 		goto out_port_free_dma_mem;
1738 	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1739 
1740 	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1741 	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1742 		ap->flags |= ATA_FLAG_AN;
1743 	/*
1744 	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1745 	 * For later hardware, we need one unique sg_tbl per NCQ tag.
1746 	 */
1747 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1748 		if (tag == 0 || !IS_GEN_I(hpriv)) {
1749 			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1750 					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1751 			if (!pp->sg_tbl[tag])
1752 				goto out_port_free_dma_mem;
1753 		} else {
1754 			pp->sg_tbl[tag]     = pp->sg_tbl[0];
1755 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1756 		}
1757 	}
1758 
1759 	spin_lock_irqsave(ap->lock, flags);
1760 	mv_save_cached_regs(ap);
1761 	mv_edma_cfg(ap, 0, 0);
1762 	spin_unlock_irqrestore(ap->lock, flags);
1763 
1764 	return 0;
1765 
1766 out_port_free_dma_mem:
1767 	mv_port_free_dma_mem(ap);
1768 	return -ENOMEM;
1769 }
1770 
1771 /**
1772  *      mv_port_stop - Port specific cleanup/stop routine.
1773  *      @ap: ATA channel to manipulate
1774  *
1775  *      Stop DMA, cleanup port memory.
1776  *
1777  *      LOCKING:
1778  *      This routine uses the host lock to protect the DMA stop.
1779  */
1780 static void mv_port_stop(struct ata_port *ap)
1781 {
1782 	unsigned long flags;
1783 
1784 	spin_lock_irqsave(ap->lock, flags);
1785 	mv_stop_edma(ap);
1786 	mv_enable_port_irqs(ap, 0);
1787 	spin_unlock_irqrestore(ap->lock, flags);
1788 	mv_port_free_dma_mem(ap);
1789 }
1790 
1791 /**
1792  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1793  *      @qc: queued command whose SG list to source from
1794  *
1795  *      Populate the SG list and mark the last entry.
1796  *
1797  *      LOCKING:
1798  *      Inherited from caller.
1799  */
1800 static void mv_fill_sg(struct ata_queued_cmd *qc)
1801 {
1802 	struct mv_port_priv *pp = qc->ap->private_data;
1803 	struct scatterlist *sg;
1804 	struct mv_sg *mv_sg, *last_sg = NULL;
1805 	unsigned int si;
1806 
1807 	mv_sg = pp->sg_tbl[qc->tag];
1808 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1809 		dma_addr_t addr = sg_dma_address(sg);
1810 		u32 sg_len = sg_dma_len(sg);
1811 
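		/*
		 * Split each segment so that no single ePRD entry crosses
		 * a 64KB boundary (a length field of zero denotes 64KB).
		 */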
1812 		while (sg_len) {
1813 			u32 offset = addr & 0xffff;
1814 			u32 len = sg_len;
1815 
1816 			if (offset + len > 0x10000)
1817 				len = 0x10000 - offset;
1818 
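			/* Split the 64-bit DMA address; the double 16-bit
			 * shift stays valid even when dma_addr_t is 32 bits.
			 */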
1819 			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1820 			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1821 			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1822 			mv_sg->reserved = 0;
1823 
1824 			sg_len -= len;
1825 			addr += len;
1826 
1827 			last_sg = mv_sg;
1828 			mv_sg++;
1829 		}
1830 	}
1831 
1832 	if (likely(last_sg))
1833 		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1834 	mb(); /* ensure data structure is visible to the chipset */
1835 }
1836 
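/*
 * Pack one 16-bit CRQB command word: the 8-bit register data sits in the
 * low byte, the shadow-register address above it (CRQB_CMD_ADDR_SHIFT),
 * plus the chip-select bits, with CRQB_CMD_LAST marking the final word.
 */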
1837 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1838 {
1839 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1840 		(last ? CRQB_CMD_LAST : 0);
1841 	*cmdw = cpu_to_le16(tmp);
1842 }
1843 
1844 /**
1845  *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
1846  *	@ap: Port associated with this ATA transaction.
1847  *
1848  *	We need this only for ATAPI bmdma transactions,
1849  *	as otherwise we experience spurious interrupts
1850  *	after libata-sff handles the bmdma interrupts.
1851  */
1852 static void mv_sff_irq_clear(struct ata_port *ap)
1853 {
1854 	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1855 }
1856 
1857 /**
1858  *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1859  *	@qc: queued command to check for chipset/DMA compatibility.
1860  *
1861  *	The bmdma engines cannot handle speculative data sizes
1862  *	(bytecount under/over flow).  So only allow DMA for
1863  *	data transfer commands with known data sizes.
1864  *
1865  *	LOCKING:
1866  *	Inherited from caller.
1867  */
1868 static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1869 {
1870 	struct scsi_cmnd *scmd = qc->scsicmd;
1871 
1872 	if (scmd) {
1873 		switch (scmd->cmnd[0]) {
1874 		case READ_6:
1875 		case READ_10:
1876 		case READ_12:
1877 		case WRITE_6:
1878 		case WRITE_10:
1879 		case WRITE_12:
1880 		case GPCMD_READ_CD:
1881 		case GPCMD_SEND_DVD_STRUCTURE:
1882 		case GPCMD_SEND_CUE_SHEET:
1883 			return 0; /* DMA is safe */
1884 		}
1885 	}
1886 	return -EOPNOTSUPP; /* use PIO instead */
1887 }
1888 
1889 /**
1890  *	mv_bmdma_setup - Set up BMDMA transaction
1891  *	@qc: queued command to prepare DMA for.
1892  *
1893  *	LOCKING:
1894  *	Inherited from caller.
1895  */
1896 static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1897 {
1898 	struct ata_port *ap = qc->ap;
1899 	void __iomem *port_mmio = mv_ap_base(ap);
1900 	struct mv_port_priv *pp = ap->private_data;
1901 
1902 	mv_fill_sg(qc);
1903 
1904 	/* clear all DMA cmd bits */
1905 	writel(0, port_mmio + BMDMA_CMD);
1906 
1907 	/* load PRD table addr. */
1908 	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
1909 		port_mmio + BMDMA_PRD_HIGH);
1910 	writelfl(pp->sg_tbl_dma[qc->tag],
1911 		port_mmio + BMDMA_PRD_LOW);
1912 
1913 	/* issue r/w command */
1914 	ap->ops->sff_exec_command(ap, &qc->tf);
1915 }
1916 
1917 /**
1918  *	mv_bmdma_start - Start a BMDMA transaction
1919  *	@qc: queued command to start DMA on.
1920  *
1921  *	LOCKING:
1922  *	Inherited from caller.
1923  */
1924 static void mv_bmdma_start(struct ata_queued_cmd *qc)
1925 {
1926 	struct ata_port *ap = qc->ap;
1927 	void __iomem *port_mmio = mv_ap_base(ap);
1928 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
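	/* ATA_DMA_WR means "DMA writes to memory", i.e. a device read */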
1929 	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1930 
1931 	/* start host DMA transaction */
1932 	writelfl(cmd, port_mmio + BMDMA_CMD);
1933 }
1934 
1935 /**
1936  *	mv_bmdma_stop_ap - Stop BMDMA transfer
1937  *	@ap: port on which to stop DMA.
1938  *
1939  *	Clears the ATA_DMA_START flag in the bmdma control register
1940  *
1941  *	LOCKING:
1942  *	Inherited from caller.
1943  */
1944 static void mv_bmdma_stop_ap(struct ata_port *ap)
1945 {
1946 	void __iomem *port_mmio = mv_ap_base(ap);
1947 	u32 cmd;
1948 
1949 	/* clear start/stop bit */
1950 	cmd = readl(port_mmio + BMDMA_CMD);
1951 	if (cmd & ATA_DMA_START) {
1952 		cmd &= ~ATA_DMA_START;
1953 		writelfl(cmd, port_mmio + BMDMA_CMD);
1954 
1955 		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1956 		ata_sff_dma_pause(ap);
1957 	}
1958 }
1959 
1960 static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1961 {
1962 	mv_bmdma_stop_ap(qc->ap);
1963 }
1964 
1965 /**
1966  *	mv_bmdma_status - Read BMDMA status
1967  *	@ap: port for which to retrieve DMA status.
1968  *
1969  *	Read and return equivalent of the sff BMDMA status register.
1970  *
1971  *	LOCKING:
1972  *	Inherited from caller.
1973  */
1974 static u8 mv_bmdma_status(struct ata_port *ap)
1975 {
1976 	void __iomem *port_mmio = mv_ap_base(ap);
1977 	u32 reg, status;
1978 
1979 	/*
1980 	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1981 	 * and the ATA_DMA_INTR bit doesn't exist.
1982 	 */
1983 	reg = readl(port_mmio + BMDMA_STATUS);
1984 	if (reg & ATA_DMA_ACTIVE)
1985 		status = ATA_DMA_ACTIVE;
1986 	else if (reg & ATA_DMA_ERR)
1987 		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1988 	else {
1989 		/*
1990 		 * Just because DMA_ACTIVE is 0 (DMA completed),
1991 		 * this does _not_ mean the device is "done".
1992 		 * So we should not yet be signalling ATA_DMA_INTR
1993 		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
1994 		 */
1995 		mv_bmdma_stop_ap(ap);
1996 		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1997 			status = 0;
1998 		else
1999 			status = ATA_DMA_INTR;
2000 	}
2001 	return status;
2002 }
2003 
2004 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
2005 {
2006 	struct ata_taskfile *tf = &qc->tf;
2007 	/*
2008 	 * Workaround for 88SX60x1 FEr SATA#24.
2009 	 *
2010 	 * Chip may corrupt WRITEs if multi_count >= 4kB.
2011 	 * Note that READs are unaffected.
2012 	 *
2013 	 * It's not clear if this errata really means "4K bytes",
2014 	 * or if it always happens for multi_count > 7
2015 	 * regardless of device sector_size.
2016 	 *
2017 	 * So, for safety, any write with multi_count > 7
2018 	 * gets converted here into a regular PIO write instead:
2019 	 */
2020 	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
2021 		if (qc->dev->multi_count > 7) {
2022 			switch (tf->command) {
2023 			case ATA_CMD_WRITE_MULTI:
2024 				tf->command = ATA_CMD_PIO_WRITE;
2025 				break;
2026 			case ATA_CMD_WRITE_MULTI_FUA_EXT:
2027 				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
2028 				/* fall through */
2029 			case ATA_CMD_WRITE_MULTI_EXT:
2030 				tf->command = ATA_CMD_PIO_WRITE_EXT;
2031 				break;
2032 			}
2033 		}
2034 	}
2035 }
2036 
2037 /**
2038  *      mv_qc_prep - Host specific command preparation.
2039  *      @qc: queued command to prepare
2040  *
2041  *      This routine simply redirects to the general purpose routine
2042  *      if command is not DMA.  Else, it handles prep of the CRQB
2043  *      (command request block), does some sanity checking, and calls
2044  *      the SG load routine.
2045  *
2046  *      LOCKING:
2047  *      Inherited from caller.
2048  */
2049 static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
2050 {
2051 	struct ata_port *ap = qc->ap;
2052 	struct mv_port_priv *pp = ap->private_data;
2053 	__le16 *cw;
2054 	struct ata_taskfile *tf = &qc->tf;
2055 	u16 flags = 0;
2056 	unsigned in_index;
2057 
2058 	switch (tf->protocol) {
2059 	case ATA_PROT_DMA:
2060 		if (tf->command == ATA_CMD_DSM)
2061 			return AC_ERR_OK;
2062 		/* fall-thru */
2063 	case ATA_PROT_NCQ:
2064 		break;	/* continue below */
2065 	case ATA_PROT_PIO:
2066 		mv_rw_multi_errata_sata24(qc);
2067 		return AC_ERR_OK;
2068 	default:
2069 		return AC_ERR_OK;
2070 	}
2071 
2072 	/* Fill in command request block */
2074 	if (!(tf->flags & ATA_TFLAG_WRITE))
2075 		flags |= CRQB_FLAG_READ;
2076 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2077 	flags |= qc->tag << CRQB_TAG_SHIFT;
2078 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2079 
2080 	/* get current queue index from software */
2081 	in_index = pp->req_idx;
2082 
2083 	pp->crqb[in_index].sg_addr =
2084 		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2085 	pp->crqb[in_index].sg_addr_hi =
2086 		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2087 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2088 
2089 	cw = &pp->crqb[in_index].ata_cmd[0];
2090 
2091 	/* Sadly, the CRQB cannot accommodate all registers--there are
2092 	 * only 11 bytes...so we must pick and choose required
2093 	 * registers based on the command.  So, we drop feature and
2094 	 * hob_feature for [RW] DMA commands, but they are needed for
2095 	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
2096 	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2097 	 */
2098 	switch (tf->command) {
2099 	case ATA_CMD_READ:
2100 	case ATA_CMD_READ_EXT:
2101 	case ATA_CMD_WRITE:
2102 	case ATA_CMD_WRITE_EXT:
2103 	case ATA_CMD_WRITE_FUA_EXT:
2104 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2105 		break;
2106 	case ATA_CMD_FPDMA_READ:
2107 	case ATA_CMD_FPDMA_WRITE:
2108 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2109 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2110 		break;
2111 	default:
2112 		/* The only other commands EDMA supports in non-queued and
2113 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2114 		 * of which are defined/used by Linux.  If we get here, this
2115 		 * driver needs work.
2116 		 */
2117 		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
2118 				tf->command);
2119 		return AC_ERR_INVALID;
2120 	}
2121 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2122 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2123 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2124 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2125 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2126 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2127 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2128 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2129 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
2130 
2131 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2132 		return AC_ERR_OK;
2133 	mv_fill_sg(qc);
2134 
2135 	return AC_ERR_OK;
2136 }
2137 
2138 /**
2139  *      mv_qc_prep_iie - Host specific command preparation.
2140  *      @qc: queued command to prepare
2141  *
2142  *      This routine simply redirects to the general purpose routine
2143  *      if command is not DMA.  Else, it handles prep of the CRQB
2144  *      (command request block), does some sanity checking, and calls
2145  *      the SG load routine.
2146  *
2147  *      LOCKING:
2148  *      Inherited from caller.
2149  */
2150 static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
2151 {
2152 	struct ata_port *ap = qc->ap;
2153 	struct mv_port_priv *pp = ap->private_data;
2154 	struct mv_crqb_iie *crqb;
2155 	struct ata_taskfile *tf = &qc->tf;
2156 	unsigned in_index;
2157 	u32 flags = 0;
2158 
2159 	if ((tf->protocol != ATA_PROT_DMA) &&
2160 	    (tf->protocol != ATA_PROT_NCQ))
2161 		return AC_ERR_OK;
2162 	if (tf->command == ATA_CMD_DSM)
2163 		return AC_ERR_OK;  /* use bmdma for this */
2164 
2165 	/* Fill in Gen IIE command request block */
2166 	if (!(tf->flags & ATA_TFLAG_WRITE))
2167 		flags |= CRQB_FLAG_READ;
2168 
2169 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2170 	flags |= qc->tag << CRQB_TAG_SHIFT;
2171 	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
2172 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2173 
2174 	/* get current queue index from software */
2175 	in_index = pp->req_idx;
2176 
2177 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2178 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2179 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2180 	crqb->flags = cpu_to_le32(flags);
2181 
2182 	crqb->ata_cmd[0] = cpu_to_le32(
2183 			(tf->command << 16) |
2184 			(tf->feature << 24)
2185 		);
2186 	crqb->ata_cmd[1] = cpu_to_le32(
2187 			(tf->lbal << 0) |
2188 			(tf->lbam << 8) |
2189 			(tf->lbah << 16) |
2190 			(tf->device << 24)
2191 		);
2192 	crqb->ata_cmd[2] = cpu_to_le32(
2193 			(tf->hob_lbal << 0) |
2194 			(tf->hob_lbam << 8) |
2195 			(tf->hob_lbah << 16) |
2196 			(tf->hob_feature << 24)
2197 		);
2198 	crqb->ata_cmd[3] = cpu_to_le32(
2199 			(tf->nsect << 0) |
2200 			(tf->hob_nsect << 8)
2201 		);
2202 
2203 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2204 		return AC_ERR_OK;
2205 	mv_fill_sg(qc);
2206 
2207 	return AC_ERR_OK;
2208 }
2209 
2210 /**
2211  *	mv_sff_check_status - fetch device status, if valid
2212  *	@ap: ATA port to fetch status from
2213  *
2214  *	When using command issue via mv_qc_issue_fis(),
2215  *	the initial ATA_BUSY state does not show up in the
2216  *	ATA status (shadow) register.  This can confuse libata!
2217  *
2218  *	So we have a hook here to fake ATA_BUSY for that situation,
2219  *	until the first time a BUSY, DRQ, or ERR bit is seen.
2220  *
2221  *	The rest of the time, it simply returns the ATA status register.
2222  */
2223 static u8 mv_sff_check_status(struct ata_port *ap)
2224 {
2225 	u8 stat = ioread8(ap->ioaddr.status_addr);
2226 	struct mv_port_priv *pp = ap->private_data;
2227 
2228 	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2229 		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2230 			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2231 		else
2232 			stat = ATA_BUSY;
2233 	}
2234 	return stat;
2235 }
2236 
2237 /**
2238  *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: ATA port on which to send the FIS
2239  *	@fis: fis to be sent
2240  *	@nwords: number of 32-bit words in the fis
2241  */
2242 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2243 {
2244 	void __iomem *port_mmio = mv_ap_base(ap);
2245 	u32 ifctl, old_ifctl, ifstat;
2246 	int i, timeout = 200, final_word = nwords - 1;
2247 
2248 	/* Initiate FIS transmission mode */
2249 	old_ifctl = readl(port_mmio + SATA_IFCTL);
2250 	ifctl = 0x100 | (old_ifctl & 0xf);
2251 	writelfl(ifctl, port_mmio + SATA_IFCTL);
2252 
2253 	/* Send all words of the FIS except for the final word */
2254 	for (i = 0; i < final_word; ++i)
2255 		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2256 
2257 	/* Flag end-of-transmission, and then send the final word */
2258 	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2259 	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2260 
2261 	/*
2262 	 * Wait for FIS transmission to complete.
2263 	 * This typically takes just a single iteration.
2264 	 */
2265 	do {
2266 		ifstat = readl(port_mmio + SATA_IFSTAT);
2267 	} while (!(ifstat & 0x1000) && --timeout);
2268 
2269 	/* Restore original port configuration */
2270 	writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2271 
2272 	/* See if it worked */
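	/* success: bit 12 set with bit 13 clear in SATA_IFSTAT */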
2273 	if ((ifstat & 0x3000) != 0x1000) {
2274 		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2275 			      __func__, ifstat);
2276 		return AC_ERR_OTHER;
2277 	}
2278 	return 0;
2279 }
2280 
2281 /**
2282  *	mv_qc_issue_fis - Issue a command directly as a FIS
2283  *	@qc: queued command to start
2284  *
2285  *	Note that the ATA shadow registers are not updated
2286  *	after command issue, so the device will appear "READY"
2287  *	if polled, even while it is BUSY processing the command.
2288  *
2289  *	So we use a status hook to fake ATA_BUSY until the drive changes state.
2290  *
2291  *	Note: we don't get updated shadow regs on *completion*
2292  *	of non-data commands. So avoid sending them via this function,
2293  *	as they will appear to have completed immediately.
2294  *
2295  *	GEN_IIE has special registers that we could get the result tf from,
2296  *	but earlier chipsets do not.  For now, we ignore those registers.
2297  */
2298 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2299 {
2300 	struct ata_port *ap = qc->ap;
2301 	struct mv_port_priv *pp = ap->private_data;
2302 	struct ata_link *link = qc->dev->link;
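	/* a host-to-device Register FIS occupies 5 32-bit words (20 bytes) */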
2303 	u32 fis[5];
2304 	int err = 0;
2305 
2306 	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2307 	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2308 	if (err)
2309 		return err;
2310 
2311 	switch (qc->tf.protocol) {
2312 	case ATAPI_PROT_PIO:
2313 		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2314 		/* fall through */
2315 	case ATAPI_PROT_NODATA:
2316 		ap->hsm_task_state = HSM_ST_FIRST;
2317 		break;
2318 	case ATA_PROT_PIO:
2319 		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2320 		if (qc->tf.flags & ATA_TFLAG_WRITE)
2321 			ap->hsm_task_state = HSM_ST_FIRST;
2322 		else
2323 			ap->hsm_task_state = HSM_ST;
2324 		break;
2325 	default:
2326 		ap->hsm_task_state = HSM_ST_LAST;
2327 		break;
2328 	}
2329 
2330 	if (qc->tf.flags & ATA_TFLAG_POLLING)
2331 		ata_sff_queue_pio_task(link, 0);
2332 	return 0;
2333 }
2334 
2335 /**
2336  *      mv_qc_issue - Initiate a command to the host
2337  *      @qc: queued command to start
2338  *
2339  *      This routine simply redirects to the general purpose routine
2340  *      if command is not DMA.  Else, it sanity checks our local
2341  *      caches of the request producer/consumer indices then enables
2342  *      DMA and bumps the request producer index.
2343  *
2344  *      LOCKING:
2345  *      Inherited from caller.
2346  */
2347 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2348 {
2349 	static int limit_warnings = 10;
2350 	struct ata_port *ap = qc->ap;
2351 	void __iomem *port_mmio = mv_ap_base(ap);
2352 	struct mv_port_priv *pp = ap->private_data;
2353 	u32 in_index;
2354 	unsigned int port_irqs;
2355 
2356 	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2357 
2358 	switch (qc->tf.protocol) {
2359 	case ATA_PROT_DMA:
2360 		if (qc->tf.command == ATA_CMD_DSM) {
2361 			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
2362 				return AC_ERR_OTHER;
2363 			break;  /* use bmdma for this */
2364 		}
2365 		/* fall thru */
2366 	case ATA_PROT_NCQ:
2367 		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2368 		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2369 		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2370 
2371 		/* Write the request in pointer to kick the EDMA to life */
2372 		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2373 					port_mmio + EDMA_REQ_Q_IN_PTR);
2374 		return 0;
2375 
2376 	case ATA_PROT_PIO:
2377 		/*
2378 		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2379 		 *
2380 		 * Someday, we might implement special polling workarounds
2381 		 * for these, but it all seems rather unnecessary since we
2382 		 * normally use only DMA for commands which transfer more
2383 		 * than a single block of data.
2384 		 *
2385 		 * Much of the time, this could just work regardless.
2386 		 * So for now, just log the incident, and allow the attempt.
2387 		 */
2388 		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2389 			--limit_warnings;
2390 			ata_link_warn(qc->dev->link, DRV_NAME
2391 				      ": attempting PIO w/multiple DRQ: "
2392 				      "this may fail due to h/w errata\n");
2393 		}
2394 		/* drop through */
2395 	case ATA_PROT_NODATA:
2396 	case ATAPI_PROT_PIO:
2397 	case ATAPI_PROT_NODATA:
2398 		if (ap->flags & ATA_FLAG_PIO_POLLING)
2399 			qc->tf.flags |= ATA_TFLAG_POLLING;
2400 		break;
2401 	}
2402 
2403 	if (qc->tf.flags & ATA_TFLAG_POLLING)
2404 		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
2405 	else
2406 		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */
2407 
2408 	/*
2409 	 * We're about to send a non-EDMA capable command to the
2410 	 * port.  Turn off EDMA so there won't be problems accessing
2411 	 * the shadow block and other registers.
2412 	 */
2413 	mv_stop_edma(ap);
2414 	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2415 	mv_pmp_select(ap, qc->dev->link->pmp);
2416 
2417 	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2418 		struct mv_host_priv *hpriv = ap->host->private_data;
2419 		/*
2420 		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2421 		 *
2422 		 * After any NCQ error, the READ_LOG_EXT command
2423 		 * from libata-eh *must* use mv_qc_issue_fis().
2424 		 * Otherwise it might fail, due to chip errata.
2425 		 *
2426 		 * Rather than special-case it, we'll just *always*
2427 		 * use this method here for READ_LOG_EXT, making for
2428 		 * easier testing.
2429 		 */
2430 		if (IS_GEN_II(hpriv))
2431 			return mv_qc_issue_fis(qc);
2432 	}
2433 	return ata_bmdma_qc_issue(qc);
2434 }
2435 
2436 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2437 {
2438 	struct mv_port_priv *pp = ap->private_data;
2439 	struct ata_queued_cmd *qc;
2440 
2441 	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2442 		return NULL;
2443 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
2444 	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2445 		return qc;
2446 	return NULL;
2447 }
2448 
2449 static void mv_pmp_error_handler(struct ata_port *ap)
2450 {
2451 	unsigned int pmp, pmp_map;
2452 	struct mv_port_priv *pp = ap->private_data;
2453 
2454 	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2455 		/*
2456 		 * Perform NCQ error analysis on failed PMPs
2457 		 * before we freeze the port entirely.
2458 		 *
2459 		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2460 		 */
2461 		pmp_map = pp->delayed_eh_pmp_map;
2462 		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2463 		for (pmp = 0; pmp_map != 0; pmp++) {
2464 			unsigned int this_pmp = (1 << pmp);
2465 			if (pmp_map & this_pmp) {
2466 				struct ata_link *link = &ap->pmp_link[pmp];
2467 				pmp_map &= ~this_pmp;
2468 				ata_eh_analyze_ncq_error(link);
2469 			}
2470 		}
2471 		ata_port_freeze(ap);
2472 	}
2473 	sata_pmp_error_handler(ap);
2474 }
2475 
2476 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2477 {
2478 	void __iomem *port_mmio = mv_ap_base(ap);
2479 
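	/* the upper 16 bits of SATA_TESTCTL hold the per-PMP error bitmap */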
2480 	return readl(port_mmio + SATA_TESTCTL) >> 16;
2481 }
2482 
2483 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2484 {
2485 	struct ata_eh_info *ehi;
2486 	unsigned int pmp;
2487 
2488 	/*
2489 	 * Initialize EH info for PMPs which saw device errors
2490 	 */
2491 	ehi = &ap->link.eh_info;
2492 	for (pmp = 0; pmp_map != 0; pmp++) {
2493 		unsigned int this_pmp = (1 << pmp);
2494 		if (pmp_map & this_pmp) {
2495 			struct ata_link *link = &ap->pmp_link[pmp];
2496 
2497 			pmp_map &= ~this_pmp;
2498 			ehi = &link->eh_info;
2499 			ata_ehi_clear_desc(ehi);
2500 			ata_ehi_push_desc(ehi, "dev err");
2501 			ehi->err_mask |= AC_ERR_DEV;
2502 			ehi->action |= ATA_EH_RESET;
2503 			ata_link_abort(link);
2504 		}
2505 	}
2506 }
2507 
2508 static int mv_req_q_empty(struct ata_port *ap)
2509 {
2510 	void __iomem *port_mmio = mv_ap_base(ap);
2511 	u32 in_ptr, out_ptr;
2512 
2513 	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2514 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2515 	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2516 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2517 	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
2518 }
2519 
2520 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2521 {
2522 	struct mv_port_priv *pp = ap->private_data;
2523 	int failed_links;
2524 	unsigned int old_map, new_map;
2525 
2526 	/*
2527 	 * Device error during FBS+NCQ operation:
2528 	 *
2529 	 * Set a port flag to prevent further I/O being enqueued.
2530 	 * Leave the EDMA running to drain outstanding commands from this port.
2531 	 * Perform the post-mortem/EH only when all responses are complete.
2532 	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2533 	 */
2534 	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2535 		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2536 		pp->delayed_eh_pmp_map = 0;
2537 	}
2538 	old_map = pp->delayed_eh_pmp_map;
2539 	new_map = old_map | mv_get_err_pmp_map(ap);
2540 
2541 	if (old_map != new_map) {
2542 		pp->delayed_eh_pmp_map = new_map;
2543 		mv_pmp_eh_prep(ap, new_map & ~old_map);
2544 	}
2545 	failed_links = hweight16(new_map);
2546 
2547 	ata_port_info(ap,
2548 		      "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
2549 		      __func__, pp->delayed_eh_pmp_map,
2550 		      ap->qc_active, failed_links,
2551 		      ap->nr_active_links);
2552 
2553 	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2554 		mv_process_crpb_entries(ap, pp);
2555 		mv_stop_edma(ap);
2556 		mv_eh_freeze(ap);
2557 		ata_port_info(ap, "%s: done\n", __func__);
2558 		return 1;	/* handled */
2559 	}
2560 	ata_port_info(ap, "%s: waiting\n", __func__);
2561 	return 1;	/* handled */
2562 }
2563 
2564 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2565 {
2566 	/*
2567 	 * Possible future enhancement:
2568 	 *
2569 	 * FBS+non-NCQ operation is not yet implemented.
2570 	 * See related notes in mv_edma_cfg().
2571 	 *
2572 	 * Device error during FBS+non-NCQ operation:
2573 	 *
2574 	 * We need to snapshot the shadow registers for each failed command.
2575 	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2576 	 */
2577 	return 0;	/* not handled */
2578 }
2579 
2580 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2581 {
2582 	struct mv_port_priv *pp = ap->private_data;
2583 
2584 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2585 		return 0;	/* EDMA was not active: not handled */
2586 	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2587 		return 0;	/* FBS was not active: not handled */
2588 
2589 	if (!(edma_err_cause & EDMA_ERR_DEV))
2590 		return 0;	/* non DEV error: not handled */
2591 	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2592 	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2593 		return 0;	/* other problems: not handled */
2594 
2595 	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2596 		/*
2597 		 * EDMA should NOT have self-disabled for this case.
2598 		 * If it did, then something is wrong elsewhere,
2599 		 * and we cannot handle it here.
2600 		 */
2601 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2602 			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2603 				      __func__, edma_err_cause, pp->pp_flags);
2604 			return 0; /* not handled */
2605 		}
2606 		return mv_handle_fbs_ncq_dev_err(ap);
2607 	} else {
2608 		/*
2609 		 * EDMA should have self-disabled for this case.
2610 		 * If it did not, then something is wrong elsewhere,
2611 		 * and we cannot handle it here.
2612 		 */
2613 		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2614 			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2615 				      __func__, edma_err_cause, pp->pp_flags);
2616 			return 0; /* not handled */
2617 		}
2618 		return mv_handle_fbs_non_ncq_dev_err(ap);
2619 	}
2620 	return 0;	/* not handled */
2621 }
2622 
2623 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2624 {
2625 	struct ata_eh_info *ehi = &ap->link.eh_info;
2626 	char *when = "idle";
2627 
2628 	ata_ehi_clear_desc(ehi);
2629 	if (edma_was_enabled) {
2630 		when = "EDMA enabled";
2631 	} else {
2632 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2633 		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2634 			when = "polling";
2635 	}
2636 	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2637 	ehi->err_mask |= AC_ERR_OTHER;
2638 	ehi->action   |= ATA_EH_RESET;
2639 	ata_port_freeze(ap);
2640 }
2641 
2642 /**
2643  *      mv_err_intr - Handle error interrupts on the port
2644  *      @ap: ATA channel to manipulate
2645  *
2646  *      Most cases require a full reset of the chip's state machine,
2647  *      which also performs a COMRESET.
2648  *      Also, if the port disabled DMA, update our cached copy to match.
2649  *
2650  *      LOCKING:
2651  *      Inherited from caller.
2652  */
2653 static void mv_err_intr(struct ata_port *ap)
2654 {
2655 	void __iomem *port_mmio = mv_ap_base(ap);
2656 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
2657 	u32 fis_cause = 0;
2658 	struct mv_port_priv *pp = ap->private_data;
2659 	struct mv_host_priv *hpriv = ap->host->private_data;
2660 	unsigned int action = 0, err_mask = 0;
2661 	struct ata_eh_info *ehi = &ap->link.eh_info;
2662 	struct ata_queued_cmd *qc;
2663 	int abort = 0;
2664 
2665 	/*
2666 	 * Read and clear the SError and err_cause bits.
2667 	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2668 	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2669 	 */
2670 	sata_scr_read(&ap->link, SCR_ERROR, &serr);
2671 	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2672 
2673 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2674 	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2675 		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2676 		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2677 	}
2678 	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2679 
2680 	if (edma_err_cause & EDMA_ERR_DEV) {
2681 		/*
2682 		 * Device errors during FIS-based switching operation
2683 		 * require special handling.
2684 		 */
2685 		if (mv_handle_dev_err(ap, edma_err_cause))
2686 			return;
2687 	}
2688 
2689 	qc = mv_get_active_qc(ap);
2690 	ata_ehi_clear_desc(ehi);
2691 	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2692 			  edma_err_cause, pp->pp_flags);
2693 
2694 	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2695 		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2696 		if (fis_cause & FIS_IRQ_CAUSE_AN) {
2697 			u32 ec = edma_err_cause &
2698 			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2699 			sata_async_notification(ap);
2700 			if (!ec)
2701 				return; /* Just an AN; no need for the nukes */
2702 			ata_ehi_push_desc(ehi, "SDB notify");
2703 		}
2704 	}
2705 	/*
2706 	 * All generations share these EDMA error cause bits:
2707 	 */
2708 	if (edma_err_cause & EDMA_ERR_DEV) {
2709 		err_mask |= AC_ERR_DEV;
2710 		action |= ATA_EH_RESET;
2711 		ata_ehi_push_desc(ehi, "dev error");
2712 	}
2713 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2714 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2715 			EDMA_ERR_INTRL_PAR)) {
2716 		err_mask |= AC_ERR_ATA_BUS;
2717 		action |= ATA_EH_RESET;
2718 		ata_ehi_push_desc(ehi, "parity error");
2719 	}
2720 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2721 		ata_ehi_hotplugged(ehi);
2722 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2723 			"dev disconnect" : "dev connect");
2724 		action |= ATA_EH_RESET;
2725 	}
2726 
2727 	/*
2728 	 * Gen-I has a different SELF_DIS bit,
2729 	 * different FREEZE bits, and no SERR bit:
2730 	 */
2731 	if (IS_GEN_I(hpriv)) {
2732 		eh_freeze_mask = EDMA_EH_FREEZE_5;
2733 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2734 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2735 			ata_ehi_push_desc(ehi, "EDMA self-disable");
2736 		}
2737 	} else {
2738 		eh_freeze_mask = EDMA_EH_FREEZE;
2739 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2740 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2741 			ata_ehi_push_desc(ehi, "EDMA self-disable");
2742 		}
2743 		if (edma_err_cause & EDMA_ERR_SERR) {
2744 			ata_ehi_push_desc(ehi, "SError=%08x", serr);
2745 			err_mask |= AC_ERR_ATA_BUS;
2746 			action |= ATA_EH_RESET;
2747 		}
2748 	}
2749 
2750 	if (!err_mask) {
2751 		err_mask = AC_ERR_OTHER;
2752 		action |= ATA_EH_RESET;
2753 	}
2754 
2755 	ehi->serror |= serr;
2756 	ehi->action |= action;
2757 
2758 	if (qc)
2759 		qc->err_mask |= err_mask;
2760 	else
2761 		ehi->err_mask |= err_mask;
2762 
2763 	if (err_mask == AC_ERR_DEV) {
2764 		/*
2765 		 * Cannot do ata_port_freeze() here,
2766 		 * because it would kill PIO access,
2767 		 * which is needed for further diagnosis.
2768 		 */
2769 		mv_eh_freeze(ap);
2770 		abort = 1;
2771 	} else if (edma_err_cause & eh_freeze_mask) {
2772 		/*
2773 		 * Note to self: ata_port_freeze() calls ata_port_abort()
2774 		 */
2775 		ata_port_freeze(ap);
2776 	} else {
2777 		abort = 1;
2778 	}
2779 
2780 	if (abort) {
2781 		if (qc)
2782 			ata_link_abort(qc->dev->link);
2783 		else
2784 			ata_port_abort(ap);
2785 	}
2786 }
2787 
2788 static bool mv_process_crpb_response(struct ata_port *ap,
2789 		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2790 {
2791 	u8 ata_status;
2792 	u16 edma_status = le16_to_cpu(response->flags);
2793 
2794 	/*
2795 	 * edma_status from a response queue entry:
2796 	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2797 	 *   MSB is saved ATA status from command completion.
2798 	 */
2799 	if (!ncq_enabled) {
2800 		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2801 		if (err_cause) {
2802 			/*
2803 			 * Error will be seen/handled by
2804 			 * mv_err_intr().  So do nothing at all here.
2805 			 */
2806 			return false;
2807 		}
2808 	}
2809 	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2810 	if (!ac_err_mask(ata_status))
2811 		return true;
2812 	/* else: leave it for mv_err_intr() */
2813 	return false;
2814 }
2815 
2816 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2817 {
2818 	void __iomem *port_mmio = mv_ap_base(ap);
2819 	struct mv_host_priv *hpriv = ap->host->private_data;
2820 	u32 in_index;
2821 	bool work_done = false;
2822 	u32 done_mask = 0;
2823 	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2824 
2825 	/* Get the hardware queue position index */
2826 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2827 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2828 
2829 	/* Process new responses received since the last time we looked */
2830 	while (in_index != pp->resp_idx) {
2831 		unsigned int tag;
2832 		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2833 
2834 		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2835 
2836 		if (IS_GEN_I(hpriv)) {
2837 			/* 50xx: no NCQ, only one command active at a time */
2838 			tag = ap->link.active_tag;
2839 		} else {
2840 			/* Gen II/IIE: get command tag from CRPB entry */
2841 			tag = le16_to_cpu(response->id) & 0x1f;
2842 		}
2843 		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2844 			done_mask |= 1 << tag;
2845 		work_done = true;
2846 	}
2847 
2848 	if (work_done) {
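		/* pass libata the updated tag mask, with done bits cleared */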
2849 		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2850 
2851 		/* Update the software queue position index in hardware */
2852 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2853 			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2854 			 port_mmio + EDMA_RSP_Q_OUT_PTR);
2855 	}
2856 }
2857 
2858 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2859 {
2860 	struct mv_port_priv *pp;
2861 	int edma_was_enabled;
2862 
2863 	/*
2864 	 * Grab a snapshot of the EDMA_EN flag setting,
2865 	 * so that we have a consistent view for this port,
2866 	 * even if one of the routines we call changes it.
2867 	 */
2868 	pp = ap->private_data;
2869 	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2870 	/*
2871 	 * Process completed CRPB response(s) before other events.
2872 	 */
2873 	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2874 		mv_process_crpb_entries(ap, pp);
2875 		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2876 			mv_handle_fbs_ncq_dev_err(ap);
2877 	}
2878 	/*
2879 	 * Handle chip-reported errors, or continue on to handle PIO.
2880 	 */
2881 	if (unlikely(port_cause & ERR_IRQ)) {
2882 		mv_err_intr(ap);
2883 	} else if (!edma_was_enabled) {
2884 		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2885 		if (qc)
2886 			ata_bmdma_port_intr(ap, qc);
2887 		else
2888 			mv_unexpected_intr(ap, edma_was_enabled);
2889 	}
2890 }
2891 
2892 /**
2893  *      mv_host_intr - Handle all interrupts on the given host controller
2894  *      @host: host specific structure
2895  *      @main_irq_cause: Main interrupt cause register for the chip.
2896  *
2897  *      LOCKING:
2898  *      Inherited from caller.
2899  */
2900 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2901 {
2902 	struct mv_host_priv *hpriv = host->private_data;
2903 	void __iomem *mmio = hpriv->base, *hc_mmio;
2904 	unsigned int handled = 0, port;
2905 
2906 	/* If asserted, clear the "all ports" IRQ coalescing bit */
2907 	if (main_irq_cause & ALL_PORTS_COAL_DONE)
2908 		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2909 
2910 	for (port = 0; port < hpriv->n_ports; port++) {
2911 		struct ata_port *ap = host->ports[port];
2912 		unsigned int p, shift, hardport, port_cause;
2913 
2914 		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
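		/*
		 * shift locates this port's DONE/ERR bits within
		 * main_irq_cause; hardport is the port's index (0..3)
		 * within its own host controller (hc).
		 */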
2915 		/*
2916 		 * Each hc within the host has its own hc_irq_cause register,
2917 		 * where the interrupting ports' bits get ack'd.
2918 		 */
2919 		if (hardport == 0) {	/* first port on this hc ? */
2920 			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2921 			u32 port_mask, ack_irqs;
2922 			/*
2923 			 * Skip this entire hc if nothing pending for any ports
2924 			 */
2925 			if (!hc_cause) {
2926 				port += MV_PORTS_PER_HC - 1;
2927 				continue;
2928 			}
2929 			/*
2930 			 * We don't need/want to read the hc_irq_cause register,
2931 			 * because doing so hurts performance, and
2932 			 * main_irq_cause already gives us everything we need.
2933 			 *
2934 			 * But we do have to *write* to the hc_irq_cause to ack
2935 			 * the ports that we are handling this time through.
2936 			 *
2937 			 * This requires that we create a bitmap for those
2938 			 * ports which interrupted us, and use that bitmap
2939 			 * to ack (only) those ports via hc_irq_cause.
2940 			 */
2941 			ack_irqs = 0;
2942 			if (hc_cause & PORTS_0_3_COAL_DONE)
2943 				ack_irqs = HC_COAL_IRQ;
2944 			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2945 				if ((port + p) >= hpriv->n_ports)
2946 					break;
2947 				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2948 				if (hc_cause & port_mask)
2949 					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2950 			}
2951 			hc_mmio = mv_hc_base_from_port(mmio, port);
2952 			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2953 			handled = 1;
2954 		}
2955 		/*
2956 		 * Handle interrupts signalled for this port:
2957 		 */
2958 		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2959 		if (port_cause)
2960 			mv_port_intr(ap, port_cause);
2961 	}
2962 	return handled;
2963 }
2964 
2965 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2966 {
2967 	struct mv_host_priv *hpriv = host->private_data;
2968 	struct ata_port *ap;
2969 	struct ata_queued_cmd *qc;
2970 	struct ata_eh_info *ehi;
2971 	unsigned int i, err_mask, printed = 0;
2972 	u32 err_cause;
2973 
2974 	err_cause = readl(mmio + hpriv->irq_cause_offset);
2975 
2976 	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2977 
2978 	DPRINTK("All regs @ PCI error\n");
2979 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2980 
2981 	writelfl(0, mmio + hpriv->irq_cause_offset);
2982 
2983 	for (i = 0; i < host->n_ports; i++) {
2984 		ap = host->ports[i];
2985 		if (!ata_link_offline(&ap->link)) {
2986 			ehi = &ap->link.eh_info;
2987 			ata_ehi_clear_desc(ehi);
2988 			if (!printed++)
2989 				ata_ehi_push_desc(ehi,
2990 					"PCI err cause 0x%08x", err_cause);
2991 			err_mask = AC_ERR_HOST_BUS;
2992 			ehi->action = ATA_EH_RESET;
2993 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
2994 			if (qc)
2995 				qc->err_mask |= err_mask;
2996 			else
2997 				ehi->err_mask |= err_mask;
2998 
2999 			ata_port_freeze(ap);
3000 		}
3001 	}
3002 	return 1;	/* handled */
3003 }
3004 
3005 /**
3006  *      mv_interrupt - Main interrupt event handler
3007  *      @irq: unused
3008  *      @dev_instance: private data; in this case the host structure
3009  *
3010  *      Read the read-only register to determine if any host
3011  *      controllers have pending interrupts.  If so, call lower level
3012  *      routine to handle.  Also check for PCI errors which are only
3013  *      reported here.
3014  *
3015  *      LOCKING:
3016  *      This routine holds the host lock while processing pending
3017  *      interrupts.
3018  */
3019 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
3020 {
3021 	struct ata_host *host = dev_instance;
3022 	struct mv_host_priv *hpriv = host->private_data;
3023 	unsigned int handled = 0;
3024 	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
3025 	u32 main_irq_cause, pending_irqs;
3026 
3027 	spin_lock(&host->lock);
3028 
3029 	/* for MSI:  block new interrupts while in here */
3030 	if (using_msi)
3031 		mv_write_main_irq_mask(0, hpriv);
3032 
3033 	main_irq_cause = readl(hpriv->main_irq_cause_addr);
3034 	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
3035 	/*
3036 	 * Deal with cases where we either have nothing pending, or have read
3037 	 * a bogus register value which can indicate HW removal or PCI fault.
3038 	 */
3039 	if (pending_irqs && main_irq_cause != 0xffffffffU) {
3040 		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3041 			handled = mv_pci_error(host, hpriv->base);
3042 		else
3043 			handled = mv_host_intr(host, pending_irqs);
3044 	}
3045 
3046 	/* for MSI: unmask; interrupt cause bits will retrigger now */
3047 	if (using_msi)
3048 		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3049 
3050 	spin_unlock(&host->lock);
3051 
3052 	return IRQ_RETVAL(handled);
3053 }
3054 
3055 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3056 {
3057 	unsigned int ofs;
3058 
3059 	switch (sc_reg_in) {
3060 	case SCR_STATUS:
3061 	case SCR_ERROR:
3062 	case SCR_CONTROL:
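		/* these SCRs are contiguous u32s at the phy base: index * 4 */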
3063 		ofs = sc_reg_in * sizeof(u32);
3064 		break;
3065 	default:
3066 		ofs = 0xffffffffU;
3067 		break;
3068 	}
3069 	return ofs;
3070 }
3071 
3072 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3073 {
3074 	struct mv_host_priv *hpriv = link->ap->host->private_data;
3075 	void __iomem *mmio = hpriv->base;
3076 	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3077 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
3078 
3079 	if (ofs != 0xffffffffU) {
3080 		*val = readl(addr + ofs);
3081 		return 0;
3082 	} else
3083 		return -EINVAL;
3084 }
3085 
3086 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3087 {
3088 	struct mv_host_priv *hpriv = link->ap->host->private_data;
3089 	void __iomem *mmio = hpriv->base;
3090 	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3091 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
3092 
3093 	if (ofs != 0xffffffffU) {
3094 		writelfl(val, addr + ofs);
3095 		return 0;
3096 	} else
3097 		return -EINVAL;
3098 }
3099 
3100 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3101 {
3102 	struct pci_dev *pdev = to_pci_dev(host->dev);
3103 	int early_5080;
3104 
3105 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3106 
3107 	if (!early_5080) {
3108 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3109 		tmp |= (1 << 0);
3110 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3111 	}
3112 
3113 	mv_reset_pci_bus(host, mmio);
3114 }
3115 
3116 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3117 {
3118 	writel(0x0fcfffff, mmio + FLASH_CTL);
3119 }
3120 
3121 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3122 			   void __iomem *mmio)
3123 {
3124 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3125 	u32 tmp;
3126 
3127 	tmp = readl(phy_mmio + MV5_PHY_MODE);
3128 
3129 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
3130 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
3131 }
3132 
3133 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3134 {
3135 	u32 tmp;
3136 
3137 	writel(0, mmio + GPIO_PORT_CTL);
3138 
3139 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3140 
3141 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3142 	tmp |= ~(1 << 0);
3143 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3144 }
3145 
3146 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3147 			   unsigned int port)
3148 {
3149 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
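	/* pre-emphasis (bits 12:11) and amplitude (bits 7:5) fields,
	 * as captured earlier by mv5_read_preamp()
	 */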
3150 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3151 	u32 tmp;
3152 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3153 
3154 	if (fix_apm_sq) {
3155 		tmp = readl(phy_mmio + MV5_LTMODE);
3156 		tmp |= (1 << 19);
3157 		writel(tmp, phy_mmio + MV5_LTMODE);
3158 
3159 		tmp = readl(phy_mmio + MV5_PHY_CTL);
3160 		tmp &= ~0x3;
3161 		tmp |= 0x1;
3162 		writel(tmp, phy_mmio + MV5_PHY_CTL);
3163 	}
3164 
3165 	tmp = readl(phy_mmio + MV5_PHY_MODE);
3166 	tmp &= ~mask;
3167 	tmp |= hpriv->signal[port].pre;
3168 	tmp |= hpriv->signal[port].amps;
3169 	writel(tmp, phy_mmio + MV5_PHY_MODE);
3170 }
3171 
3172 
3173 #undef ZERO
3174 #define ZERO(reg) writel(0, port_mmio + (reg))
3175 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3176 			     unsigned int port)
3177 {
3178 	void __iomem *port_mmio = mv_port_base(mmio, port);
3179 
3180 	mv_reset_channel(hpriv, mmio, port);
3181 
3182 	ZERO(0x028);	/* command */
3183 	writel(0x11f, port_mmio + EDMA_CFG);
3184 	ZERO(0x004);	/* timer */
3185 	ZERO(0x008);	/* irq err cause */
3186 	ZERO(0x00c);	/* irq err mask */
3187 	ZERO(0x010);	/* rq bah */
3188 	ZERO(0x014);	/* rq inp */
3189 	ZERO(0x018);	/* rq outp */
3190 	ZERO(0x01c);	/* respq bah */
3191 	ZERO(0x024);	/* respq outp */
3192 	ZERO(0x020);	/* respq inp */
3193 	ZERO(0x02c);	/* test control */
3194 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3195 }
3196 #undef ZERO
3197 
3198 #define ZERO(reg) writel(0, hc_mmio + (reg))
3199 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3200 			unsigned int hc)
3201 {
3202 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3203 	u32 tmp;
3204 
3205 	ZERO(0x00c);
3206 	ZERO(0x010);
3207 	ZERO(0x014);
3208 	ZERO(0x018);
3209 
3210 	tmp = readl(hc_mmio + 0x20);
3211 	tmp &= 0x1c1c1c1c;
3212 	tmp |= 0x03030303;
3213 	writel(tmp, hc_mmio + 0x20);
3214 }
3215 #undef ZERO
3216 
3217 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3218 			unsigned int n_hc)
3219 {
3220 	unsigned int hc, port;
3221 
3222 	for (hc = 0; hc < n_hc; hc++) {
3223 		for (port = 0; port < MV_PORTS_PER_HC; port++)
3224 			mv5_reset_hc_port(hpriv, mmio,
3225 					  (hc * MV_PORTS_PER_HC) + port);
3226 
3227 		mv5_reset_one_hc(hpriv, mmio, hc);
3228 	}
3229 
3230 	return 0;
3231 }
3232 
3233 #undef ZERO
3234 #define ZERO(reg) writel(0, mmio + (reg))
3235 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3236 {
3237 	struct mv_host_priv *hpriv = host->private_data;
3238 	u32 tmp;
3239 
3240 	tmp = readl(mmio + MV_PCI_MODE);
3241 	tmp &= 0xff00ffff;
3242 	writel(tmp, mmio + MV_PCI_MODE);
3243 
3244 	ZERO(MV_PCI_DISC_TIMER);
3245 	ZERO(MV_PCI_MSI_TRIGGER);
3246 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3247 	ZERO(MV_PCI_SERR_MASK);
3248 	ZERO(hpriv->irq_cause_offset);
3249 	ZERO(hpriv->irq_mask_offset);
3250 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
3251 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3252 	ZERO(MV_PCI_ERR_ATTRIBUTE);
3253 	ZERO(MV_PCI_ERR_COMMAND);
3254 }
3255 #undef ZERO
3256 
3257 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3258 {
3259 	u32 tmp;
3260 
3261 	mv5_reset_flash(hpriv, mmio);
3262 
3263 	tmp = readl(mmio + GPIO_PORT_CTL);
3264 	tmp &= 0x3;
3265 	tmp |= (1 << 5) | (1 << 6);
3266 	writel(tmp, mmio + GPIO_PORT_CTL);
3267 }
3268 
3269 /**
3270  *      mv6_reset_hc - Perform the 6xxx global soft reset
3271  *      @mmio: base address of the HBA
3272  *
3273  *      This routine only applies to 6xxx parts.
3274  *
3275  *      LOCKING:
3276  *      Inherited from caller.
3277  */
3278 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3279 			unsigned int n_hc)
3280 {
3281 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3282 	int i, rc = 0;
3283 	u32 t;
3284 
3285 	/* Following procedure defined in PCI "main command and status
3286 	 * register" table.
3287 	 */
3288 	t = readl(reg);
3289 	writel(t | STOP_PCI_MASTER, reg);
3290 
3291 	for (i = 0; i < 1000; i++) {
3292 		udelay(1);
3293 		t = readl(reg);
3294 		if (PCI_MASTER_EMPTY & t)
3295 			break;
3296 	}
3297 	if (!(PCI_MASTER_EMPTY & t)) {
3298 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3299 		rc = 1;
3300 		goto done;
3301 	}
3302 
3303 	/* set reset */
3304 	i = 5;
3305 	do {
3306 		writel(t | GLOB_SFT_RST, reg);
3307 		t = readl(reg);
3308 		udelay(1);
3309 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
3310 
3311 	if (!(GLOB_SFT_RST & t)) {
3312 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3313 		rc = 1;
3314 		goto done;
3315 	}
3316 
3317 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
3318 	i = 5;
3319 	do {
3320 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3321 		t = readl(reg);
3322 		udelay(1);
3323 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
3324 
3325 	if (GLOB_SFT_RST & t) {
3326 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3327 		rc = 1;
3328 	}
3329 done:
3330 	return rc;
3331 }
3332 
3333 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3334 			   void __iomem *mmio)
3335 {
3336 	void __iomem *port_mmio;
3337 	u32 tmp;
3338 
3339 	tmp = readl(mmio + RESET_CFG);
3340 	if ((tmp & (1 << 0)) == 0) {
3341 		hpriv->signal[idx].amps = 0x7 << 8;
3342 		hpriv->signal[idx].pre = 0x1 << 5;
3343 		return;
3344 	}
3345 
3346 	port_mmio = mv_port_base(mmio, idx);
3347 	tmp = readl(port_mmio + PHY_MODE2);
3348 
3349 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
3350 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
3351 }
3352 
3353 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3354 {
3355 	writel(0x00000060, mmio + GPIO_PORT_CTL);
3356 }
3357 
3358 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3359 			   unsigned int port)
3360 {
3361 	void __iomem *port_mmio = mv_port_base(mmio, port);
3362 
3363 	u32 hp_flags = hpriv->hp_flags;
3364 	int fix_phy_mode2 =
3365 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3366 	int fix_phy_mode4 =
3367 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3368 	u32 m2, m3;
3369 
3370 	if (fix_phy_mode2) {
3371 		m2 = readl(port_mmio + PHY_MODE2);
3372 		m2 &= ~(1 << 16);
3373 		m2 |= (1 << 31);
3374 		writel(m2, port_mmio + PHY_MODE2);
3375 
3376 		udelay(200);
3377 
3378 		m2 = readl(port_mmio + PHY_MODE2);
3379 		m2 &= ~((1 << 16) | (1 << 31));
3380 		writel(m2, port_mmio + PHY_MODE2);
3381 
3382 		udelay(200);
3383 	}
3384 
3385 	/*
3386 	 * Gen-II/IIe PHY_MODE3 errata RM#2:
3387 	 * Achieves better receiver noise performance than the h/w default:
3388 	 */
3389 	m3 = readl(port_mmio + PHY_MODE3);
3390 	m3 = (m3 & 0x1f) | (0x5555601 << 5);
3391 
3392 	/* Guideline 88F5182 (GL# SATA-S11) */
3393 	if (IS_SOC(hpriv))
3394 		m3 &= ~0x1c;
3395 
3396 	if (fix_phy_mode4) {
3397 		u32 m4 = readl(port_mmio + PHY_MODE4);
3398 		/*
3399 		 * Enforce reserved-bit restrictions on GenIIe devices only.
3400 		 * For earlier chipsets, force only the internal config field
3401 		 *  (workaround for errata FEr SATA#10 part 1).
3402 		 */
3403 		if (IS_GEN_IIE(hpriv))
3404 			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3405 		else
3406 			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3407 		writel(m4, port_mmio + PHY_MODE4);
3408 	}
3409 	/*
3410 	 * Workaround for 60x1-B2 errata SATA#13:
3411 	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3412 	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3413 	 * Or ensure we use writelfl() when writing PHY_MODE4.
3414 	 */
3415 	writel(m3, port_mmio + PHY_MODE3);
3416 
3417 	/* Revert values of pre-emphasis and signal amps to the saved ones */
3418 	m2 = readl(port_mmio + PHY_MODE2);
3419 
3420 	m2 &= ~MV_M2_PREAMP_MASK;
3421 	m2 |= hpriv->signal[port].amps;
3422 	m2 |= hpriv->signal[port].pre;
3423 	m2 &= ~(1 << 16);
3424 
3425 	/* according to mvSata 3.6.1, some IIE values are fixed */
3426 	if (IS_GEN_IIE(hpriv)) {
3427 		m2 &= ~0xC30FF01F;
3428 		m2 |= 0x0000900F;
3429 	}
3430 
3431 	writel(m2, port_mmio + PHY_MODE2);
3432 }
3433 
3434 /* TODO: use the generic LED interface to configure the SATA Presence */
3435 /* & Activity LEDs on the board */
3436 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3437 				      void __iomem *mmio)
3438 {
3439 	return;
3440 }
3441 
3442 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3443 			   void __iomem *mmio)
3444 {
3445 	void __iomem *port_mmio;
3446 	u32 tmp;
3447 
3448 	port_mmio = mv_port_base(mmio, idx);
3449 	tmp = readl(port_mmio + PHY_MODE2);
3450 
3451 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
3452 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
3453 }
3454 
3455 #undef ZERO
3456 #define ZERO(reg) writel(0, port_mmio + (reg))
3457 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3458 					void __iomem *mmio, unsigned int port)
3459 {
3460 	void __iomem *port_mmio = mv_port_base(mmio, port);
3461 
3462 	mv_reset_channel(hpriv, mmio, port);
3463 
3464 	ZERO(0x028);		/* command */
3465 	writel(0x101f, port_mmio + EDMA_CFG);
3466 	ZERO(0x004);		/* timer */
3467 	ZERO(0x008);		/* irq err cause */
3468 	ZERO(0x00c);		/* irq err mask */
3469 	ZERO(0x010);		/* rq bah */
3470 	ZERO(0x014);		/* rq inp */
3471 	ZERO(0x018);		/* rq outp */
3472 	ZERO(0x01c);		/* respq bah */
3473 	ZERO(0x024);		/* respq outp */
3474 	ZERO(0x020);		/* respq inp */
3475 	ZERO(0x02c);		/* test control */
3476 	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3477 }
3478 
3479 #undef ZERO
3480 
3481 #define ZERO(reg) writel(0, hc_mmio + (reg))
3482 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3483 				       void __iomem *mmio)
3484 {
3485 	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3486 
3487 	ZERO(0x00c);
3488 	ZERO(0x010);
3489 	ZERO(0x014);
3490 
3491 }
3492 
3493 #undef ZERO
3494 
3495 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3496 				  void __iomem *mmio, unsigned int n_hc)
3497 {
3498 	unsigned int port;
3499 
3500 	for (port = 0; port < hpriv->n_ports; port++)
3501 		mv_soc_reset_hc_port(hpriv, mmio, port);
3502 
3503 	mv_soc_reset_one_hc(hpriv, mmio);
3504 
3505 	return 0;
3506 }
3507 
3508 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3509 				      void __iomem *mmio)
3510 {
3511 	return;
3512 }
3513 
3514 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3515 {
3516 	return;
3517 }
3518 
3519 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3520 				  void __iomem *mmio, unsigned int port)
3521 {
3522 	void __iomem *port_mmio = mv_port_base(mmio, port);
3523 	u32	reg;
3524 
3525 	reg = readl(port_mmio + PHY_MODE3);
3526 	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
3527 	reg |= (0x1 << 27);
3528 	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
3529 	reg |= (0x1 << 29);
3530 	writel(reg, port_mmio + PHY_MODE3);
3531 
3532 	reg = readl(port_mmio + PHY_MODE4);
3533 	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3534 	reg |= (0x1 << 16);
3535 	writel(reg, port_mmio + PHY_MODE4);
3536 
3537 	reg = readl(port_mmio + PHY_MODE9_GEN2);
3538 	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
3539 	reg |= 0x8;
3540 	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
3541 	writel(reg, port_mmio + PHY_MODE9_GEN2);
3542 
3543 	reg = readl(port_mmio + PHY_MODE9_GEN1);
3544 	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
3545 	reg |= 0x8;
3546 	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
3547 	writel(reg, port_mmio + PHY_MODE9_GEN1);
3548 }
3549 
3550 /**
3551  *	soc_is_65n - check if the SoC is a 65 nm device
3552  *
3553  *	Detect the type of the SoC by reading the PHYCFG_OFS register.
3554  *	This register exists only on 65 nm devices and holds a non-zero
3555  *	value there; reading it on older devices returns 0.
3556  */
3557 static bool soc_is_65n(struct mv_host_priv *hpriv)
3558 {
3559 	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3560 
3561 	if (readl(port0_mmio + PHYCFG_OFS))
3562 		return true;
3563 	return false;
3564 }
3565 
3566 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3567 {
3568 	u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3569 
3570 	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
3571 	if (want_gen2i)
3572 		ifcfg |= (1 << 7);		/* enable gen2i speed */
3573 	writelfl(ifcfg, port_mmio + SATA_IFCFG);
3574 }
3575 
3576 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3577 			     unsigned int port_no)
3578 {
3579 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
3580 
3581 	/*
3582 	 * The datasheet warns against setting EDMA_RESET when EDMA is active
3583 	 * (but doesn't say what the problem might be).  So we first try
3584 	 * to disable the EDMA engine before doing the EDMA_RESET operation.
3585 	 */
3586 	mv_stop_edma_engine(port_mmio);
3587 	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3588 
3589 	if (!IS_GEN_I(hpriv)) {
3590 		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3591 		mv_setup_ifcfg(port_mmio, 1);
3592 	}
3593 	/*
3594 	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3595 	 * link, and physical layers.  It resets all SATA interface registers
3596 	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3597 	 */
3598 	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3599 	udelay(25);	/* allow reset propagation */
3600 	writelfl(0, port_mmio + EDMA_CMD);
3601 
3602 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
3603 
3604 	if (IS_GEN_I(hpriv))
3605 		mdelay(1);
3606 }
3607 
3608 static void mv_pmp_select(struct ata_port *ap, int pmp)
3609 {
3610 	if (sata_pmp_supported(ap)) {
3611 		void __iomem *port_mmio = mv_ap_base(ap);
3612 		u32 reg = readl(port_mmio + SATA_IFCTL);
3613 		int old = reg & 0xf;
3614 
3615 		if (old != pmp) {
3616 			reg = (reg & ~0xf) | pmp;
3617 			writelfl(reg, port_mmio + SATA_IFCTL);
3618 		}
3619 	}
3620 }
3621 
3622 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3623 				unsigned long deadline)
3624 {
3625 	mv_pmp_select(link->ap, sata_srst_pmp(link));
3626 	return sata_std_hardreset(link, class, deadline);
3627 }
3628 
3629 static int mv_softreset(struct ata_link *link, unsigned int *class,
3630 				unsigned long deadline)
3631 {
3632 	mv_pmp_select(link->ap, sata_srst_pmp(link));
3633 	return ata_sff_softreset(link, class, deadline);
3634 }
3635 
3636 static int mv_hardreset(struct ata_link *link, unsigned int *class,
3637 			unsigned long deadline)
3638 {
3639 	struct ata_port *ap = link->ap;
3640 	struct mv_host_priv *hpriv = ap->host->private_data;
3641 	struct mv_port_priv *pp = ap->private_data;
3642 	void __iomem *mmio = hpriv->base;
3643 	int rc, attempts = 0, extra = 0;
3644 	u32 sstatus;
3645 	bool online;
3646 
3647 	mv_reset_channel(hpriv, mmio, ap->port_no);
3648 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3649 	pp->pp_flags &=
3650 	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3651 
3652 	/* Workaround for errata FEr SATA#10 (part 2) */
3653 	do {
3654 		const unsigned long *timing =
3655 				sata_ehc_deb_timing(&link->eh_context);
3656 
3657 		rc = sata_link_hardreset(link, timing, deadline + extra,
3658 					 &online, NULL);
3659 		rc = online ? -EAGAIN : rc;
3660 		if (rc)
3661 			return rc;
3662 		sata_scr_read(link, SCR_STATUS, &sstatus);
3663 		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3664 			/* Force 1.5gb/s link speed and try again */
3665 			mv_setup_ifcfg(mv_ap_base(ap), 0);
3666 			if (time_after(jiffies + HZ, deadline))
3667 				extra = HZ; /* only extend it once, max */
3668 		}
3669 	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
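	/*
	 * SStatus decoding for the magic numbers above (standard SATA
	 * SStatus layout: DET in bits 3:0, SPD in bits 7:4, IPM in 11:8):
	 *   0x121: device presence detected but no phy communication
	 *   0x113: phy communication established at Gen1 (1.5 Gb/s)
	 *   0x123: phy communication established at Gen2 (3.0 Gb/s)
	 */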
3670 	mv_save_cached_regs(ap);
3671 	mv_edma_cfg(ap, 0, 0);
3672 
3673 	return rc;
3674 }
3675 
3676 static void mv_eh_freeze(struct ata_port *ap)
3677 {
3678 	mv_stop_edma(ap);
3679 	mv_enable_port_irqs(ap, 0);
3680 }
3681 
3682 static void mv_eh_thaw(struct ata_port *ap)
3683 {
3684 	struct mv_host_priv *hpriv = ap->host->private_data;
3685 	unsigned int port = ap->port_no;
3686 	unsigned int hardport = mv_hardport_from_port(port);
3687 	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3688 	void __iomem *port_mmio = mv_ap_base(ap);
3689 	u32 hc_irq_cause;
3690 
3691 	/* clear EDMA errors on this port */
3692 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3693 
3694 	/* clear pending irq events */
3695 	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3696 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
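	/*
	 * Note: bits in HC_IRQ_CAUSE are cleared by writing 0 to them,
	 * so writing the complement above acks only this port's DEV/DMA
	 * events while leaving the other ports' pending bits untouched.
	 */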
3697 
3698 	mv_enable_port_irqs(ap, ERR_IRQ);
3699 }
3700 
3701 /**
3702  *      mv_port_init - Perform some early initialization on a single port.
3703  *      @port: libata data structure storing shadow register addresses
3704  *      @port_mmio: base address of the port
3705  *
3706  *      Initialize shadow register mmio addresses, clear outstanding
3707  *      interrupts on the port, and unmask interrupts for the future
3708  *      start of the port.
3709  *
3710  *      LOCKING:
3711  *      Inherited from caller.
3712  */
3713 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
3714 {
3715 	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3716 
3717 	/* PIO related setup
3718 	 */
3719 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3720 	port->error_addr =
3721 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3722 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3723 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3724 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3725 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3726 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3727 	port->status_addr =
3728 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3729 	/* special case: control/altstatus doesn't have ATA_REG_ address */
3730 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3731 
3732 	/* Clear any currently outstanding port interrupt conditions */
3733 	serr = port_mmio + mv_scr_offset(SCR_ERROR);
3734 	writelfl(readl(serr), serr);
3735 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3736 
3737 	/* unmask all non-transient EDMA error interrupts */
3738 	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3739 
3740 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3741 		readl(port_mmio + EDMA_CFG),
3742 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3743 		readl(port_mmio + EDMA_ERR_IRQ_MASK));
3744 }
3745 
3746 static unsigned int mv_in_pcix_mode(struct ata_host *host)
3747 {
3748 	struct mv_host_priv *hpriv = host->private_data;
3749 	void __iomem *mmio = hpriv->base;
3750 	u32 reg;
3751 
3752 	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3753 		return 0;	/* not PCI-X capable */
3754 	reg = readl(mmio + MV_PCI_MODE);
3755 	if ((reg & MV_PCI_MODE_MASK) == 0)
3756 		return 0;	/* conventional PCI mode */
3757 	return 1;	/* chip is in PCI-X mode */
3758 }
3759 
3760 static int mv_pci_cut_through_okay(struct ata_host *host)
3761 {
3762 	struct mv_host_priv *hpriv = host->private_data;
3763 	void __iomem *mmio = hpriv->base;
3764 	u32 reg;
3765 
3766 	if (!mv_in_pcix_mode(host)) {
3767 		reg = readl(mmio + MV_PCI_COMMAND);
3768 		if (reg & MV_PCI_COMMAND_MRDTRIG)
3769 			return 0; /* not okay */
3770 	}
3771 	return 1; /* okay */
3772 }
3773 
3774 static void mv_60x1b2_errata_pci7(struct ata_host *host)
3775 {
3776 	struct mv_host_priv *hpriv = host->private_data;
3777 	void __iomem *mmio = hpriv->base;
3778 
3779 	/* workaround for 60x1-B2 errata PCI#7 */
3780 	if (mv_in_pcix_mode(host)) {
3781 		u32 reg = readl(mmio + MV_PCI_COMMAND);
3782 		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3783 	}
3784 }
3785 
3786 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3787 {
3788 	struct pci_dev *pdev = to_pci_dev(host->dev);
3789 	struct mv_host_priv *hpriv = host->private_data;
3790 	u32 hp_flags = hpriv->hp_flags;
3791 
3792 	switch (board_idx) {
3793 	case chip_5080:
3794 		hpriv->ops = &mv5xxx_ops;
3795 		hp_flags |= MV_HP_GEN_I;
3796 
3797 		switch (pdev->revision) {
3798 		case 0x1:
3799 			hp_flags |= MV_HP_ERRATA_50XXB0;
3800 			break;
3801 		case 0x3:
3802 			hp_flags |= MV_HP_ERRATA_50XXB2;
3803 			break;
3804 		default:
3805 			dev_warn(&pdev->dev,
3806 				 "Applying 50XXB2 workarounds to unknown rev\n");
3807 			hp_flags |= MV_HP_ERRATA_50XXB2;
3808 			break;
3809 		}
3810 		break;
3811 
3812 	case chip_504x:
3813 	case chip_508x:
3814 		hpriv->ops = &mv5xxx_ops;
3815 		hp_flags |= MV_HP_GEN_I;
3816 
3817 		switch (pdev->revision) {
3818 		case 0x0:
3819 			hp_flags |= MV_HP_ERRATA_50XXB0;
3820 			break;
3821 		case 0x3:
3822 			hp_flags |= MV_HP_ERRATA_50XXB2;
3823 			break;
3824 		default:
3825 			dev_warn(&pdev->dev,
3826 				 "Applying B2 workarounds to unknown rev\n");
3827 			hp_flags |= MV_HP_ERRATA_50XXB2;
3828 			break;
3829 		}
3830 		break;
3831 
3832 	case chip_604x:
3833 	case chip_608x:
3834 		hpriv->ops = &mv6xxx_ops;
3835 		hp_flags |= MV_HP_GEN_II;
3836 
3837 		switch (pdev->revision) {
3838 		case 0x7:
3839 			mv_60x1b2_errata_pci7(host);
3840 			hp_flags |= MV_HP_ERRATA_60X1B2;
3841 			break;
3842 		case 0x9:
3843 			hp_flags |= MV_HP_ERRATA_60X1C0;
3844 			break;
3845 		default:
3846 			dev_warn(&pdev->dev,
3847 				 "Applying B2 workarounds to unknown rev\n");
3848 			hp_flags |= MV_HP_ERRATA_60X1B2;
3849 			break;
3850 		}
3851 		break;
3852 
3853 	case chip_7042:
3854 		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3855 		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3856 		    (pdev->device == 0x2300 || pdev->device == 0x2310))
3857 		{
3858 			/*
3859 			 * Highpoint RocketRAID PCIe 23xx series cards:
3860 			 *
3861 			 * Unconfigured drives are treated as "Legacy"
3862 			 * by the BIOS, and it overwrites sector 8 with
3863 			 * a "Lgcy" metadata block prior to Linux boot.
3864 			 *
3865 			 * Configured drives (RAID or JBOD) leave sector 8
3866 			 * alone, but instead overwrite a high numbered
3867 			 * sector for the RAID metadata.  This sector can
3868 			 * be determined exactly, by truncating the physical
3869 			 * drive capacity to a nice even GB value.
3870 			 *
3871 			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
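			 *
			 * (Worked example with a hypothetical drive of
			 * 1953525168 sectors: 1953525168 & ~0xfffff ==
			 * 1953497088, so its RAID metadata would begin
			 * at sector 1953497088.)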
3872 			 *
3873 			 * Warn the user, lest they think we're just buggy.
3874 			 */
3875 			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3876 				" BIOS CORRUPTS DATA on all attached drives,"
3877 				" regardless of if/how they are configured."
3878 				" BEWARE!\n");
3879 			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3880 				" use sectors 8-9 on \"Legacy\" drives,"
3881 				" and avoid the final two gigabytes on"
3882 				" all RocketRAID BIOS initialized drives.\n");
3883 		}
3884 		/* fall through */
3885 	case chip_6042:
3886 		hpriv->ops = &mv6xxx_ops;
3887 		hp_flags |= MV_HP_GEN_IIE;
3888 		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3889 			hp_flags |= MV_HP_CUT_THROUGH;
3890 
3891 		switch (pdev->revision) {
3892 		case 0x2: /* Rev.B0: the first/only public release */
3893 			hp_flags |= MV_HP_ERRATA_60X1C0;
3894 			break;
3895 		default:
3896 			dev_warn(&pdev->dev,
3897 				 "Applying 60X1C0 workarounds to unknown rev\n");
3898 			hp_flags |= MV_HP_ERRATA_60X1C0;
3899 			break;
3900 		}
3901 		break;
3902 	case chip_soc:
3903 		if (soc_is_65n(hpriv))
3904 			hpriv->ops = &mv_soc_65n_ops;
3905 		else
3906 			hpriv->ops = &mv_soc_ops;
3907 		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3908 			MV_HP_ERRATA_60X1C0;
3909 		break;
3910 
3911 	default:
3912 		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
3913 		return -EINVAL;
3914 	}
3915 
3916 	hpriv->hp_flags = hp_flags;
3917 	if (hp_flags & MV_HP_PCIE) {
3918 		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
3919 		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
3920 		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
3921 	} else {
3922 		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
3923 		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
3924 		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
3925 	}
3926 
3927 	return 0;
3928 }
3929 
3930 /**
3931  *      mv_init_host - Perform some early initialization of the host.
3932  *	@host: ATA host to initialize
3933  *
3934  *      If possible, do an early global reset of the host.  Then do
3935  *      our port init and clear/unmask all/relevant host interrupts.
3936  *
3937  *      LOCKING:
3938  *      Inherited from caller.
3939  */
3940 static int mv_init_host(struct ata_host *host)
3941 {
3942 	int rc = 0, n_hc, port, hc;
3943 	struct mv_host_priv *hpriv = host->private_data;
3944 	void __iomem *mmio = hpriv->base;
3945 
3946 	rc = mv_chip_id(host, hpriv->board_idx);
3947 	if (rc)
3948 		goto done;
3949 
3950 	if (IS_SOC(hpriv)) {
3951 		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3952 		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
3953 	} else {
3954 		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3955 		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
3956 	}
3957 
3958 	/* initialize shadow irq mask with register's value */
3959 	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3960 
3961 	/* global interrupt mask: 0 == mask everything */
3962 	mv_set_main_irq_mask(host, ~0, 0);
3963 
3964 	n_hc = mv_get_hc_count(host->ports[0]->flags);
3965 
3966 	for (port = 0; port < host->n_ports; port++)
3967 		if (hpriv->ops->read_preamp)
3968 			hpriv->ops->read_preamp(hpriv, port, mmio);
3969 
3970 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3971 	if (rc)
3972 		goto done;
3973 
3974 	hpriv->ops->reset_flash(hpriv, mmio);
3975 	hpriv->ops->reset_bus(host, mmio);
3976 	hpriv->ops->enable_leds(hpriv, mmio);
3977 
3978 	for (port = 0; port < host->n_ports; port++) {
3979 		struct ata_port *ap = host->ports[port];
3980 		void __iomem *port_mmio = mv_port_base(mmio, port);
3981 
3982 		mv_port_init(&ap->ioaddr, port_mmio);
3983 	}
3984 
3985 	for (hc = 0; hc < n_hc; hc++) {
3986 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3987 
3988 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3989 			"(before clear)=0x%08x\n", hc,
3990 			readl(hc_mmio + HC_CFG),
3991 			readl(hc_mmio + HC_IRQ_CAUSE));
3992 
3993 		/* Clear any currently outstanding hc interrupt conditions */
3994 		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3995 	}
3996 
3997 	if (!IS_SOC(hpriv)) {
3998 		/* Clear any currently outstanding host interrupt conditions */
3999 		writelfl(0, mmio + hpriv->irq_cause_offset);
4000 
4001 		/* and unmask interrupt generation for host regs */
4002 		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
4003 	}
4004 
4005 	/*
4006 	 * enable only global host interrupts for now.
4007 	 * The per-port interrupts get done later as ports are set up.
4008 	 */
4009 	mv_set_main_irq_mask(host, 0, PCI_ERR);
4010 	mv_set_irq_coalescing(host, irq_coalescing_io_count,
4011 				    irq_coalescing_usecs);
4012 done:
4013 	return rc;
4014 }
4015 
4016 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
4017 {
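	/*
	 * These are device-managed (dmam_) pools: if any allocation below
	 * fails, pools created earlier are released automatically when the
	 * probe unwinds, so no explicit cleanup is needed here.
	 */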
4018 	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
4019 							     MV_CRQB_Q_SZ, 0);
4020 	if (!hpriv->crqb_pool)
4021 		return -ENOMEM;
4022 
4023 	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
4024 							     MV_CRPB_Q_SZ, 0);
4025 	if (!hpriv->crpb_pool)
4026 		return -ENOMEM;
4027 
4028 	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
4029 							     MV_SG_TBL_SZ, 0);
4030 	if (!hpriv->sg_tbl_pool)
4031 		return -ENOMEM;
4032 
4033 	return 0;
4034 }
4035 
4036 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4037 				 const struct mbus_dram_target_info *dram)
4038 {
4039 	int i;
4040 
4041 	for (i = 0; i < 4; i++) {
4042 		writel(0, hpriv->base + WINDOW_CTRL(i));
4043 		writel(0, hpriv->base + WINDOW_BASE(i));
4044 	}
4045 
4046 	for (i = 0; i < dram->num_cs; i++) {
4047 		const struct mbus_dram_window *cs = dram->cs + i;
4048 
4049 		writel(((cs->size - 1) & 0xffff0000) |
4050 			(cs->mbus_attr << 8) |
4051 			(dram->mbus_dram_target_id << 4) | 1,
4052 			hpriv->base + WINDOW_CTRL(i));
4053 		writel(cs->base, hpriv->base + WINDOW_BASE(i));
4054 	}
4055 }
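
/*
 * Layout of each WINDOW_CTRL word as programmed above (inferred from
 * the writel() arguments, not quoted from a datasheet):
 *   bits 31:16  window size minus one (64 KB granularity)
 *   bits 15:8   MBUS attribute of the DRAM chip select
 *   bits  7:4   MBUS DRAM target id
 *   bit   0     window enable
 */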
4056 
4057 /**
4058  *      mv_platform_probe - handle a positive probe of an SoC Marvell
4059  *      host
4060  *      @pdev: platform device found
4061  *
4062  *      LOCKING:
4063  *      Inherited from caller.
4064  */
4065 static int mv_platform_probe(struct platform_device *pdev)
4066 {
4067 	const struct mv_sata_platform_data *mv_platform_data;
4068 	const struct mbus_dram_target_info *dram;
4069 	const struct ata_port_info *ppi[] =
4070 	    { &mv_port_info[chip_soc], NULL };
4071 	struct ata_host *host;
4072 	struct mv_host_priv *hpriv;
4073 	struct resource *res;
4074 	int n_ports = 0, irq = 0;
4075 	int rc;
4076 	int port;
4077 
4078 	ata_print_version_once(&pdev->dev, DRV_VERSION);
4079 
4080 	/*
4081 	 * Simple resource validation ..
4082 	 */
4083 	if (unlikely(pdev->num_resources != 2)) {
4084 		dev_err(&pdev->dev, "invalid number of resources\n");
4085 		return -EINVAL;
4086 	}
4087 
4088 	/*
4089 	 * Get the register base first
4090 	 */
4091 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4092 	if (res == NULL)
4093 		return -EINVAL;
4094 
4095 	/* allocate host */
4096 	if (pdev->dev.of_node) {
4097 		of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
4098 		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4099 	} else {
4100 		mv_platform_data = dev_get_platdata(&pdev->dev);
4101 		n_ports = mv_platform_data->n_ports;
4102 		irq = platform_get_irq(pdev, 0);
4103 	}
4104 	if (irq < 0)
4105 		return irq;
4106 	if (!irq)
4107 		return -EINVAL;
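
	/*
	 * For the DT case above, a minimal matching node might look like
	 * this (illustrative only; the reg/interrupts values are made-up
	 * examples, see the marvell,orion-sata binding for the
	 * authoritative property list):
	 *
	 *	sata@80000 {
	 *		compatible = "marvell,orion-sata";
	 *		reg = <0x80000 0x5000>;
	 *		interrupts = <21>;
	 *		nr-ports = <2>;
	 *	};
	 */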
4108 
4109 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4110 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4111 
4112 	if (!host || !hpriv)
4113 		return -ENOMEM;
4114 	hpriv->port_clks = devm_kzalloc(&pdev->dev,
4115 					sizeof(struct clk *) * n_ports,
4116 					GFP_KERNEL);
4117 	if (!hpriv->port_clks)
4118 		return -ENOMEM;
4119 	hpriv->port_phys = devm_kzalloc(&pdev->dev,
4120 					sizeof(struct phy *) * n_ports,
4121 					GFP_KERNEL);
4122 	if (!hpriv->port_phys)
4123 		return -ENOMEM;
4124 	host->private_data = hpriv;
4125 	hpriv->board_idx = chip_soc;
4126 
4127 	host->iomap = NULL;
4128 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
4129 				   resource_size(res));
4130 	if (!hpriv->base)
4131 		return -ENOMEM;
4132 
4133 	hpriv->base -= SATAHC0_REG_BASE;
4134 
4135 	hpriv->clk = clk_get(&pdev->dev, NULL);
4136 	if (IS_ERR(hpriv->clk))
4137 		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4138 	else
4139 		clk_prepare_enable(hpriv->clk);
4140 
4141 	for (port = 0; port < n_ports; port++) {
4142 		char port_number[16];
4143 		sprintf(port_number, "%d", port);
4144 		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4145 		if (!IS_ERR(hpriv->port_clks[port]))
4146 			clk_prepare_enable(hpriv->port_clks[port]);
4147 
4148 		sprintf(port_number, "port%d", port);
4149 		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4150 							       port_number);
4151 		if (IS_ERR(hpriv->port_phys[port])) {
4152 			rc = PTR_ERR(hpriv->port_phys[port]);
4153 			hpriv->port_phys[port] = NULL;
4154 			if (rc != -EPROBE_DEFER)
4155 				dev_warn(&pdev->dev, "error getting phy %d\n", rc);
4156 
4157 			/* Cleanup only the initialized ports */
4158 			hpriv->n_ports = port;
4159 			goto err;
4160 		} else
4161 			phy_power_on(hpriv->port_phys[port]);
4162 	}
4163 
4164 	/* All the ports have been initialized */
4165 	hpriv->n_ports = n_ports;
4166 
4167 	/*
4168 	 * (Re-)program MBUS remapping windows if we are asked to.
4169 	 */
4170 	dram = mv_mbus_dram_info();
4171 	if (dram)
4172 		mv_conf_mbus_windows(hpriv, dram);
4173 
4174 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
4175 	if (rc)
4176 		goto err;
4177 
4178 	/*
4179 	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
4180 	 * updated in the LP_PHY_CTL register.
4181 	 */
4182 	if (pdev->dev.of_node &&
4183 		of_device_is_compatible(pdev->dev.of_node,
4184 					"marvell,armada-370-sata"))
4185 		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4186 
4187 	/* initialize adapter */
4188 	rc = mv_init_host(host);
4189 	if (rc)
4190 		goto err;
4191 
4192 	dev_info(&pdev->dev, "slots %u ports %d\n",
4193 		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4194 
4195 	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4196 	if (!rc)
4197 		return 0;
4198 
4199 err:
4200 	if (!IS_ERR(hpriv->clk)) {
4201 		clk_disable_unprepare(hpriv->clk);
4202 		clk_put(hpriv->clk);
4203 	}
4204 	for (port = 0; port < hpriv->n_ports; port++) {
4205 		if (!IS_ERR(hpriv->port_clks[port])) {
4206 			clk_disable_unprepare(hpriv->port_clks[port]);
4207 			clk_put(hpriv->port_clks[port]);
4208 		}
4209 		phy_power_off(hpriv->port_phys[port]);
4210 	}
4211 
4212 	return rc;
4213 }
4214 
4215 /**
4216  *      mv_platform_remove - unplug a platform interface
4217  *      @pdev: platform device
4218  *
4219  *
4220  *      A platform bus SATA device has been unplugged. Perform the needed
4221  *      cleanup. Also called on module unload for any active devices.
4222  */
4223 static int mv_platform_remove(struct platform_device *pdev)
4224 {
4225 	struct ata_host *host = platform_get_drvdata(pdev);
4226 	struct mv_host_priv *hpriv = host->private_data;
4227 	int port;
4228 	ata_host_detach(host);
4229 
4230 	if (!IS_ERR(hpriv->clk)) {
4231 		clk_disable_unprepare(hpriv->clk);
4232 		clk_put(hpriv->clk);
4233 	}
4234 	for (port = 0; port < host->n_ports; port++) {
4235 		if (!IS_ERR(hpriv->port_clks[port])) {
4236 			clk_disable_unprepare(hpriv->port_clks[port]);
4237 			clk_put(hpriv->port_clks[port]);
4238 		}
4239 		phy_power_off(hpriv->port_phys[port]);
4240 	}
4241 	return 0;
4242 }
4243 
4244 #ifdef CONFIG_PM_SLEEP
4245 static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4246 {
4247 	struct ata_host *host = platform_get_drvdata(pdev);
4248 	if (host)
4249 		return ata_host_suspend(host, state);
4250 	else
4251 		return 0;
4252 }
4253 
4254 static int mv_platform_resume(struct platform_device *pdev)
4255 {
4256 	struct ata_host *host = platform_get_drvdata(pdev);
4257 	const struct mbus_dram_target_info *dram;
4258 	int ret;
4259 
4260 	if (host) {
4261 		struct mv_host_priv *hpriv = host->private_data;
4262 
4263 		/*
4264 		 * (Re-)program MBUS remapping windows if we are asked to.
4265 		 */
4266 		dram = mv_mbus_dram_info();
4267 		if (dram)
4268 			mv_conf_mbus_windows(hpriv, dram);
4269 
4270 		/* initialize adapter */
4271 		ret = mv_init_host(host);
4272 		if (ret) {
4273 			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4274 			return ret;
4275 		}
4276 		ata_host_resume(host);
4277 	}
4278 
4279 	return 0;
4280 }
4281 #else
4282 #define mv_platform_suspend NULL
4283 #define mv_platform_resume NULL
4284 #endif
4285 
4286 #ifdef CONFIG_OF
4287 static const struct of_device_id mv_sata_dt_ids[] = {
4288 	{ .compatible = "marvell,armada-370-sata", },
4289 	{ .compatible = "marvell,orion-sata", },
4290 	{},
4291 };
4292 MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4293 #endif
4294 
4295 static struct platform_driver mv_platform_driver = {
4296 	.probe		= mv_platform_probe,
4297 	.remove		= mv_platform_remove,
4298 	.suspend	= mv_platform_suspend,
4299 	.resume		= mv_platform_resume,
4300 	.driver		= {
4301 		.name = DRV_NAME,
4302 		.of_match_table = of_match_ptr(mv_sata_dt_ids),
4303 	},
4304 };
4305 
4306 
4307 #ifdef CONFIG_PCI
4308 static int mv_pci_init_one(struct pci_dev *pdev,
4309 			   const struct pci_device_id *ent);
4310 #ifdef CONFIG_PM_SLEEP
4311 static int mv_pci_device_resume(struct pci_dev *pdev);
4312 #endif
4313 
4314 
4315 static struct pci_driver mv_pci_driver = {
4316 	.name			= DRV_NAME,
4317 	.id_table		= mv_pci_tbl,
4318 	.probe			= mv_pci_init_one,
4319 	.remove			= ata_pci_remove_one,
4320 #ifdef CONFIG_PM_SLEEP
4321 	.suspend		= ata_pci_device_suspend,
4322 	.resume			= mv_pci_device_resume,
4323 #endif
4324 
4325 };
4326 
4327 /* move to PCI layer or libata core? */
4328 static int pci_go_64(struct pci_dev *pdev)
4329 {
4330 	int rc;
4331 
4332 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4333 		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4334 		if (rc) {
4335 			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4336 			if (rc) {
4337 				dev_err(&pdev->dev,
4338 					"64-bit DMA enable failed\n");
4339 				return rc;
4340 			}
4341 		}
4342 	} else {
4343 		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4344 		if (rc) {
4345 			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
4346 			return rc;
4347 		}
4348 		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4349 		if (rc) {
4350 			dev_err(&pdev->dev,
4351 				"32-bit consistent DMA enable failed\n");
4352 			return rc;
4353 		}
4354 	}
4355 
4356 	return rc;
4357 }
4358 
4359 /**
4360  *      mv_print_info - Dump key info to kernel log for perusal.
4361  *      @host: ATA host to print info about
4362  *
4363  *      FIXME: complete this.
4364  *
4365  *      LOCKING:
4366  *      Inherited from caller.
4367  */
4368 static void mv_print_info(struct ata_host *host)
4369 {
4370 	struct pci_dev *pdev = to_pci_dev(host->dev);
4371 	struct mv_host_priv *hpriv = host->private_data;
4372 	u8 scc;
4373 	const char *scc_s, *gen;
4374 
4375 	/* Read the PCI class code so we can report whether the chip
4376 	 * presents itself as a SCSI or a RAID controller
4377 	 */
4378 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4379 	if (scc == 0)
4380 		scc_s = "SCSI";
4381 	else if (scc == 0x01)
4382 		scc_s = "RAID";
4383 	else
4384 		scc_s = "?";
4385 
4386 	if (IS_GEN_I(hpriv))
4387 		gen = "I";
4388 	else if (IS_GEN_II(hpriv))
4389 		gen = "II";
4390 	else if (IS_GEN_IIE(hpriv))
4391 		gen = "IIE";
4392 	else
4393 		gen = "?";
4394 
4395 	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4396 		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4397 		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4398 }
4399 
4400 /**
4401  *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
4402  *      @pdev: PCI device found
4403  *      @ent: PCI device ID entry for the matched host
4404  *
4405  *      LOCKING:
4406  *      Inherited from caller.
4407  */
4408 static int mv_pci_init_one(struct pci_dev *pdev,
4409 			   const struct pci_device_id *ent)
4410 {
4411 	unsigned int board_idx = (unsigned int)ent->driver_data;
4412 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4413 	struct ata_host *host;
4414 	struct mv_host_priv *hpriv;
4415 	int n_ports, port, rc;
4416 
4417 	ata_print_version_once(&pdev->dev, DRV_VERSION);
4418 
4419 	/* allocate host */
4420 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4421 
4422 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4423 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4424 	if (!host || !hpriv)
4425 		return -ENOMEM;
4426 	host->private_data = hpriv;
4427 	hpriv->n_ports = n_ports;
4428 	hpriv->board_idx = board_idx;
4429 
4430 	/* acquire resources */
4431 	rc = pcim_enable_device(pdev);
4432 	if (rc)
4433 		return rc;
4434 
4435 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4436 	if (rc == -EBUSY)
4437 		pcim_pin_device(pdev);
4438 	if (rc)
4439 		return rc;
4440 	host->iomap = pcim_iomap_table(pdev);
4441 	hpriv->base = host->iomap[MV_PRIMARY_BAR];
4442 
4443 	rc = pci_go_64(pdev);
4444 	if (rc)
4445 		return rc;
4446 
4447 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
4448 	if (rc)
4449 		return rc;
4450 
4451 	for (port = 0; port < host->n_ports; port++) {
4452 		struct ata_port *ap = host->ports[port];
4453 		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4454 		unsigned int offset = port_mmio - hpriv->base;
4455 
4456 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4457 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4458 	}
4459 
4460 	/* initialize adapter */
4461 	rc = mv_init_host(host);
4462 	if (rc)
4463 		return rc;
4464 
4465 	/* Enable message-signaled interrupts (MSI), if requested */
4466 	if (msi && pci_enable_msi(pdev) == 0)
4467 		hpriv->hp_flags |= MV_HP_FLAG_MSI;
4468 
4469 	mv_dump_pci_cfg(pdev, 0x68);
4470 	mv_print_info(host);
4471 
4472 	pci_set_master(pdev);
4473 	pci_try_set_mwi(pdev);
4474 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4475 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4476 }
4477 
4478 #ifdef CONFIG_PM_SLEEP
4479 static int mv_pci_device_resume(struct pci_dev *pdev)
4480 {
4481 	struct ata_host *host = pci_get_drvdata(pdev);
4482 	int rc;
4483 
4484 	rc = ata_pci_device_do_resume(pdev);
4485 	if (rc)
4486 		return rc;
4487 
4488 	/* initialize adapter */
4489 	rc = mv_init_host(host);
4490 	if (rc)
4491 		return rc;
4492 
4493 	ata_host_resume(host);
4494 
4495 	return 0;
4496 }
4497 #endif
4498 #endif
4499 
4500 static int __init mv_init(void)
4501 {
4502 	int rc = -ENODEV;
4503 #ifdef CONFIG_PCI
4504 	rc = pci_register_driver(&mv_pci_driver);
4505 	if (rc < 0)
4506 		return rc;
4507 #endif
4508 	rc = platform_driver_register(&mv_platform_driver);
4509 
4510 #ifdef CONFIG_PCI
4511 	if (rc < 0)
4512 		pci_unregister_driver(&mv_pci_driver);
4513 #endif
4514 	return rc;
4515 }
4516 
4517 static void __exit mv_exit(void)
4518 {
4519 #ifdef CONFIG_PCI
4520 	pci_unregister_driver(&mv_pci_driver);
4521 #endif
4522 	platform_driver_unregister(&mv_platform_driver);
4523 }
4524 
4525 MODULE_AUTHOR("Brett Russ");
4526 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4527 MODULE_LICENSE("GPL");
4528 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4529 MODULE_VERSION(DRV_VERSION);
4530 MODULE_ALIAS("platform:" DRV_NAME);
4531 
4532 module_init(mv_init);
4533 module_exit(mv_exit);
4534