// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSDHC controller driver.
 *
 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 * Copyright 2020 NXP
 *
 * Authors: Xiaobo Xie <X.Xie@freescale.com>
 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"

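/* eSDHC vendor version codes (vendor field of the host version register), used to gate quirks */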
#define VENDOR_V_22	0x12
#define VENDOR_V_23	0x13

#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)

struct esdhc_clk_fixup {
	const unsigned int sd_dflt_max_clk;
	const unsigned int max_clk[MMC_TIMING_NUM];
};

static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};

static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};

static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
	{ .compatible = "fsl,p1010-esdhc",   .data = &p1010_esdhc_clk},
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);

struct sdhci_esdhc {
	u8 vendor_ver;
	u8 spec_ver;
	bool quirk_incorrect_hostver;
	bool quirk_limited_clk_division;
	bool quirk_unreliable_pulse_detection;
	bool quirk_tuning_erratum_type1;
	bool quirk_tuning_erratum_type2;
	bool quirk_ignore_data_inhibit;
	bool quirk_delay_before_data_reset;
	bool quirk_trans_complete_erratum;
	bool in_sw_tuning;
	unsigned int peripheral_clock;
	const struct esdhc_clk_fixup *clk_fixup;
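	/* Cached SDCLK divider ratio (pre_div * div), used by the tuning-window calculations */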
	u32 div_ratio;
};

/**
 * esdhc_read*_fixup - Fix up the value read from an incompatible eSDHC
 *		       register to make it compatible with the SD spec.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 32bit eSDHC register value at the spec_reg address
 *
 * In the SD spec there are 8/16/32/64-bit registers, while all eSDHC
 * registers are 32 bits wide. There are differences in register size,
 * register address, register function, bit position and function between
 * the eSDHC and the SD spec.
 *
 * Return a fixed-up register value
 */
static u32 esdhc_readl_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 ret;

	/*
	 * The ADMA flag bit in eSDHC is not compatible with the standard
	 * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA
	 * is supported by eSDHC.
	 * On many FSL eSDHC controllers the reset value of the
	 * SDHCI_CAN_DO_ADMA1 field is 1, but some of them cannot support
	 * ADMA; only controllers whose vendor version is greater than
	 * 2.2 (0x12) support ADMA.
	 */
	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
		if (esdhc->vendor_ver > VENDOR_V_22) {
			ret = value | SDHCI_CAN_DO_ADMA2;
			return ret;
		}
	}

	/*
	 * The DAT[3:0] line signal levels and the CMD line signal level are
	 * not compatible with the standard SDHC register. The line signal
	 * levels DAT[7:0] are at bits 31:24 and the command line signal level
	 * is at bit 23. All other bits are the same as in the standard SDHC
	 * register.
	 */
	if (spec_reg == SDHCI_PRESENT_STATE) {
		ret = value & 0x000fffff;
		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
		ret |= (value << 1) & SDHCI_CMD_LVL;

		/*
		 * Some controllers have an unreliable Data Line Active
		 * bit for commands with busy signal. This affects the
		 * Command Inhibit (data) bit. Just ignore it since the
		 * MMC core driver already polls the card status with
		 * CMD13 after any command with busy signal.
		 */
		if (esdhc->quirk_ignore_data_inhibit)
			ret &= ~SDHCI_DATA_INHIBIT;
		return ret;
	}

	/*
	 * DTS properties of the mmc host are used to enable each speed mode
	 * according to SoC and board capability. So clean up the
	 * SDR50/SDR104/DDR50 support bits here.
	 */
	if (spec_reg == SDHCI_CAPABILITIES_1) {
		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
				SDHCI_SUPPORT_DDR50);
		return ret;
	}

	ret = value;
	return ret;
}

static u16 esdhc_readw_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u16 ret;
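	/* Bit shift to the requested 16-bit half-word within the backing 32-bit register */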
	int shift = (spec_reg & 0x2) * 8;

	if (spec_reg == SDHCI_TRANSFER_MODE)
		return pltfm_host->xfer_mode_shadow;

	if (spec_reg == SDHCI_HOST_VERSION)
		ret = value & 0xffff;
	else
		ret = (value >> shift) & 0xffff;
	/* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
	 * vendor version and spec version information.
	 */
	if ((spec_reg == SDHCI_HOST_VERSION) &&
	    (esdhc->quirk_incorrect_hostver))
		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
	return ret;
}

static u8 esdhc_readb_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
{
	u8 ret;
	u8 dma_bits;
	int shift = (spec_reg & 0x3) * 8;

	ret = (value >> shift) & 0xff;

	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 or P3041 it is located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/* DMA select is bits 22,23 in the Protocol Control Register */
		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
		/* fixup the result */
		ret &= ~SDHCI_CTRL_DMA_MASK;
		ret |= dma_bits;
	}
	return ret;
}

/**
 * esdhc_write*_fixup - Fix up the SD spec register value so that it can be
 *			written into the corresponding eSDHC register.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 8/16/32bit SD spec register value that would be written
 * @old_value: 32bit eSDHC register value at the spec_reg address
 *
 * In the SD spec there are 8/16/32/64-bit registers, while all eSDHC
 * registers are 32 bits wide. There are differences in register size,
 * register address, register function, bit position and function between
 * the eSDHC and the SD spec.
 *
 * Return a fixed-up register value
 */
static u32 esdhc_writel_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value, u32 old_value)
{
	u32 ret;

	/*
	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
	 * when SYSCTL[RSTD] is set for some special operations.
	 * It has no impact on other operations.
	 */
	if (spec_reg == SDHCI_INT_ENABLE)
		ret = value | SDHCI_INT_BLK_GAP;
	else
		ret = value;

	return ret;
}

static u32 esdhc_writew_fixup(struct sdhci_host *host,
				     int spec_reg, u16 value, u32 old_value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int shift = (spec_reg & 0x2) * 8;
	u32 ret;

	switch (spec_reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write; we must do it together with the
		 * command write below. Return the old value.
		 */
		pltfm_host->xfer_mode_shadow = value;
		return old_value;
	case SDHCI_COMMAND:
		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
		return ret;
	}

	ret = old_value & (~(0xffff << shift));
	ret |= (value << shift);

	if (spec_reg == SDHCI_BLOCK_SIZE) {
		/*
		 * The last two DMA boundary bits are reserved, and the first
		 * one is used for a non-standard blksz of 4096 bytes that we
		 * don't support yet. So clear the DMA boundary bits.
		 */
		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
	}
	return ret;
}

static u32 esdhc_writeb_fixup(struct sdhci_host *host,
				     int spec_reg, u8 value, u32 old_value)
{
	u32 ret;
	u32 dma_bits;
	u8 tmp;
	int shift = (spec_reg & 0x3) * 8;

	/*
	 * eSDHC doesn't have a standard power control register, so we do
	 * nothing here to avoid incorrect operation.
	 */
	if (spec_reg == SDHCI_POWER_CONTROL)
		return old_value;
	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 or P3041 it is located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/*
		 * If the host control register is not standard, exit
		 * this function
		 */
		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
			return old_value;

		/* DMA select is bits 22,23 in the Protocol Control Register */
		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
		      (old_value & SDHCI_CTRL_DMA_MASK);
		ret = (ret & (~0xff)) | tmp;

		/* Prevent the SDHCI core from writing reserved bits (e.g. HISPD) */
		ret &= ~ESDHC_HOST_CONTROL_RES;
		return ret;
	}

	ret = (old_value & (~(0xff << shift))) | (value << shift);
	return ret;
}

static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
{
	u32 ret;
	u32 value;

	if (reg == SDHCI_CAPABILITIES_1)
		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
	else
		value = ioread32be(host->ioaddr + reg);

	ret = esdhc_readl_fixup(host, reg, value);

	return ret;
}

static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
{
	u32 ret;
	u32 value;

	if (reg == SDHCI_CAPABILITIES_1)
		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
	else
		value = ioread32(host->ioaddr + reg);

	ret = esdhc_readl_fixup(host, reg, value);

	return ret;
}

static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
{
	u16 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_readw_fixup(host, reg, value);
	return ret;
}

static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
{
	u16 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_readw_fixup(host, reg, value);
	return ret;
}

static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_readb_fixup(host, reg, value);
	return ret;
}

static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_readb_fixup(host, reg, value);
	return ret;
}

static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
{
	u32 value;

	value = esdhc_writel_fixup(host, reg, val, 0);
	iowrite32be(value, host->ioaddr + reg);
}

static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
{
	u32 value;

	value = esdhc_writel_fixup(host, reg, val, 0);
	iowrite32(value, host->ioaddr + reg);
}

static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32be(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32be(ret, host->ioaddr + base);
		}
	}
}

static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32(ret, host->ioaddr + base);
		}
	}
}

static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writeb_fixup(host, reg, val, value);
	iowrite32be(ret, host->ioaddr + base);
}

static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writeb_fixup(host, reg, val, value);
	iowrite32(ret, host->ioaddr + base);
}

/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC])
 * and Block Gap Event (IRQSTAT[BGE]) are also set.
 * For Continue, apply a soft reset for data (SYSCTL[RSTD])
 * and re-issue the entire read transaction from the beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool applicable;
	dma_addr_t dmastart;
	dma_addr_t dmanow;

	applicable = (intmask & SDHCI_INT_DATA_END) &&
		     (intmask & SDHCI_INT_BLK_GAP) &&
		     (esdhc->vendor_ver == VENDOR_V_23);
	if (!applicable)
		return;

	host->data->error = 0;
	dmastart = sg_dma_address(host->data->sg);
	dmanow = dmastart + host->data->bytes_xfered;
	/*
	 * Force update to the next DMA block boundary.
	 */
	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
		SDHCI_DEFAULT_BOUNDARY_SIZE;
	host->data->bytes_xfered = dmanow - dmastart;
	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}

static int esdhc_of_enable_dma(struct sdhci_host *host)
{
	int ret;
	u32 value;
	struct device *dev = mmc_dev(host->mmc);

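	/* Widen the DMA mask to 40 bits on controllers that can address beyond 32 bits */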
	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
		if (ret)
			return ret;
	}

	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

	if (of_dma_is_coherent(dev->of_node))
		value |= ESDHC_DMA_SNOOP;
	else
		value &= ~ESDHC_DMA_SNOOP;

	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
	return 0;
}

static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);

	if (esdhc->peripheral_clock)
		return esdhc->peripheral_clock;
	else
		return pltfm_host->clock;
}

static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int clock;

	if (esdhc->peripheral_clock)
		clock = esdhc->peripheral_clock;
	else
		clock = pltfm_host->clock;
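	/* Minimum SDCLK: base clock divided by the largest prescaler (256) and divisor (16) */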
	return clock / 256 / 16;
}

static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	ktime_t timeout;
	u32 val, clk_en;

	clk_en = ESDHC_CLOCK_SDCLKEN;

	/*
	 * The IPGEN/HCKEN/PEREN bits exist on eSDHC controllers whose
	 * vendor version is 2.2 or lower.
	 */
	if (esdhc->vendor_ver <= VENDOR_V_22)
		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
			   ESDHC_CLOCK_PEREN);

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= clk_en;
	else
		val &= ~clk_en;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
	 * wait for the clock stable bit, which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}

static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
	ktime_t timeout;
	u32 val;

	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	val |= ESDHC_FLUSH_ASYNC_FIFO;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
		      ESDHC_FLUSH_ASYNC_FIFO))
			break;
		if (timedout) {
			pr_err("%s: flushing asynchronous FIFO timeout.\n",
				mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}

static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int pre_div = 1, div = 1;
	unsigned int clock_fixup = 0;
	ktime_t timeout;
	u32 temp;

	if (clock == 0) {
		host->mmc->actual_clock = 0;
		esdhc_clock_enable(host, false);
		return;
	}

	/* Start pre_div at 2 for vendor version < 2.3. */
	if (esdhc->vendor_ver < VENDOR_V_23)
		pre_div = 2;

	/* Fix clock value. */
	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
	else if (esdhc->clk_fixup)
		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

	if (clock_fixup == 0 || clock < clock_fixup)
		clock_fixup = clock;

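	/*
	 * SDCLK = host->max_clk / (pre_div * div), where pre_div is a
	 * power of two up to 256 and div ranges from 1 to 16.
	 */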
	/* Calculate pre_div and div. */
	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
		div++;

	esdhc->div_ratio = pre_div * div;

	/* Limit the clock division for the HS400 200MHz clock when the quirk is set. */
	if (esdhc->quirk_limited_clk_division &&
	    clock == MMC_HS200_MAX_DTR &&
	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
	     host->flags & SDHCI_HS400_TUNING)) {
		if (esdhc->div_ratio <= 4) {
			pre_div = 4;
			div = 1;
		} else if (esdhc->div_ratio <= 8) {
			pre_div = 4;
			div = 2;
		} else if (esdhc->div_ratio <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			pr_warn("%s: using unsupported clock division.\n",
				mmc_hostname(host->mmc));
		}
		esdhc->div_ratio = pre_div * div;
	}

	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->mmc->actual_clock);

	/* Set clock division into the register. */
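	/* The register encodes the prescaler as pre_div/2 and the divisor as div-1. */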
	pre_div >>= 1;
	div--;

	esdhc_clock_enable(host, false);

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~ESDHC_CLOCK_MASK;
	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
		(pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
	 * wait for the clock stable bit, which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}

	/* Additional settings for HS400. */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    clock == MMC_HS200_MAX_DTR) {
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
		esdhc_clock_enable(host, true);

		temp = sdhci_readl(host, ESDHC_DLLCFG0);
		temp |= ESDHC_DLL_ENABLE;
		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
			temp |= ESDHC_DLL_FREQ_SEL;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);

		temp |= ESDHC_DLL_RESET;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);
		udelay(1);
		temp &= ~ESDHC_DLL_RESET;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);

		/* Wait max 20 ms */
		if (read_poll_timeout(sdhci_readl, temp,
				      temp & ESDHC_DLL_STS_SLV_LOCK,
				      10, 20000, false,
				      host, ESDHC_DLLSTAT0))
			pr_err("%s: timeout for delay chain lock.\n",
			       mmc_hostname(host->mmc));

		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

		esdhc_clock_enable(host, false);
		esdhc_flush_async_fifo(host);
	}
	esdhc_clock_enable(host, true);
}

static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
	u32 ctrl;

	ctrl = sdhci_readl(host, ESDHC_PROCTL);
	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
	switch (width) {
	case MMC_BUS_WIDTH_8:
		ctrl |= ESDHC_CTRL_8BITBUS;
		break;

	case MMC_BUS_WIDTH_4:
		ctrl |= ESDHC_CTRL_4BITBUS;
		break;

	default:
		break;
	}

	sdhci_writel(host, ctrl, ESDHC_PROCTL);
}

static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val, bus_width = 0;

	/*
	 * Add a delay to make sure all the DMA transfers are finished
	 * for the quirk.
	 */
	if (esdhc->quirk_delay_before_data_reset &&
	    (mask & SDHCI_RESET_DATA) &&
	    (host->flags & SDHCI_REQ_USE_DMA))
		mdelay(5);

	/*
	 * Save the bus-width for eSDHC whose vendor version is 2.2
	 * or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
	}

	sdhci_reset(host, mask);

	/*
	 * Restore the bus-width setting and interrupt registers for eSDHC
	 * whose vendor version is 2.2 or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
		val |= bus_width;
		sdhci_writel(host, val, ESDHC_PROCTL);

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}

	/*
	 * Some bits have to be cleared manually for eSDHC whose spec
	 * version is higher than 3.0 for all reset.
	 */
	if ((mask & SDHCI_RESET_ALL) &&
	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);

		/*
		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
		 * 0 for the quirk.
		 */
		if (esdhc->quirk_unreliable_pulse_detection) {
			val = sdhci_readl(host, ESDHC_DLLCFG1);
			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
			sdhci_writel(host, val, ESDHC_DLLCFG1);
		}
	}
}

/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is an
 * SDHC IO VSEL control register in the SCFG on some platforms. It is
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};

/* SDHC IO VSEL control register definition */
#define SCFG_SDHCIOVSELCR	0x408
#define SDHCIOVSELCR_TGLEN	0x80000000
#define SDHCIOVSELCR_VSELVAL	0x60000000
#define SDHCIOVSELCR_SDHC_VS	0x00000001

static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct device_node *scfg_node;
	void __iomem *scfg_base = NULL;
	u32 sdhciovselcr;
	u32 val;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	val = sdhci_readl(host, ESDHC_PROCTL);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val &= ~ESDHC_VOLT_SEL;
		sdhci_writel(host, val, ESDHC_PROCTL);
		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
		if (scfg_node)
			scfg_base = of_iomap(scfg_node, 0);
		of_node_put(scfg_node);
		if (scfg_base) {
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_VSELVAL;
			iowrite32be(sdhciovselcr,
				scfg_base + SCFG_SDHCIOVSELCR);

			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
			mdelay(5);

			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_SDHC_VS;
			iowrite32be(sdhciovselcr,
				scfg_base + SCFG_SDHCIOVSELCR);
			iounmap(scfg_base);
		} else {
			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
		}
		return 0;
	default:
		return 0;
	}
}

static struct soc_device_attribute soc_tuning_erratum_type1[] = {
	{ .family = "QorIQ T1023", },
	{ .family = "QorIQ T1040", },
	{ .family = "QorIQ T2080", },
	{ .family = "QorIQ LS1021A", },
	{ },
};

static struct soc_device_attribute soc_tuning_erratum_type2[] = {
	{ .family = "QorIQ LS1012A", },
	{ .family = "QorIQ LS1043A", },
	{ .family = "QorIQ LS1046A", },
	{ .family = "QorIQ LS1080A", },
	{ .family = "QorIQ LS2080A", },
	{ .family = "QorIQ LA1575A", },
	{ },
};

static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
{
	u32 val;

	esdhc_clock_enable(host, false);
	esdhc_flush_async_fifo(host);

	val = sdhci_readl(host, ESDHC_TBCTL);
	if (enable)
		val |= ESDHC_TB_EN;
	else
		val &= ~ESDHC_TB_EN;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc_clock_enable(host, true);
}

static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	u32 val;

	/* Write TBCTL[11:8]=4'h8 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~(0xf << 8);
	val |= 8 << 8;
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read TBCTL[31:0] register and rewrite again */
	val = sdhci_readl(host, ESDHC_TBCTL);
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read the TBSTAT[31:0] register twice */
	val = sdhci_readl(host, ESDHC_TBSTAT);
	val = sdhci_readl(host, ESDHC_TBSTAT);

	*window_end = val & 0xff;
	*window_start = (val >> 8) & 0xff;
}

static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 start_ptr, end_ptr;

	if (esdhc->quirk_tuning_erratum_type1) {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
		return;
	}

	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);

	/* Reset data lines by setting ESDHCCTL[RSTD] */
	sdhci_reset(host, SDHCI_RESET_DATA);
	/* Write 32'hFFFF_FFFF to IRQSTAT register */
	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

	/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
	 */

	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
		*window_start = 8 * esdhc->div_ratio;
		*window_end = 4 * esdhc->div_ratio;
	} else {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
	}
}

static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
				   u8 window_start, u8 window_end)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val;
	int ret;

	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
	      ESDHC_WNDW_STRT_PTR_MASK;
	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
	sdhci_writel(host, val, ESDHC_TBPTR);

	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~ESDHC_TB_MODE_MASK;
	val |= ESDHC_TB_MODE_SW;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc->in_sw_tuning = true;
	ret = sdhci_execute_tuning(mmc, opcode);
	esdhc->in_sw_tuning = false;
	return ret;
}

static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 window_start, window_end;
	int ret, retries = 1;
	bool hs400_tuning;
	unsigned int clk;
	u32 val;

	/* For tuning mode, the SD clock divisor value
	 * must be larger than 3 according to the reference manual.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	esdhc_tuning_block_enable(host, true);

	/*
	 * The eSDHC controller takes the data timeout value into account
	 * during tuning. If the SD card is too slow sending the response, the
	 * timer will expire and a "Buffer Read Ready" interrupt without data
	 * is triggered. This leads to tuning errors.
	 *
	 * Just set the timeout to the maximum value because the core will
	 * already take care of it in sdhci_send_tuning().
	 */
	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	do {
		if (esdhc->quirk_limited_clk_division &&
		    hs400_tuning)
			esdhc_of_set_clock(host, host->clock);

		/* Do HW tuning */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_MODE_MASK;
		val |= ESDHC_TB_MODE_3;
		sdhci_writel(host, val, ESDHC_TBCTL);

		ret = sdhci_execute_tuning(mmc, opcode);
		if (ret)
			break;

		/* For platforms affected by the type2 tuning erratum,
		 * tuning may succeed although eSDHC might not have
		 * tuned properly. Need to check the tuning window.
		 */
		if (esdhc->quirk_tuning_erratum_type2 &&
		    !host->tuning_err) {
			esdhc_tuning_window_ptr(host, &window_start,
						&window_end);
			if (abs(window_start - window_end) >
			    (4 * esdhc->div_ratio + 2))
				host->tuning_err = -EAGAIN;
		}

		/* If HW tuning fails and triggers the erratum,
		 * try the workaround.
		 */
		ret = host->tuning_err;
		if (ret == -EAGAIN &&
		    (esdhc->quirk_tuning_erratum_type1 ||
		     esdhc->quirk_tuning_erratum_type2)) {
			/* Recover the HS400 tuning flag */
			if (hs400_tuning)
				host->flags |= SDHCI_HS400_TUNING;
			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
				mmc_hostname(mmc));
			/* Do SW tuning */
			esdhc_prepare_sw_tuning(host, &window_start,
						&window_end);
			ret = esdhc_execute_sw_tuning(mmc, opcode,
						      window_start,
						      window_end);
			if (ret)
				break;

			/* Retry both HW/SW tuning with reduced clock. */
			ret = host->tuning_err;
			if (ret == -EAGAIN && retries) {
				/* Recover the HS400 tuning flag */
				if (hs400_tuning)
					host->flags |= SDHCI_HS400_TUNING;

				clk = host->max_clk / (esdhc->div_ratio + 1);
				esdhc_of_set_clock(host, clk);
				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
					mmc_hostname(mmc));
			} else {
				break;
			}
		} else {
			break;
		}
	} while (retries--);

	if (ret) {
		esdhc_tuning_block_enable(host, false);
	} else if (hs400_tuning) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	return ret;
}

static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				   unsigned int timing)
{
	u32 val;

	/*
	 * There are specific register settings for HS400 mode.
	 * Clean all of them up if the controller is in HS400 mode,
	 * to exit HS400 mode before re-setting any speed mode.
	 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	if (val & ESDHC_HS400_MODE) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val &= ~ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);

		val = sdhci_readl(host, ESDHC_SDCLKCTL);
		val &= ~ESDHC_CMD_CLK_CTL;
		sdhci_writel(host, val, ESDHC_SDCLKCTL);

		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_MODE;
		sdhci_writel(host, val, ESDHC_TBCTL);
		esdhc_clock_enable(host, true);

		val = sdhci_readl(host, ESDHC_DLLCFG0);
		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
		sdhci_writel(host, val, ESDHC_DLLCFG0);

		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_WNDW_ADJUST;
		sdhci_writel(host, val, ESDHC_TBCTL);

		esdhc_tuning_block_enable(host, false);
	}

	if (timing == MMC_TIMING_MMC_HS400)
		esdhc_tuning_block_enable(host, true);
	else
		sdhci_set_uhs_signaling(host, timing);
}

static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 command;

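	/*
	 * Erratum workaround: for a multiple-block write with a block
	 * count set, clear the Transfer Complete interrupt status and
	 * drop it from the mask handled by the core.
	 */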
	if (esdhc->quirk_trans_complete_erratum) {
		command = SDHCI_GET_CMD(sdhci_readw(host,
					SDHCI_COMMAND));
		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
				sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
				intmask & SDHCI_INT_DATA_END) {
			intmask &= ~SDHCI_INT_DATA_END;
			sdhci_writel(host, SDHCI_INT_DATA_END,
					SDHCI_INT_STATUS);
		}
	}
	return intmask;
}

#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}

static int esdhc_of_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret = sdhci_resume_host(host);

	if (ret == 0) {
		/* Isn't this already done by sdhci_resume_host() ? --rmk */
		esdhc_of_enable_dma(host);
		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
	}
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			esdhc_of_suspend,
			esdhc_of_resume);

static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};

static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_ops,
};

static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};

static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			     SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;

	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		esdhc->quirk_delay_before_data_reset = true;
		esdhc->quirk_trans_complete_erratum = true;
	}

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock is assigned the eSDHC base clock
		 * when the peripheral clock is used. On some platforms the
		 * clock value obtained via the common clk API is the
		 * peripheral clock, while the eSDHC base clock is 1/2 of
		 * the peripheral clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	esdhc_clock_enable(host, false);
	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	/*
	 * This bit cannot be reset by SDHCI_RESET_ALL, so initialize it
	 * to 1 or 0 once here to override whatever value the bootloader
	 * may have configured.
	 */
	if (esdhc->peripheral_clock)
		val |= ESDHC_PERIPHERAL_CLK_SEL;
	else
		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
	esdhc_clock_enable(host, true);
}

static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
{
	esdhc_tuning_block_enable(mmc_priv(mmc), false);
	return 0;
}

static int sdhci_esdhc_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct device_node *np;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	int ret;

	np = pdev->dev.of_node;

	if (of_property_read_bool(np, "little-endian"))
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
					sizeof(struct sdhci_esdhc));
	else
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
					sizeof(struct sdhci_esdhc));

	if (IS_ERR(host))
		return PTR_ERR(host);

	host->mmc_host_ops.start_signal_voltage_switch =
		esdhc_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
	host->tuning_delay = 1;

	esdhc_init(pdev, host);

	sdhci_get_of_property(pdev);

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);
	if (soc_device_match(soc_tuning_erratum_type1))
		esdhc->quirk_tuning_erratum_type1 = true;
	else
		esdhc->quirk_tuning_erratum_type1 = false;

	if (soc_device_match(soc_tuning_erratum_type2))
		esdhc->quirk_tuning_erratum_type2 = true;
	else
		esdhc->quirk_tuning_erratum_type2 = false;

	if (esdhc->vendor_ver == VENDOR_V_22)
		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;

	if (esdhc->vendor_ver > VENDOR_V_22)
		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
	}

	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

	esdhc->quirk_ignore_data_inhibit = false;
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		/*
		 * Freescale messed up with P2020 as it has a non-standard
		 * host control register
		 */
		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
		esdhc->quirk_ignore_data_inhibit = true;
	}

	/* Call the generic mmc_of_parse to support additional capabilities */
	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto err;

	mmc_of_parse_voltage(host->mmc, &host->ocr_mask);

	ret = sdhci_add_host(host);
	if (ret)
		goto err;

	return 0;
 err:
	sdhci_pltfm_free(pdev);
	return ret;
}

static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");