/*
 * Sunxi SD/MMC host driver
 *
 * Copyright (C) 2015 AllWinnertech Ltd.
 * Author: lixiang <lixiang@allwinnertech>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */


#include <linux/clk.h>
#include <linux/reset/sunxi.h>

#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/reset.h>

#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "sunxi-mmc.h"
#include "sunxi-mmc-v4p1x.h"
#include "sunxi-mmc-export.h"
#include "sunxi-mmc-debug.h"

#define SUNXI_RETRY_CNT_PER_PHA_V4P1X		3

/* dma trigger level setting */
#define SUNXI_DMA_TL_SDMMC_V4P1X	((0x2<<28)|(7<<16)|248)
/* one dma descriptor can transfer data size = 1 << SUNXI_DES_SIZE_SDMMC */
#if defined(CONFIG_ARCH_SUN50IW10)
#define SUNXI_DES_SIZE_SDMMC_V4P1X	(12)
#else
#define SUNXI_DES_SIZE_SDMMC_V4P1X	(15)
#endif

/* reg */
/* SMHC eMMC4.5 DDR Start Bit Detection Control Register */
#define SDXC_REG_EDSD		(0x010C)
/* SMHC CRC Status Detect Control Register */
#define SDXC_REG_CSDC		(0x0054)
/* SMHC Card Threshold Control Register */
#define SDXC_REG_THLD		(0x0100)
/* SMHC Drive Delay Control Register */
#define SDXC_REG_DRV_DL		(0x0140)
/* SMHC Sample Delay Control Register */
#define SDXC_REG_SAMP_DL	(0x0144)
/* SMHC Data Strobe Delay Control Register */
#define SDXC_REG_DS_DL		(0x0148)
/* SMHC NewTiming Set Register */
#define SDXC_REG_SD_NTSR	(0x005C)

/* bit */
#define SDXC_HS400_MD_EN				(1U<<31)
#define SDXC_CARD_WR_THLD_ENB		(1U<<2)
#define SDXC_CARD_RD_THLD_ENB		(1U)

#define SDXC_DAT_DRV_PH_SEL			(1U<<17)
#define SDXC_CMD_DRV_PH_SEL			(1U<<16)
#define SDXC_SAMP_DL_SW_EN			(1u<<7)
#define SDXC_DS_DL_SW_EN			(1u<<7)

#define	SDXC_2X_TIMING_MODE			(1U<<31)

/* mask */
#define SDXC_CRC_DET_PARA_MASK		(0xf)
#define SDXC_CARD_RD_THLD_MASK		(0x0FFF0000)
#define SDXC_TX_TL_MASK				(0xff)
#define SDXC_RX_TL_MASK				(0x00FF0000)

#define SDXC_SAMP_DL_SW_MASK		(0x0000003F)
#define SDXC_DS_DL_SW_MASK			(0x0000003F)

#define SDXC_STIMING_CMD_PH_MASK		(0x00000030)
#define SDXC_STIMING_DAT_PH_MASK		(0x00000300)

/* value */
#define SDXC_CRC_DET_PARA_HS400		(6)
#define SDXC_CRC_DET_PARA_OTHER		(3)
#define SDXC_FIFO_DETH					(1024>>2)

/* size */
#define SDXC_CARD_RD_THLD_SIZE		(0x00000FFF)

/* shift */
#define SDXC_CARD_RD_THLD_SIZE_SHIFT		(16)

#define SDXC_STIMING_CMD_PH_SHIFT			(4)
#define SDXC_STIMING_DAT_PH_SHIFT			(8)

enum sunxi_mmc_clk_mode {
	mmc_clk_400k = 0,
	mmc_clk_26M,
	mmc_clk_52M,
	mmc_clk_52M_DDR4,
	mmc_clk_52M_DDR8,
	mmc_clk_104M,
	mmc_clk_208M,
	mmc_clk_104M_DDR,
	mmc_clk_208M_DDR,
	mmc_clk_mod_num,
};

struct sunxi_mmc_clk_dly {
	enum sunxi_mmc_clk_mode cmod;
	char *mod_str;
	u32 cmd_drv_ph;
	u32 dat_drv_ph;
	u32 sam_dly;
	u32 ds_dly;
	u32 sam_ph_dat;
	u32 sam_ph_cmd;
};

struct sunxi_mmc_spec_regs {
	u32 drv_dl;		/* REG_DRV_DL */
	u32 samp_dl;		/* REG_SAMP_DL */
	u32 ds_dl;		/* REG_DS_DL */
	u32 sd_ntsr;		/* REG_SD_NTSR */
};

struct sunxi_mmc_ver_priv {
	struct sunxi_mmc_spec_regs bak_spec_regs;
	struct sunxi_mmc_clk_dly mmc_clk_dly[mmc_clk_mod_num];
};

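/*
 * Parse the per-speed-mode clock delay parameters from the device tree
 * (falling back to the defaults filled in at init time) and program the
 * CMD/DATA output drive phases and sample phases accordingly.
 */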
static void sunxi_mmc_set_clk_dly(struct sunxi_mmc_host *host, int clk,
				  int bus_width, int timing)
{
	struct mmc_host *mhost = host->mmc;
	u32 rval = 0;
	enum sunxi_mmc_clk_mode cmod = mmc_clk_400k;
	u32 in_clk_dly[6] = { 0 };
	int ret = 0;
	struct device_node *np = NULL;
	struct sunxi_mmc_clk_dly *mmc_clk_dly =
	    ((struct sunxi_mmc_ver_priv *)host->version_priv_dat)->mmc_clk_dly;

	if (!mhost->parent || !mhost->parent->of_node) {
		SM_ERR(mmc_dev(host->mmc),
			"no dts to parse clk dly, use default\n");
		return;
	}

	np = mhost->parent->of_node;

	if (clk <= 400 * 1000) {
		cmod = mmc_clk_400k;
	} else if (clk <= 26 * 1000 * 1000) {
		cmod = mmc_clk_26M;
	} else if (clk <= 52 * 1000 * 1000) {
		if ((bus_width == MMC_BUS_WIDTH_4)
		    && sunxi_mmc_ddr_timing(timing)) {
			cmod = mmc_clk_52M_DDR4;
		} else if ((bus_width == MMC_BUS_WIDTH_8)
			   && (timing == MMC_TIMING_MMC_DDR52)) {
			cmod = mmc_clk_52M_DDR8;
		} else {
			cmod = mmc_clk_52M;
		}
	} else if (clk <= 104 * 1000 * 1000) {
		if ((bus_width == MMC_BUS_WIDTH_8)
		    && (timing == MMC_TIMING_MMC_HS400)) {
			cmod = mmc_clk_104M_DDR;
		} else {
			cmod = mmc_clk_104M;
		}
	} else if (clk <= 208 * 1000 * 1000) {
		if ((bus_width == MMC_BUS_WIDTH_8)
		    && (timing == MMC_TIMING_MMC_HS400)) {
			cmod = mmc_clk_208M_DDR;
		} else {
			cmod = mmc_clk_208M;
		}
	} else {
		SM_ERR(mmc_dev(mhost), "clk %d is out of range\n", clk);
		return;
	}

	ret = of_property_read_u32_array(np, mmc_clk_dly[cmod].mod_str,
					 in_clk_dly, ARRAY_SIZE(in_clk_dly));
	if (ret) {
		SM_DBG(mmc_dev(host->mmc), "failed to get %s, using default\n",
			mmc_clk_dly[cmod].mod_str);
	} else {
		mmc_clk_dly[cmod].cmd_drv_ph = in_clk_dly[0];
		mmc_clk_dly[cmod].dat_drv_ph = in_clk_dly[1];
		/*mmc_clk_dly[cmod].sam_dly = in_clk_dly[2]; */
		/*mmc_clk_dly[cmod].ds_dly = in_clk_dly[3]; */
		mmc_clk_dly[cmod].sam_ph_dat = in_clk_dly[4];
		mmc_clk_dly[cmod].sam_ph_cmd = in_clk_dly[5];
		SM_DBG(mmc_dev(host->mmc), "Get %s clk dly ok\n",
			mmc_clk_dly[cmod].mod_str);
	}

	SM_DBG(mmc_dev(host->mmc), "Try to set %s clk dly\n",
		mmc_clk_dly[cmod].mod_str);
	SM_DBG(mmc_dev(host->mmc), "cmd_drv_ph	%d\n",
		mmc_clk_dly[cmod].cmd_drv_ph);
	SM_DBG(mmc_dev(host->mmc), "dat_drv_ph	%d\n",
		mmc_clk_dly[cmod].dat_drv_ph);
	SM_DBG(mmc_dev(host->mmc), "sam_ph_dat	%d\n",
		mmc_clk_dly[cmod].sam_ph_dat);
	SM_DBG(mmc_dev(host->mmc), "sam_ph_cmd	%d\n",
		mmc_clk_dly[cmod].sam_ph_cmd);

	rval = mmc_readl(host, REG_DRV_DL);
	if (mmc_clk_dly[cmod].cmd_drv_ph)
		rval |= SDXC_CMD_DRV_PH_SEL;	/* 180 degree phase */
	else
		rval &= ~SDXC_CMD_DRV_PH_SEL;	/* 90 degree phase */

	if (mmc_clk_dly[cmod].dat_drv_ph)
		rval |= SDXC_DAT_DRV_PH_SEL;	/* 180 degree phase */
	else
		rval &= ~SDXC_DAT_DRV_PH_SEL;	/* 90 degree phase */

	sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, rval));

/*
 *	rval = mmc_readl(host, REG_SAMP_DL);
 *	rval &= ~SDXC_SAMP_DL_SW_MASK;
 *	rval |= mmc_clk_dly[cmod].sam_dly & SDXC_SAMP_DL_SW_MASK;
 *	rval |= SDXC_SAMP_DL_SW_EN;
 *	mmc_writel(host, REG_SAMP_DL, rval);
 *
 *	rval = mmc_readl(host, REG_DS_DL);
 *	rval &= ~SDXC_DS_DL_SW_MASK;
 *	rval |= mmc_clk_dly[cmod].ds_dly & SDXC_DS_DL_SW_MASK;
 *	rval |= SDXC_DS_DL_SW_EN;
 *	mmc_writel(host, REG_DS_DL, rval);
 */

	rval = mmc_readl(host, REG_SD_NTSR);
	rval &= ~SDXC_STIMING_DAT_PH_MASK;
	rval |= (mmc_clk_dly[cmod].sam_ph_dat << SDXC_STIMING_DAT_PH_SHIFT) &
	    SDXC_STIMING_DAT_PH_MASK;
	mmc_writel(host, REG_SD_NTSR, rval);

	rval = mmc_readl(host, REG_SD_NTSR);
	rval &= ~SDXC_STIMING_CMD_PH_MASK;
	rval |= (mmc_clk_dly[cmod].sam_ph_cmd << SDXC_STIMING_CMD_PH_SHIFT) &
	    SDXC_STIMING_CMD_PH_MASK;
	mmc_writel(host, REG_SD_NTSR, rval);

	SM_DBG(mmc_dev(host->mmc), " REG_DRV_DL    %08x\n",
		mmc_readl(host, REG_DRV_DL));
	SM_DBG(mmc_dev(host->mmc), " REG_SAMP_DL  %08x\n",
		mmc_readl(host, REG_SAMP_DL));
	SM_DBG(mmc_dev(host->mmc), " REG_DS_DL      %08x\n",
		mmc_readl(host, REG_DS_DL));
	SM_DBG(mmc_dev(host->mmc), " REG_SD_NTSR      %08x\n",
		mmc_readl(host, REG_SD_NTSR));
}

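/*
 * Gate or ungate the card clock (optionally with the low-power and
 * mask-data0 options) and issue an update-clock command so the controller
 * latches the new clock settings; returns -EIO if the command does not
 * complete within 250 ms.
 */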
static int __sunxi_mmc_do_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en,
				     u32 pwr_save, u32 ignore_dat0)
{
	unsigned long expire = jiffies + msecs_to_jiffies(250);
	u32 rval;

	rval = mmc_readl(host, REG_CLKCR);
	rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0);

	if (oclk_en)
		rval |= SDXC_CARD_CLOCK_ON;
	if (pwr_save && host->voltage_switching == 0)
		rval |= SDXC_LOW_POWER_ON;
	if (ignore_dat0)
		rval |= SDXC_MASK_DATA0;

	mmc_writel(host, REG_CLKCR, rval);

	SM_DBG(mmc_dev(host->mmc), "%s REG_CLKCR:%x\n", __func__,
		mmc_readl(host, REG_CLKCR));

	if (host->voltage_switching == 1) {
		rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER |
		       SDXC_VOLTAGE_SWITCH;
	} else {
		rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER;
	}
	mmc_writel(host, REG_CMDR, rval);

	do {
		rval = mmc_readl(host, REG_CMDR);
	} while (time_before(jiffies, expire) && (rval & SDXC_START));

	/* clear irq status bits set by the command */
	mmc_writel(host, REG_RINTR,
		   mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT);

	if (rval & SDXC_START) {
		SM_ERR(mmc_dev(host->mmc), "fatal err update clk timeout\n");
		return -EIO;
	}

	/* only mask data0 while updating the clock; clear it afterwards */
	if (ignore_dat0)
		mmc_writel(host, REG_CLKCR,
			   mmc_readl(host, REG_CLKCR) & ~SDXC_MASK_DATA0);

	return 0;
}

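/*
 * Wrapper around __sunxi_mmc_do_oclk_onoff() that passes pwr_save = 1
 * (SDXC_LOW_POWER_ON) when the optional "sunxi-power-save-mode" property
 * is present in the device tree node.
 */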
static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
{
	struct device_node *np = NULL;
	struct mmc_host *mmc = host->mmc;
	int pwr_save = 0;
	int len = 0;

	if (!mmc->parent || !mmc->parent->of_node) {
		SM_ERR(mmc_dev(host->mmc),
			"no dts to parse power save mode\n");
		return -EIO;
	}

	np = mmc->parent->of_node;
	if (of_find_property(np, "sunxi-power-save-mode", &len))
		pwr_save = 1;
	return __sunxi_mmc_do_oclk_onoff(host, oclk_en, pwr_save, 1);
}

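/* Enable or disable the 2x (new timing) mode bit in the SD_NTSR register. */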
static void sunxi_mmc_2xmod_onoff(struct sunxi_mmc_host *host, u32 newmode_en)
{
	u32 rval = mmc_readl(host, REG_SD_NTSR);

	if (newmode_en)
		rval |= SDXC_2X_TIMING_MODE;
	else
		rval &= ~SDXC_2X_TIMING_MODE;

	mmc_writel(host, REG_SD_NTSR, rval);

	SM_DBG(mmc_dev(host->mmc), "REG_SD_NTSR: 0x%08x, val %x\n",
		mmc_readl(host, REG_SD_NTSR), rval);
}

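/*
 * Set the module clock for the requested ios->clock: pick osc24m or
 * pll_periph as parent, run the module clock at 2x (or 4x for DDR
 * timings) the card clock, program the internal divider, enable 2x
 * timing mode and apply the matching clock delays.
 */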
static int sunxi_mmc_clk_set_rate_for_sdmmc_v4p1x(struct sunxi_mmc_host *host,
						  struct mmc_ios *ios)
{
	u32 mod_clk = 0;
	u32 src_clk = 0;
	u32 rval = 0;
	s32 err = 0;
	u32 rate = 0;
	char *sclk_name = NULL;
	struct clk *mclk = host->clk_mmc;
	struct clk *sclk = NULL;
	struct device *dev = mmc_dev(host->mmc);
	int div = 0;

	if (ios->clock == 0) {
		__sunxi_mmc_do_oclk_onoff(host, 0, 0, 1);
		return 0;
	}

	if (sunxi_mmc_ddr_timing(ios->timing)) {
		mod_clk = ios->clock << 2;
		div = 1;
	} else {
		mod_clk = ios->clock << 1;
		div = 0;
	}

	sclk = clk_get(dev, "osc24m");
	sclk_name = "osc24m";
	if (IS_ERR(sclk)) {
		SM_ERR(mmc_dev(host->mmc), "failed to get source clock %s\n",
			sclk_name);
		return -1;
	}

	src_clk = clk_get_rate(sclk);
	if (mod_clk > src_clk) {
		clk_put(sclk);
		sclk = clk_get(dev, "pll_periph");
		sclk_name = "pll_periph";
	}
	if (IS_ERR(sclk)) {
		SM_ERR(mmc_dev(host->mmc), "failed to get source clock %s\n",
			sclk_name);
		return -1;
	}

	sunxi_mmc_oclk_onoff(host, 0);

	err = clk_set_parent(mclk, sclk);
	if (err) {
		SM_ERR(mmc_dev(host->mmc), "set parent failed\n");
		clk_put(sclk);
		return -1;
	}

	rate = clk_round_rate(mclk, mod_clk);

	SM_DBG(mmc_dev(host->mmc), "get round rate %d\n", rate);

	/*clk_disable_unprepare(host->clk_mmc);*/

	err = clk_set_rate(mclk, rate);
	if (err) {
		SM_ERR(mmc_dev(host->mmc), "set mclk rate error, rate %dHz\n",
			rate);
		clk_put(sclk);
		return -1;
	}
/*
	rval = clk_prepare_enable(host->clk_mmc);
	if (rval) {
		SM_ERR(mmc_dev(host->mmc), "Enable mmc clk err %d\n", rval);
		return -1;
	}
*/
	src_clk = clk_get_rate(sclk);
	clk_put(sclk);

	SM_DBG(mmc_dev(host->mmc), "set round clock %d, source clk is %d\n",
		rate, src_clk);

#ifdef MMC_FPGA
	if (sunxi_mmc_ddr_timing(ios->timing)) {
		/* clear internal divider */
		rval = mmc_readl(host, REG_CLKCR);
		rval &= ~0xff;
		rval |= 1;
	} else {
		/* support internal divide clock under fpga environment */
		rval = mmc_readl(host, REG_CLKCR);
		rval &= ~0xff;
		rval |= 24000000 / mod_clk / 2;	/* = 24M / 400K / 2 = 0x1E */
	}
	mmc_writel(host, REG_CLKCR, rval);
	SM_INFO(mmc_dev(host->mmc), "FPGA REG_CLKCR: 0x%08x\n",
		mmc_readl(host, REG_CLKCR));
#else
	/* clear internal divider */
	rval = mmc_readl(host, REG_CLKCR);
	rval &= ~0xff;
	rval |= div;
	mmc_writel(host, REG_CLKCR, rval);
#endif

	/*sunxi_of_parse_clk_dly(host); */
	sunxi_mmc_2xmod_onoff(host, 1);

	if (sunxi_mmc_ddr_timing(ios->timing))
		ios->clock = rate >> 2;
	else
		ios->clock = rate >> 1;

	sunxi_mmc_set_clk_dly(host, ios->clock, ios->bus_width, ios->timing);

	return sunxi_mmc_oclk_onoff(host, 1);
}

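/*
 * Configure the card read threshold: enable it only for read transfers
 * whose block size fits in the FIFO together with the DMA RX trigger
 * level, and only for HS200/SDR50/SDR104 timings; otherwise disable it.
 */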
static void sunxi_mmc_thld_ctl_for_sdmmc_v4p1x(struct sunxi_mmc_host *host,
					       struct mmc_ios *ios,
					       struct mmc_data *data)
{
	u32 bsz = data->blksz;
	/* unit: byte */
	/*u32 tdtl = (host->dma_tl & SDXC_TX_TL_MASK) << 2;*/
	/* unit: byte */
	u32 rdtl = ((host->dma_tl & SDXC_RX_TL_MASK) >> 16) << 2;
	u32 rval = 0;

	if ((data->flags & MMC_DATA_READ)
	    && (bsz <= SDXC_CARD_RD_THLD_SIZE)
	    /* ((SDXC_FIFO_DETH << 2) - bsz) >= (rdtl) */
	    && ((SDXC_FIFO_DETH << 2) >= (rdtl + bsz))
	    && ((ios->timing == MMC_TIMING_MMC_HS200)
	       || (ios->timing == MMC_TIMING_UHS_SDR50)
	       || (ios->timing == MMC_TIMING_UHS_SDR104))) {
		rval = mmc_readl(host, REG_THLD);
		rval &= ~SDXC_CARD_RD_THLD_MASK;
		rval |= data->blksz << SDXC_CARD_RD_THLD_SIZE_SHIFT;
		rval |= SDXC_CARD_RD_THLD_ENB;
		mmc_writel(host, REG_THLD, rval);
	} else {
		rval = mmc_readl(host, REG_THLD);
		rval &= ~SDXC_CARD_RD_THLD_ENB;
		mmc_writel(host, REG_THLD, rval);
	}

	SM_DBG(mmc_dev(host->mmc), "SDXC_REG_THLD: 0x%08x\n",
		mmc_readl(host, REG_THLD));
}

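/*
 * Back up the version-specific delay/timing registers so they can be
 * restored later (presumably across a controller reset or suspend/resume).
 */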
static void sunxi_mmc_save_spec_reg_v4p1x(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_spec_regs *spec_regs =
	    &((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))->
	    bak_spec_regs;

	spec_regs->drv_dl = mmc_readl(host, REG_DRV_DL);
	spec_regs->samp_dl = mmc_readl(host, REG_SAMP_DL);
	spec_regs->ds_dl = mmc_readl(host, REG_DS_DL);
	spec_regs->sd_ntsr = mmc_readl(host, REG_SD_NTSR);
}

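/* Write the previously saved delay/timing registers back to the controller. */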
static void sunxi_mmc_restore_spec_reg_v4p1x(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_spec_regs *spec_regs =
	    &((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))->
	    bak_spec_regs;

	sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, spec_regs->drv_dl));
	mmc_writel(host, REG_SAMP_DL, spec_regs->samp_dl);
	mmc_writel(host, REG_DS_DL, spec_regs->ds_dl);
	mmc_writel(host, REG_SD_NTSR, spec_regs->sd_ntsr);
}

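/*
 * Directly set the output drive phases and input sample phases.
 * A negative argument leaves the corresponding field unchanged; the 2x
 * timing mode bit is cleared and then set again around the update.
 */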
static inline void sunxi_mmc_set_dly_raw(struct sunxi_mmc_host *host,
					 s32 opha_cmd, s32 ipha_cmd,
					 s32 opha_dat, s32 ipha_dat)
{
	u32 rval = mmc_readl(host, REG_DRV_DL);

	if (opha_cmd > 0)
		rval |= SDXC_CMD_DRV_PH_SEL;	/* 180 degree phase */
	else if (opha_cmd == 0)
		rval &= ~SDXC_CMD_DRV_PH_SEL;	/* 90 degree phase */

	if (opha_dat > 0)
		rval |= SDXC_DAT_DRV_PH_SEL;	/* 180 degree phase */
	else if (opha_dat == 0)
		rval &= ~SDXC_DAT_DRV_PH_SEL;	/* 90 degree phase */

	sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, rval));

	rval = mmc_readl(host, REG_SD_NTSR);

	if (ipha_cmd >= 0) {
		rval &= ~SDXC_STIMING_CMD_PH_MASK;
		rval |= (ipha_cmd << SDXC_STIMING_CMD_PH_SHIFT) &
		    SDXC_STIMING_CMD_PH_MASK;
	}

	if (ipha_dat >= 0) {
		rval &= ~SDXC_STIMING_DAT_PH_MASK;
		rval |= (ipha_dat << SDXC_STIMING_DAT_PH_SHIFT) &
		    SDXC_STIMING_DAT_PH_MASK;
	}

	rval &= ~SDXC_2X_TIMING_MODE;
	mmc_writel(host, REG_SD_NTSR, rval);
	rval |= SDXC_2X_TIMING_MODE;
	mmc_writel(host, REG_SD_NTSR, rval);

	SM_INFO(mmc_dev(host->mmc), "REG_DRV_DL: 0x%08x\n",
		 mmc_readl(host, REG_DRV_DL));
	SM_INFO(mmc_dev(host->mmc), "REG_SD_NTSR: 0x%08x\n",
		 mmc_readl(host, REG_SD_NTSR));
}

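/*
 * Called on request errors: step through a table of drive/sample phase
 * combinations, trying each entry SUNXI_RETRY_CNT_PER_PHA_V4P1X times.
 * Returns 0 to retry with the newly applied phases, or -1 to give up
 * after reapplying the first entry ({-1, -1}, i.e. phases unchanged).
 */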
static int sunxi_mmc_judge_retry_v4p1x(struct sunxi_mmc_host *host,
				       struct mmc_command *cmd, u32 rcnt,
				       u32 errno, void *other)
{
	/* -1 means keep the current (default) value */
	/*
	 * We use {-1, -1} as the first member because we want to retry
	 * with the current delay first; only if the current delay fails
	 * do we try a new delay.
	 */
	const s32 sunxi_phase[10][2] = { {-1, -1},
		{1, 1}, {0, 0}, {1, 0}, {0, 1}, {1, 2}, {0, 2} };

	if (rcnt < (SUNXI_RETRY_CNT_PER_PHA_V4P1X * 10)) {
		const s32 *pha =
		    sunxi_phase[rcnt / SUNXI_RETRY_CNT_PER_PHA_V4P1X];

		sunxi_mmc_set_dly_raw(host, pha[0], pha[1], pha[0], pha[1]);
		return 0;
	}

	sunxi_mmc_set_dly_raw(host, sunxi_phase[0][0],
			      sunxi_phase[0][1],
			      sunxi_phase[0][0], sunxi_phase[0][1]);
	SM_INFO(mmc_dev(host->mmc), "sunxi v4p1x retry give up\n");
	return -1;
}

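/*
 * Install the v4p1x-specific defaults and callbacks on the host: default
 * per-mode clock delays, DMA trigger level, descriptor size, threshold
 * control, register save/restore, retry judgement and clock handlers.
 */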
void sunxi_mmc_init_priv_v4p1x(struct sunxi_mmc_host *host,
			       struct platform_device *pdev, int phy_index)
{
	struct sunxi_mmc_ver_priv *ver_priv =
	    devm_kzalloc(&pdev->dev, sizeof(struct sunxi_mmc_ver_priv),
			 GFP_KERNEL);

	host->version_priv_dat = ver_priv;

	ver_priv->mmc_clk_dly[mmc_clk_400k].cmod = mmc_clk_400k;
	ver_priv->mmc_clk_dly[mmc_clk_400k].mod_str = "sunxi-dly-400k";
	ver_priv->mmc_clk_dly[mmc_clk_400k].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_400k].dat_drv_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_400k].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_400k].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_400k].sam_ph_dat = 0;
	ver_priv->mmc_clk_dly[mmc_clk_400k].sam_ph_cmd = 0;

	ver_priv->mmc_clk_dly[mmc_clk_26M].cmod = mmc_clk_26M;
	ver_priv->mmc_clk_dly[mmc_clk_26M].mod_str = "sunxi-dly-26M";
	ver_priv->mmc_clk_dly[mmc_clk_26M].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_26M].dat_drv_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_26M].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_26M].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_26M].sam_ph_dat = 0;
	ver_priv->mmc_clk_dly[mmc_clk_26M].sam_ph_cmd = 0;

	ver_priv->mmc_clk_dly[mmc_clk_52M].cmod = mmc_clk_52M;
	ver_priv->mmc_clk_dly[mmc_clk_52M].mod_str = "sunxi-dly-52M";
	ver_priv->mmc_clk_dly[mmc_clk_52M].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M].dat_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_52M].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_52M].sam_ph_dat = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M].sam_ph_cmd = 1;

	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].cmod = mmc_clk_52M_DDR4;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].mod_str = "sunxi-dly-52M-ddr4";
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].dat_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].sam_ph_dat = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].sam_ph_cmd = 1;

	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].cmod = mmc_clk_52M_DDR8;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].mod_str = "sunxi-dly-52M-ddr8";
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].dat_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].sam_ph_dat = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].sam_ph_cmd = 1;

	ver_priv->mmc_clk_dly[mmc_clk_104M].cmod = mmc_clk_104M;
	ver_priv->mmc_clk_dly[mmc_clk_104M].mod_str = "sunxi-dly-104M";
	ver_priv->mmc_clk_dly[mmc_clk_104M].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_104M].dat_drv_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M].sam_ph_dat = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M].sam_ph_cmd = 0;

	ver_priv->mmc_clk_dly[mmc_clk_208M].cmod = mmc_clk_208M;
	ver_priv->mmc_clk_dly[mmc_clk_208M].mod_str = "sunxi-dly-208M";
	ver_priv->mmc_clk_dly[mmc_clk_208M].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_208M].dat_drv_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M].sam_ph_dat = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M].sam_ph_cmd = 0;

	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].cmod = mmc_clk_104M_DDR;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].mod_str = "sunxi-dly-104M-ddr";
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].dat_drv_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].sam_ph_dat = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].sam_ph_cmd = 0;

	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].cmod = mmc_clk_208M_DDR;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].mod_str = "sunxi-dly-208M-ddr";
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].cmd_drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].dat_drv_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].sam_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].ds_dly = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].sam_ph_dat = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].sam_ph_cmd = 0;

	host->sunxi_mmc_clk_set_rate = sunxi_mmc_clk_set_rate_for_sdmmc_v4p1x;
	host->dma_tl = SUNXI_DMA_TL_SDMMC_V4P1X;
	host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC_V4P1X;
	host->sunxi_mmc_thld_ctl = sunxi_mmc_thld_ctl_for_sdmmc_v4p1x;
	host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg_v4p1x;
	host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg_v4p1x;
	sunxi_mmc_reg_ex_res_inter(host, phy_index);
	host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
	host->phy_index = phy_index;
	host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff;
	host->sunxi_mmc_judge_retry = sunxi_mmc_judge_retry_v4p1x;
	/*sunxi_of_parse_clk_dly(host); */
#if (defined(CONFIG_ARCH_SUN50IW9) || defined(CONFIG_ARCH_SUN50IW10))
	host->des_addr_shift = 2;
#endif
}
EXPORT_SYMBOL_GPL(sunxi_mmc_init_priv_v4p1x);