/*
 * SUNXI EMMC/SD driver
 *
 * Copyright (C) 2015 AllWinnertech Ltd.
 * Author: lijuan <lijuan@allwinnertech.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/reset/sunxi.h>

#include <linux/gpio.h>
/* #include <linux/sunxi-gpio.h> */
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/reset.h>

#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "sunxi-mmc.h"
#include "sunxi-mmc-v4p10x.h"
#include "sunxi-mmc-export.h"
#include "sunxi-mmc-debug.h"

#define MMC_2MOD_CLK		"sdmmc2mod"
#define MMC_SRCCLK_PLL		"pll_periph"
#define MMC_SRCCLK_HOSC		"hosc"
#define SUNXI_RETRY_CNT_PER_PHA_V4P1X	3

/* DMA trigger level setting */
#define SUNXI_DMA_TL_SDMMC_V4P1X	((0x2<<28)|(7<<16)|16)
/* one DMA descriptor can transfer at most 1<<SUNXI_DES_SIZE_SDMMC_V4P1X bytes */
#define SUNXI_DES_SIZE_SDMMC_V4P1X	(15)
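
/*
 * Layout of SUNXI_DMA_TL_SDMMC_V4P1X, matching the SDXC_TX_TL_MASK and
 * SDXC_RX_TL_MASK definitions below: bits [4:0] hold the TX trigger level
 * (16) and bits [20:16] the RX trigger level (7); the 0x2 in the upper bits
 * is presumed to select the DMA burst size.  With
 * SUNXI_DES_SIZE_SDMMC_V4P1X = 15, each IDMA descriptor covers at most
 * 1 << 15 = 32 KiB of data.
 */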

/* reg */
#define SDXC_REG_SD_NTSR	(0x005C)
/* bit */
#define SDXC_2X_TIMING_MODE	(1U<<31)

/* mask */
#define SDXC_TX_TL_MASK		(0x1f)
#define SDXC_RX_TL_MASK		(0x001F0000)
#define SDXC_STIMING_PH_MASK	(0x00000030)
#define SDXC_DRV_PH_MASK	(0x00000003)

/* shift */
#define SDXC_STIMING_PH_SHIFT	(4)
#define SDXC_DRV_PH_SHIFT	(0)

enum sunxi_mmc_clk_mode {
	mmc_clk_400k = 0,
	mmc_clk_26M,
	mmc_clk_52M,
	mmc_clk_52M_DDR4,
	mmc_clk_52M_DDR8,
	mmc_clk_104M,
	mmc_clk_208M,
	mmc_clk_104M_DDR,
	mmc_clk_208M_DDR,
	mmc_clk_mod_num,
};

struct sunxi_mmc_clk_dly {
	enum sunxi_mmc_clk_mode cmod;
	char *mod_str;
	u32 drv_ph;
	u32 sam_ph;
};


/* sample delay and output delay settings */
struct sunxi_mmc_spec_regs {
	u32 sd_ntsr;	/* REG_SD_NTSR */
};

struct sunxi_mmc_ver_priv {
	struct sunxi_mmc_spec_regs bak_spec_regs;
	struct sunxi_mmc_clk_dly mmc_clk_dly[mmc_clk_mod_num];
};

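/*
 * sunxi_mmc_set_clk_dly() - program drive/sample phase for the current speed
 *
 * Maps the requested clock/bus-width/timing to one of the sunxi_mmc_clk_mode
 * entries, optionally overrides the built-in defaults with the per-mode DT
 * property named by mod_str (e.g. "sunxi-dly-52M", a 5-cell array of which
 * only cells [0] (drive phase) and [3] (sample phase) are used here), and
 * then writes the drive and sample phase fields of REG_SD_NTSR.
 */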
static void sunxi_mmc_set_clk_dly(struct sunxi_mmc_host *host, int clk,
				  int bus_width, int timing)
{
	struct mmc_host *mhost = host->mmc;
	u32 rval = 0;
	enum sunxi_mmc_clk_mode cmod = mmc_clk_400k;
	u32 in_clk_dly[5] = { 0 };
	int ret = 0;
	struct device_node *np = NULL;
	struct sunxi_mmc_clk_dly *mmc_clk_dly =
	    ((struct sunxi_mmc_ver_priv *)host->version_priv_dat)->mmc_clk_dly;

	if (!mhost->parent || !mhost->parent->of_node) {
		SM_ERR(mmc_dev(host->mmc),
		       "no dts to parse clk dly, use default\n");
		return;
	}

	np = mhost->parent->of_node;

	if (clk <= 400 * 1000) {
		cmod = mmc_clk_400k;
	} else if (clk <= 26 * 1000 * 1000) {
		cmod = mmc_clk_26M;
	} else if (clk <= 52 * 1000 * 1000) {
		if ((bus_width == MMC_BUS_WIDTH_4)
		    && sunxi_mmc_ddr_timing(timing)) {
			cmod = mmc_clk_52M_DDR4;
		} else if ((bus_width == MMC_BUS_WIDTH_8)
			   && (timing == MMC_TIMING_MMC_DDR52)) {
			cmod = mmc_clk_52M_DDR8;
		} else {
			cmod = mmc_clk_52M;
		}
	} else if (clk <= 104 * 1000 * 1000) {
		if ((bus_width == MMC_BUS_WIDTH_8)
		    && (timing == MMC_TIMING_MMC_HS400)) {
			cmod = mmc_clk_104M_DDR;
		} else {
			cmod = mmc_clk_104M;
		}
	} else if (clk <= 208 * 1000 * 1000) {
		if ((bus_width == MMC_BUS_WIDTH_8)
		    && (timing == MMC_TIMING_MMC_HS400)) {
			cmod = mmc_clk_208M_DDR;
		} else {
			cmod = mmc_clk_208M;
		}
	} else {
		SM_ERR(mmc_dev(mhost), "clk %d is out of range\n", clk);
		return;
	}

	ret = of_property_read_u32_array(np, mmc_clk_dly[cmod].mod_str,
					 in_clk_dly, ARRAY_SIZE(in_clk_dly));
	if (ret) {
		SM_DBG(mmc_dev(host->mmc), "failed to get %s, use default\n",
		       mmc_clk_dly[cmod].mod_str);
	} else {
		mmc_clk_dly[cmod].drv_ph = in_clk_dly[0];
		/* mmc_clk_dly[cmod].sam_dly = in_clk_dly[2]; */
		/* mmc_clk_dly[cmod].ds_dly = in_clk_dly[3]; */
		mmc_clk_dly[cmod].sam_ph = in_clk_dly[3];
		SM_DBG(mmc_dev(host->mmc), "Get %s clk dly ok\n",
		       mmc_clk_dly[cmod].mod_str);
	}
	SM_DBG(mmc_dev(host->mmc), "Try to set %s clk dly\n",
	       mmc_clk_dly[cmod].mod_str);
	SM_DBG(mmc_dev(host->mmc), "drv_ph %d\n",
	       mmc_clk_dly[cmod].drv_ph);
	SM_DBG(mmc_dev(host->mmc), "sam_ph %d\n",
	       mmc_clk_dly[cmod].sam_ph);

	/*
	 * rval = mmc_readl(host, REG_SAMP_DL);
	 * rval &= ~SDXC_SAMP_DL_SW_MASK;
	 * rval |= mmc_clk_dly[cmod].sam_dly & SDXC_SAMP_DL_SW_MASK;
	 * rval |= SDXC_SAMP_DL_SW_EN;
	 * mmc_writel(host, REG_SAMP_DL, rval);
	 *
	 * rval = mmc_readl(host, REG_DS_DL);
	 * rval &= ~SDXC_DS_DL_SW_MASK;
	 * rval |= mmc_clk_dly[cmod].ds_dly & SDXC_DS_DL_SW_MASK;
	 * rval |= SDXC_DS_DL_SW_EN;
	 * mmc_writel(host, REG_DS_DL, rval);
	 */

	rval = mmc_readl(host, REG_SD_NTSR);
	rval &= ~SDXC_DRV_PH_MASK;
	rval |= (mmc_clk_dly[cmod].drv_ph << SDXC_DRV_PH_SHIFT) &
		SDXC_DRV_PH_MASK;
	mmc_writel(host, REG_SD_NTSR, rval);

	rval = mmc_readl(host, REG_SD_NTSR);
	rval &= ~SDXC_STIMING_PH_MASK;
	rval |= (mmc_clk_dly[cmod].sam_ph << SDXC_STIMING_PH_SHIFT) &
		SDXC_STIMING_PH_MASK;
	mmc_writel(host, REG_SD_NTSR, rval);

	SM_DBG(mmc_dev(host->mmc), "REG_SD_NTSR %08x\n",
	       mmc_readl(host, REG_SD_NTSR));
}

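/*
 * __sunxi_mmc_do_oclk_onoff() - gate/ungate the card clock
 *
 * Updates the card-clock enable, low-power and DATA0-mask bits in REG_CLKCR,
 * then issues an "update clock" command (SDXC_START | SDXC_UPCLK_ONLY) and
 * polls up to 250 ms for the controller to latch the new setting.  When
 * @ignore_dat0 is set, DATA0 is masked only for the duration of the update.
 */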
static int __sunxi_mmc_do_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en,
				     u32 pwr_save, u32 ignore_dat0)
{
	unsigned long expire = jiffies + msecs_to_jiffies(250);
	u32 rval;

	rval = mmc_readl(host, REG_CLKCR);
	rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0);

	if (oclk_en)
		rval |= SDXC_CARD_CLOCK_ON;
	if (pwr_save)
		rval |= SDXC_LOW_POWER_ON;
	if (ignore_dat0)
		rval |= SDXC_MASK_DATA0;

	mmc_writel(host, REG_CLKCR, rval);

	SM_DBG(mmc_dev(host->mmc), "%s REG_CLKCR:%x\n", __func__,
	       mmc_readl(host, REG_CLKCR));

	rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER;
	mmc_writel(host, REG_CMDR, rval);

	do {
		rval = mmc_readl(host, REG_CMDR);
	} while (time_before(jiffies, expire) && (rval & SDXC_START));

	/* clear irq status bits set by the command */
	mmc_writel(host, REG_RINTR,
		   mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT);

	if (rval & SDXC_START) {
		SM_ERR(mmc_dev(host->mmc), "fatal err update clk timeout\n");
		return -EIO;
	}

	/* DATA0 is only masked while updating the clock; clear the mask now */
	if (ignore_dat0)
		mmc_writel(host, REG_CLKCR,
			   mmc_readl(host, REG_CLKCR) & ~SDXC_MASK_DATA0);

	return 0;
}

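/*
 * sunxi_mmc_oclk_onoff() - gate/ungate the card clock, honouring DT policy
 *
 * Enables the controller's low-power (automatic clock gating) mode only when
 * the host node carries the "sunxi-power-save-mode" property, then defers to
 * __sunxi_mmc_do_oclk_onoff() with DATA0 masking enabled.
 */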
static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
{
	struct device_node *np = NULL;
	struct mmc_host *mmc = host->mmc;
	int pwr_save = 0;
	int len = 0;

	if (!mmc->parent || !mmc->parent->of_node) {
		SM_ERR(mmc_dev(host->mmc),
		       "no dts to parse power save mode\n");
		return -EIO;
	}

	np = mmc->parent->of_node;
	if (of_find_property(np, "sunxi-power-save-mode", &len))
		pwr_save = 1;
	return __sunxi_mmc_do_oclk_onoff(host, oclk_en, pwr_save, 1);
}

static int sunxi_mmc_updata_pha_v4p10x(struct sunxi_mmc_host *host,
				       struct mmc_command *cmd,
				       struct mmc_data *data)
{
	return sunxi_mmc_oclk_onoff(host, 1);
}

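/*
 * sunxi_mmc_2xmod_onoff() - enable/disable the controller's 2x timing mode
 * by setting/clearing SDXC_2X_TIMING_MODE (bit 31) in REG_SD_NTSR.
 */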
static void sunxi_mmc_2xmod_onoff(struct sunxi_mmc_host *host, u32 newmode_en)
{
	u32 rval = mmc_readl(host, REG_SD_NTSR);

	if (newmode_en)
		rval |= SDXC_2X_TIMING_MODE;
	else
		rval &= ~SDXC_2X_TIMING_MODE;
	mmc_writel(host, REG_SD_NTSR, rval);

	SM_DBG(mmc_dev(host->mmc), "REG_SD_NTSR: 0x%08x, val %x\n",
	       mmc_readl(host, REG_SD_NTSR), rval);
}

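/*
 * sunxi_mmc_clk_set_rate_for_sdmmc_v4p10x() - configure the card clock
 *
 * Picks the source clock ("hosc" for <= 400 kHz, otherwise "pll_periph"),
 * enables 2x timing mode, reparents the "sdmmc2mod" clock and sets it to
 * half of the source rate, then rounds and programs the mmc module clock.
 * For DDR timings the module clock is requested at twice the card clock and
 * the internal divider in REG_CLKCR is programmed so the card sees half the
 * module rate.  Finally the drive/sample phases are updated and the card
 * clock is re-enabled.
 */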
static int sunxi_mmc_clk_set_rate_for_sdmmc_v4p10x(struct sunxi_mmc_host *host,
						   struct mmc_ios *ios)
{
	u32 mod_clk = 0;
	u32 src_clk = 0;
	u32 rval = 0;
	u32 rval1 = 0;
	s32 err = 0;
	u32 rate = 0;
	u32 clk = 0;
	u32 source_rate = 0;
	u32 sdmmc2mod_rate = 0;
	char *sclk_name = NULL;
	struct clk *sclk = NULL;
	struct clk *mclk2 = NULL;
	struct device *dev = mmc_dev(host->mmc);

	if (ios->clock == 0) {
		__sunxi_mmc_do_oclk_onoff(host, 0, 0, 1);
		return 0;
	}

	if (sunxi_mmc_ddr_timing(ios->timing))
		mod_clk = ios->clock << 1;
	else
		mod_clk = ios->clock;

	if (ios->clock <= 400000) {
		sclk_name = MMC_SRCCLK_HOSC;
		sclk = clk_get(dev, sclk_name);
	} else {
		sclk_name = MMC_SRCCLK_PLL;
		sclk = clk_get(dev, sclk_name);
	}
	if (IS_ERR_OR_NULL(sclk)) {
		SM_ERR(mmc_dev(host->mmc),
		       "Error to get source clock %s %ld\n",
		       sclk_name, (long)sclk);
		return -1;
	}

	sunxi_mmc_2xmod_onoff(host, 1);

	mclk2 = clk_get(dev, MMC_2MOD_CLK);
	if (IS_ERR_OR_NULL(mclk2)) {
		SM_ERR(mmc_dev(host->mmc),
		       "Error to get mod clock for clk %dHz\n", mod_clk);
		clk_put(sclk);
		return -1;
	}

	err = clk_set_parent(mclk2, sclk);

	source_rate = clk_get_rate(sclk);
	sdmmc2mod_rate = source_rate / 2;
	clk_set_rate(mclk2, sdmmc2mod_rate);

	clk_put(mclk2);
	if (err) {
		clk_put(sclk);
		return -1;
	}

	rate = clk_round_rate(host->clk_mmc, mod_clk);
	SM_DBG(mmc_dev(host->mmc), "get round rate %d\n", rate);

	clk_disable_unprepare(host->clk_mmc);
	/* sunxi_dump_reg(NULL); */

	err = clk_set_rate(host->clk_mmc, rate);
	if (err) {
		SM_ERR(mmc_dev(host->mmc),
		       "set mclk rate error, rate %dHz\n",
		       rate);
		clk_put(sclk);
		return -1;
	}

	rval1 = clk_prepare_enable(host->clk_mmc);
	if (rval1) {
		SM_ERR(mmc_dev(host->mmc),
		       "Enable mmc clk err %d\n", rval1);
		clk_put(sclk);
		return -1;
	}

	/* sunxi_dump_reg(NULL); */
	src_clk = clk_get_rate(sclk);
	clk_put(sclk);

	SM_DBG(mmc_dev(host->mmc),
	       "set round clock %d, source clk is %d\n",
	       rate, src_clk);

	rval = mmc_readl(host, REG_CLKCR);
	rval &= ~0xff;
	if (sunxi_mmc_ddr_timing(ios->timing)) {
		/* DDR timing: program the divider so the card sees rate/2 */
		rval |= 1;
		ios->clock = rate >> 1;
		clk = ios->clock;
		SM_DBG(mmc_dev(host->mmc), "card clk %d\n", clk);
	} else {
		ios->clock = rate;
		clk = ios->clock;
	}
	mmc_writel(host, REG_CLKCR, rval);

	sunxi_mmc_set_clk_dly(host, ios->clock, ios->bus_width, ios->timing);
	return sunxi_mmc_oclk_onoff(host, 1);
}

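/*
 * Save/restore of the version-specific registers: only REG_SD_NTSR needs to
 * be preserved here, since it holds the 2x timing mode enable and the
 * drive/sample phase selection, presumably so they survive a controller
 * power-down (e.g. across suspend/resume).
 */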
static void sunxi_mmc_save_spec_reg_v4p10x(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_spec_regs *spec_regs =
	    &((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))
	    ->bak_spec_regs;

	spec_regs->sd_ntsr = mmc_readl(host, REG_SD_NTSR);
}

static void sunxi_mmc_restore_spec_reg_v4p10x(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_spec_regs *spec_regs =
	    &((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))
	    ->bak_spec_regs;

	mmc_writel(host, REG_SD_NTSR, spec_regs->sd_ntsr);
}

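/*
 * sunxi_mmc_set_dly_raw() - directly program output/sample phase numbers
 *
 * A negative @opha or @ipha leaves the corresponding field untouched.  The
 * new value is written twice, first with SDXC_2X_TIMING_MODE cleared and
 * then with it set again, which appears to be required for the controller
 * to latch the new phase selection.
 */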
static inline void sunxi_mmc_set_dly_raw(struct sunxi_mmc_host *host,
					 s32 opha, s32 ipha)
{
	u32 rval = mmc_readl(host, REG_SD_NTSR);

	if (ipha >= 0) {
		rval &= ~SDXC_STIMING_PH_MASK;
		rval |= (ipha << SDXC_STIMING_PH_SHIFT) & SDXC_STIMING_PH_MASK;
	}

	if (opha >= 0) {
		rval &= ~SDXC_DRV_PH_MASK;
		rval |= (opha << SDXC_DRV_PH_SHIFT) & SDXC_DRV_PH_MASK;
	}

	rval &= ~SDXC_2X_TIMING_MODE;
	mmc_writel(host, REG_SD_NTSR, rval);
	rval |= SDXC_2X_TIMING_MODE;
	mmc_writel(host, REG_SD_NTSR, rval);

	SM_INFO(mmc_dev(host->mmc), "REG_SD_NTSR: 0x%08x\n",
		mmc_readl(host, REG_SD_NTSR));
}

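/*
 * sunxi_mmc_judge_retry_v4p10x() - pick the next phase pair after an error
 *
 * Walks the sunxi_phase table of {output phase, input phase} pairs, trying
 * each pair SUNXI_RETRY_CNT_PER_PHA_V4P1X times; entries beyond the
 * initializer list are implicitly {0, 0}.  Once all 10 slots are exhausted,
 * the first entry {-1, -1} is applied (leaving both phases untouched) and
 * -1 is returned to give up.
 */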
static int sunxi_mmc_judge_retry_v4p10x(struct sunxi_mmc_host *host,
					struct mmc_command *cmd, u32 rcnt,
					u32 errno, void *other)
{
	/* unlisted entries are implicitly {0, 0} */
	const s32 sunxi_phase[10][2] = {
		{-1, -1}, {1, 1}, {0, 0}, {1, 0}, {0, 1}, {1, 2}, {0, 2},
	};

	if (rcnt < (SUNXI_RETRY_CNT_PER_PHA_V4P1X * 10)) {
		sunxi_mmc_set_dly_raw(host,
			sunxi_phase[rcnt / SUNXI_RETRY_CNT_PER_PHA_V4P1X][0],
			sunxi_phase[rcnt / SUNXI_RETRY_CNT_PER_PHA_V4P1X][1]);
	} else {
		sunxi_mmc_set_dly_raw(host, sunxi_phase[0][0],
				      sunxi_phase[0][1]);
		SM_INFO(mmc_dev(host->mmc), "sunxi v4p10x retry give up\n");
		return -1;
	}
	return 0;
}

static bool sunxi_mmc_hw_busy_v4p10x(struct sunxi_mmc_host *host)
{
	/* v4p10x hosts report hardware busy via the DAT0 GPIO check if set */
	if (host->sunxi_mmc_dat0_busy)
		return true;

	return false;
}

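/*
 * sunxi_mmc_dat0_busy_v4p10x() - sample the card busy state on DAT0
 *
 * Temporarily re-muxes the pin named by the "dat0-gpios" DT property to a
 * GPIO input, reads its level, then switches the pin back to mux function 3
 * (the sdcx_dat0 function, per the mux value used below).  Returns non-zero
 * while the card holds DAT0 low, i.e. while it is busy.
 */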
static int sunxi_mmc_dat0_busy_v4p10x(struct sunxi_mmc_host *host)
{
	struct device_node *np;
	struct mmc_host *mmc = host->mmc;
	unsigned long config_set;
	unsigned long config_get = 0;
	enum of_gpio_flags gpio_flags;
	int gpio;

	if (!mmc->parent || !mmc->parent->of_node)
		return 0;

	np = mmc->parent->of_node;
	gpio = of_get_named_gpio_flags(np, "dat0-gpios", 0, &gpio_flags);
	if (!gpio_is_valid(gpio)) {
		pr_err("mmc: failed to get dat0-gpios\n");
	} else {
		/* change the pin function to GPIO input */
		config_set = pinconf_to_config_packed(
			(enum pin_config_param)SUNXI_PINCFG_TYPE_FUNC, 0);
		pinctrl_gpio_set_config(gpio, config_set);

		/* read the sdcx_dat0 level */
		config_get = gpio_get_value(gpio);

		/* change the pin function back to sdcx_dat0 */
		config_set = pinconf_to_config_packed(
			(enum pin_config_param)SUNXI_PINCFG_TYPE_FUNC, 3);
		pinctrl_gpio_set_config(gpio, config_set);
	}

	return !config_get;
}

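/*
 * sunxi_mmc_init_priv_v4p10x() - set up v4p10x-specific host state
 *
 * Allocates the per-version private data, fills in default drive/sample
 * phases and the per-mode DT property names ("sunxi-dly-400k" ...
 * "sunxi-dly-208M-ddr"), and hooks the v4p10x implementations of the clock,
 * retry, register save/restore and busy-detection callbacks into the host.
 */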
void sunxi_mmc_init_priv_v4p10x(struct sunxi_mmc_host *host,
				struct platform_device *pdev, int phy_index)
{
	struct sunxi_mmc_ver_priv *ver_priv =
	    devm_kzalloc(&pdev->dev, sizeof(struct sunxi_mmc_ver_priv),
			 GFP_KERNEL);

	if (!ver_priv)
		return;

	host->version_priv_dat = ver_priv;

	ver_priv->mmc_clk_dly[mmc_clk_400k].cmod = mmc_clk_400k;
	ver_priv->mmc_clk_dly[mmc_clk_400k].mod_str = "sunxi-dly-400k";
	ver_priv->mmc_clk_dly[mmc_clk_400k].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_400k].sam_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_26M].cmod = mmc_clk_26M;
	ver_priv->mmc_clk_dly[mmc_clk_26M].mod_str = "sunxi-dly-26M";
	ver_priv->mmc_clk_dly[mmc_clk_26M].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_26M].sam_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M].cmod = mmc_clk_52M;
	ver_priv->mmc_clk_dly[mmc_clk_52M].mod_str = "sunxi-dly-52M";
	ver_priv->mmc_clk_dly[mmc_clk_52M].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M].sam_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].cmod = mmc_clk_52M_DDR4;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].mod_str = "sunxi-dly-52M-ddr4";
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].sam_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].cmod = mmc_clk_52M_DDR8;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].mod_str = "sunxi-dly-52M-ddr8";
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].sam_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_104M].cmod = mmc_clk_104M;
	ver_priv->mmc_clk_dly[mmc_clk_104M].mod_str = "sunxi-dly-104M";
	ver_priv->mmc_clk_dly[mmc_clk_104M].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_104M].sam_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M].cmod = mmc_clk_208M;
	ver_priv->mmc_clk_dly[mmc_clk_208M].mod_str = "sunxi-dly-208M";
	ver_priv->mmc_clk_dly[mmc_clk_208M].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_208M].sam_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].cmod = mmc_clk_104M_DDR;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].mod_str = "sunxi-dly-104M-ddr";
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].sam_ph = 0;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].cmod = mmc_clk_208M_DDR;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].mod_str = "sunxi-dly-208M-ddr";
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].drv_ph = 1;
	ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].sam_ph = 0;

	host->sunxi_mmc_clk_set_rate = sunxi_mmc_clk_set_rate_for_sdmmc_v4p10x;
	host->dma_tl = SUNXI_DMA_TL_SDMMC_V4P1X;
	host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC_V4P1X;
	host->sunxi_mmc_thld_ctl = NULL;
	host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg_v4p10x;
	host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg_v4p10x;
	sunxi_mmc_reg_ex_res_inter(host, phy_index);
	host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
	host->phy_index = phy_index;
	host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff;
	host->sunxi_mmc_judge_retry = sunxi_mmc_judge_retry_v4p10x;
	host->sunxi_mmc_updata_pha = sunxi_mmc_updata_pha_v4p10x;
	host->sunxi_mmc_hw_busy = sunxi_mmc_hw_busy_v4p10x;
	host->sunxi_mmc_dat0_busy = sunxi_mmc_dat0_busy_v4p10x;
	/* sunxi_of_parse_clk_dly(host); */
}
EXPORT_SYMBOL_GPL(sunxi_mmc_init_priv_v4p10x);