/*
 * Sunxi SD/MMC host driver
 *
 * Copyright (C) 2015 AllWinnertech Ltd.
 * Author: lixiang <lixiang@allwinnertech>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/reset/sunxi.h>

#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/reset.h>

#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "sunxi-mmc.h"
#include "sunxi-mmc-v4p5x.h"
#include "sunxi-mmc-export.h"
#include "sunxi-mmc-debug.h"

/* registers */
#define SDXC_REG_SFC		(0x0104)
#define SDXC_REG_EDSD		(0x010C)	/* SMHC eMMC4.5 DDR Start Bit Detection Control Register */
#define SDXC_REG_CSDC		(0x0054)	/* SMHC CRC Status Detect Control Register */
#define SDXC_REG_THLD		(0x0100)	/* SMHC Card Threshold Control Register */
#define SDXC_REG_DRV_DL		(0x0140)	/* SMHC Drive Delay Control Register */
#define SDXC_REG_SAMP_DL	(0x0144)	/* SMHC Sample Delay Control Register */
#define SDXC_REG_DS_DL		(0x0148)	/* SMHC Data Strobe Delay Control Register */
#define SDXC_REG_EMCE		(0x64)		/* SMHC EMCE Control Register */
#define SDXC_REG_SD_NTSR	(0x005C)
#define SDXC_REG_SMCV		(0x300)		/* SMHC Version Register */

/* used only for controller versions 4.9 and later */
#define SDXC_REG_A23A		(0x108)
#define SDXC_REG_ECMD		(0x138)
#define SDXC_REG_ERESP		(0x13C)

/* bits */
#define SDXC_HS400_MD_EN		(1U<<31)
#define SDXC_CARD_WR_THLD_ENB		(1U<<2)
#define SDXC_CARD_RD_THLD_ENB		(1U)
#define SDXC_CARD_BCIG_ENB		(1U<<1)

#define SDXC_DAT_DRV_PH_SEL		(1U<<17)
#define SDXC_CMD_DRV_PH_SEL		(1U<<16)
#define SDXC_SAMP_DL_SW_EN		(1u<<7)
#define SDXC_DS_DL_SW_EN		(1u<<7)

#define SDXC_2X_TIMING_MODE		(1U<<31)

#define SDXC_SFC_BP			BIT(0)

/* for SDXC_REG_ECMD register */
#define SDXC_A23_EN			(1u<<0)

/* masks */
#define SDXC_CRC_DET_PARA_MASK		(0xf)
#define SDXC_CARD_RD_THLD_MASK		(0x0FFF0000)
#define SDXC_TX_TL_MASK			(0xff)
#define SDXC_RX_TL_MASK			(0x00FF0000)

#define SDXC_SAMP_DL_SW_MASK		(0x0000003F)
#define SDXC_DS_DL_SW_MASK		(0x0000003F)

/* values */
#define SDXC_CRC_DET_PARA_HS400		(6)
#define SDXC_CRC_DET_PARA_OTHER		(3)
#define SDXC_FIFO_DETH			(1024>>2)

/* size */
#define SDXC_CARD_RD_THLD_SIZE		(0x00000FFF)

/* shift */
#define SDXC_CARD_RD_THLD_SIZE_SHIFT	(16)

#define SUNXI_DMA_TL_SDMMC_V4P5X	((0x3<<28)|(15<<16)|240)
/* one DMA descriptor can transfer up to 1 << SUNXI_DES_SIZE_SDMMC_V4P5X bytes */
#define SUNXI_DES_SIZE_SDMMC_V4P5X	(12)

/* EMCE controller */
#define SDXC_EMCE_ENCR			BIT(4)
#define SDXC_EMCE_AC_MD			BIT(1)
#define SDXC_EMCE_ENB			BIT(0)

/* Sunxi MMC Host Controller Version */
#define SMHC_VERSION_V4P7		0x40700
#define SMHC_VERSION_V4P9		0x40900
#define SMHC_VERSION_V4P5P1		0x40501
#define SMHC_VERSION_V4P5P2		0x40502
#define SMHC_VERSION_V5P3		0x50300

#if IS_ENABLED(CONFIG_SUNXI_EMCE)
extern int sunxi_emce_set_task_des(int data_len, int bypass);
extern void sunxi_emce_set_task_load(int para);
#endif

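/*
 * Snapshot of the controller-specific delay/timing registers, filled by
 * sunxi_mmc_save_spec_reg_v4p5x() and written back by
 * sunxi_mmc_restore_spec_reg_v4p5x().
 */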
struct sunxi_mmc_spec_regs {
	u32 drv_dl;	/* REG_DRV_DL */
	u32 samp_dl;	/* REG_SAMP_DL */
	u32 ds_dl;	/* REG_DS_DL */
	u32 sd_ntsr;	/* REG_SD_NTSR */
	u32 edsd;	/* REG_EDSD */
	u32 csdc;	/* REG_CSDC */
};

enum sunxi_mmc_speed_mode {
	SM0_DS26_SDR12 = 0,
	SM1_HSSDR52_SDR25,
	SM2_HSDDR52_DDR50,
	SM3_HS200_SDR104,
	SM4_HS400,
	SM4_HS400_CMD,
	SMX_UNUSED0,
	SMX_UNUSED1,
	SM_NUM,
};

struct sunxi_mmc_clk_dly {
	enum sunxi_mmc_speed_mode spm;
	char *mod_str;
	char *raw_tm_sm_str[2];
	u32 raw_tm_sm[2];
	u32 raw_tm_sm_def[2];
};

struct sunxi_mmc_ver_priv {
	struct sunxi_mmc_spec_regs bak_spec_regs;
	struct sunxi_mmc_clk_dly mmc_clk_dly[SM_NUM];
};

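/*
 * Look up the HS400 command-path sample delay for the given clock rate from
 * the "sdc_tm4_sm4_freq*_cmd" device-tree properties (SM4_HS400_CMD table).
 * Returns 0 and stores the delay in *out_dly on success; returns -EINVAL when
 * no device-tree node or property is available, in which case the caller
 * falls back to the HS200 sample delay.
 */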
static u32 sunxi_mmc_get_hs400_cmd_dly(struct sunxi_mmc_host *host, u32 clk, u32 *out_dly)
{
	struct mmc_host *mmc = host->mmc;
	enum sunxi_mmc_speed_mode speed_mod = SM0_DS26_SDR12;
	char *raw_sm_str = NULL;
	char *m_str = NULL;
	struct device_node *np = NULL;
	u32 *raw_sm = 0;
	u32 *raw_sm_def = 0;
	u32 rval = 0;
	int frq_index = 0;
	u32 sam_dly = 0;
	struct sunxi_mmc_clk_dly *mmc_clk_dly = ((struct sunxi_mmc_ver_priv *)host->version_priv_dat)->mmc_clk_dly;

	if (!mmc->parent || !mmc->parent->of_node) {
		SM_ERR(mmc_dev(host->mmc), "no dts to parse clk dly, use default\n");
		return -EINVAL;
	}

	np = mmc->parent->of_node;
	speed_mod = SM4_HS400_CMD;

	if (clk <= 400 * 1000) {
		frq_index = 0;
	} else if (clk <= 25 * 1000 * 1000) {
		frq_index = 1;
	} else if (clk <= 50 * 1000 * 1000) {
		frq_index = 2;
	} else if (clk <= 100 * 1000 * 1000) {
		frq_index = 3;
	} else if (clk <= 150 * 1000 * 1000) {
		frq_index = 4;
	} else if (clk <= 200 * 1000 * 1000) {
		frq_index = 5;
	} else if (clk <= 250 * 1000 * 1000) {
		frq_index = 6;
	} else if (clk <= 300 * 1000 * 1000) {
		frq_index = 7;
	} else {
		SM_ERR(mmc_dev(mmc), "clk is over 300mhz\n");
		return -EINVAL;
	}

	if (frq_index / 4 > 2) {
		SM_ERR(mmc_dev(host->mmc), "err frq_index\n");
		return -EINVAL;
	}

	SM_DBG(mmc_dev(host->mmc), "freq %d frq index %d, frq/4 %x\n", clk, frq_index, frq_index / 4);
	raw_sm_str = mmc_clk_dly[speed_mod].raw_tm_sm_str[frq_index / 4];
	raw_sm = &mmc_clk_dly[speed_mod].raw_tm_sm[frq_index / 4];
	raw_sm_def = &mmc_clk_dly[speed_mod].raw_tm_sm_def[frq_index / 4];
	m_str = mmc_clk_dly[speed_mod].mod_str;

	rval = of_property_read_u32(np, raw_sm_str, raw_sm);
	if (rval) {
		SM_INFO(mmc_dev(host->mmc), "failed to get %s, use default\n", m_str);
		return -EINVAL;
	} else {
		u32 sm_shift = (frq_index % 4) * 8;

		rval = ((*raw_sm) >> sm_shift) & 0xff;
		if (rval != 0xff) {
			sam_dly = rval;
			SM_DBG(mmc_dev(host->mmc), "Get speed mode %s clk dly %s ok\n", m_str, raw_sm_str);
		} else {
			SM_DBG(mmc_dev(host->mmc), "%s use default value\n", m_str);
			rval = ((*raw_sm_def) >> sm_shift) & 0xff;
			sam_dly = rval;
		}
		*out_dly = sam_dly;
	}

	return 0;
}

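/*
 * Program the command/data drive phase, sample delay and data strobe delay
 * for the requested clock, bus width and timing. The per-speed-mode delay
 * bytes are parsed from the "sdc_tm4_smN_freqM" device-tree properties;
 * bytes that read back as 0xff fall back to the built-in defaults set up in
 * sunxi_mmc_init_priv_*().
 */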
static void sunxi_mmc_set_clk_dly(struct sunxi_mmc_host *host, int clk,
				  int bus_width, int timing)
{
	struct mmc_host *mmc = host->mmc;
	enum sunxi_mmc_speed_mode speed_mod = SM0_DS26_SDR12;
	char *raw_sm_str = NULL;
	char *m_str = NULL;
	struct device_node *np = NULL;
	u32 *raw_sm = 0;
	u32 *raw_sm_def = 0;
	u32 rval = 0;
	int frq_index = 0;
	u32 cmd_drv_ph = 1;
	u32 dat_drv_ph = 0;
	u32 sam_dly = 0;
	u32 ds_dly = 0;
	struct sunxi_mmc_clk_dly *mmc_clk_dly =
		((struct sunxi_mmc_ver_priv *)host->version_priv_dat)->mmc_clk_dly;

	if (!mmc->parent || !mmc->parent->of_node) {
		SM_ERR(mmc_dev(host->mmc),
		       "no dts to parse clk dly, use default\n");
		return;
	}

	np = mmc->parent->of_node;

	switch (timing) {
	case MMC_TIMING_LEGACY:
	case MMC_TIMING_UHS_SDR12:
		speed_mod = SM0_DS26_SDR12;
		break;
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
	case MMC_TIMING_UHS_SDR25:
		speed_mod = SM1_HSSDR52_SDR25;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		if (bus_width == 8)
			dat_drv_ph = 1;
		speed_mod = SM2_HSDDR52_DDR50;
		break;
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		speed_mod = SM3_HS200_SDR104;
		break;
	case MMC_TIMING_MMC_HS400:
		speed_mod = SM4_HS400;
		break;
	default:
		SM_ERR(mmc_dev(mmc), "Wrong timing input\n");
		return;
	}

	if (clk <= 400 * 1000) {
		frq_index = 0;
	} else if (clk <= 25 * 1000 * 1000) {
		frq_index = 1;
	} else if (clk <= 50 * 1000 * 1000) {
		frq_index = 2;
	} else if (clk <= 100 * 1000 * 1000) {
		frq_index = 3;
	} else if (clk <= 150 * 1000 * 1000) {
		frq_index = 4;
	} else if (clk <= 200 * 1000 * 1000) {
		frq_index = 5;
	} else if (clk <= 250 * 1000 * 1000) {
		frq_index = 6;
	} else if (clk <= 300 * 1000 * 1000) {
		frq_index = 7;
	} else {
		SM_ERR(mmc_dev(mmc), "clk is over 300mhz\n");
		return;
	}

	if (frq_index / 4 > 2) {
		SM_ERR(mmc_dev(host->mmc), "err frq_index\n");
		return;
	}

	SM_DBG(mmc_dev(host->mmc), "freq %d frq index %d, frq/4 %x\n", clk,
	       frq_index, frq_index / 4);
	raw_sm_str = mmc_clk_dly[speed_mod].raw_tm_sm_str[frq_index / 4];
	raw_sm = &mmc_clk_dly[speed_mod].raw_tm_sm[frq_index / 4];
	raw_sm_def = &mmc_clk_dly[speed_mod].raw_tm_sm_def[frq_index / 4];
	m_str = mmc_clk_dly[speed_mod].mod_str;

	rval = of_property_read_u32(np, raw_sm_str, raw_sm);
	if (rval) {
		SM_INFO(mmc_dev(host->mmc), "failed to get %s, use default\n",
			m_str);
	} else {
		u32 sm_shift = (frq_index % 4) * 8;

		rval = ((*raw_sm) >> sm_shift) & 0xff;
		if (rval != 0xff) {
			if (timing == MMC_TIMING_MMC_HS400) {
				u32 raw_sm_hs200 = 0;
				u32 hs400_cmd_dly = 0;
				s32 ret = sunxi_mmc_get_hs400_cmd_dly(host, clk, &hs400_cmd_dly);

				ds_dly = rval;
				if (ret != 0) {
					raw_sm_hs200 =
					    mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm[frq_index / 4];
					sam_dly = ((raw_sm_hs200) >> sm_shift) & 0xff;
				} else {
					sam_dly = hs400_cmd_dly;
					/* sam_dly = 57; */
					/* printk("force 57 sample dly\n"); */
				}
			} else {
				sam_dly = rval;
			}
			SM_DBG(mmc_dev(host->mmc),
			       "Get speed mode %s clk dly %s ok\n", m_str,
			       raw_sm_str);
		} else {
			SM_DBG(mmc_dev(host->mmc), "%s use default value\n",
			       m_str);
			rval = ((*raw_sm_def) >> sm_shift) & 0xff;
			if (timing == MMC_TIMING_MMC_HS400) {
				u32 raw_sm_hs200 = 0;

				ds_dly = rval;
				raw_sm_hs200 =
				    mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_def[frq_index / 4];
				sam_dly = ((raw_sm_hs200) >> sm_shift) & 0xff;
			} else {
				sam_dly = rval;
			}
		}
	}

	SM_DBG(mmc_dev(host->mmc), "Try set %s clk dly ok\n", m_str);
	SM_DBG(mmc_dev(host->mmc), "cmd_drv_ph %d\n", cmd_drv_ph);
	SM_DBG(mmc_dev(host->mmc), "dat_drv_ph %d\n", dat_drv_ph);
	SM_DBG(mmc_dev(host->mmc), "sam_dly %d\n", sam_dly);
	SM_DBG(mmc_dev(host->mmc), "ds_dly %d\n", ds_dly);

	rval = mmc_readl(host, REG_DRV_DL);
	if (cmd_drv_ph)
		rval |= SDXC_CMD_DRV_PH_SEL;	/* 180 phase */
	else
		rval &= ~SDXC_CMD_DRV_PH_SEL;	/* 90 phase */

	if (dat_drv_ph)
		rval |= SDXC_DAT_DRV_PH_SEL;	/* 180 phase */
	else
		rval &= ~SDXC_DAT_DRV_PH_SEL;	/* 90 phase */

	sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, rval));

	rval = mmc_readl(host, REG_SAMP_DL);
	rval &= ~SDXC_SAMP_DL_SW_MASK;
	rval |= sam_dly & SDXC_SAMP_DL_SW_MASK;
	rval |= SDXC_SAMP_DL_SW_EN;
	mmc_writel(host, REG_SAMP_DL, rval);

	rval = mmc_readl(host, REG_DS_DL);
	rval &= ~SDXC_DS_DL_SW_MASK;
	rval |= ds_dly & SDXC_DS_DL_SW_MASK;
	rval |= SDXC_DS_DL_SW_EN;
	mmc_writel(host, REG_DS_DL, rval);

	if (host->sfc_dis == true) {
		rval = mmc_readl(host, REG_SFC);
		rval |= SDXC_SFC_BP;
		mmc_writel(host, REG_SFC, rval);
		SM_DBG(mmc_dev(host->mmc), "sfc 0x%x\n", mmc_readl(host, REG_SFC));
	}

	SM_DBG(mmc_dev(host->mmc), " REG_DRV_DL %08x\n",
	       mmc_readl(host, REG_DRV_DL));
	SM_DBG(mmc_dev(host->mmc), " REG_SAMP_DL %08x\n",
	       mmc_readl(host, REG_SAMP_DL));
	SM_DBG(mmc_dev(host->mmc), " REG_DS_DL %08x\n",
	       mmc_readl(host, REG_DS_DL));
}

static void sunxi_mmc_dump_dly2(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_clk_dly *mmc_clk_dly =
		((struct sunxi_mmc_ver_priv *)host->version_priv_dat)->mmc_clk_dly;
	int i = 0;

	for (i = 0; i < SM_NUM; i++) {
		pr_info("mod_str %s\n", mmc_clk_dly[i].mod_str);
		pr_info("raw_tm_sm_str %s\n", mmc_clk_dly[i].raw_tm_sm_str[0]);
		pr_info("raw_tm_sm_str %s\n", mmc_clk_dly[i].raw_tm_sm_str[1]);
		pr_info("raw_tm_sm0 %x\n", mmc_clk_dly[i].raw_tm_sm[0]);
		pr_info("raw_tm_sm1 %x\n", mmc_clk_dly[i].raw_tm_sm[1]);
		pr_info("********************\n");
	}
}

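/*
 * Configure the controller's EMCE path: program the EMCE control register
 * (enable, encrypt and AC-mode bits) and, when CONFIG_SUNXI_EMCE is enabled,
 * hand the task descriptor (data length/bypass) and task load over to the
 * external EMCE driver.
 */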
static void sunxi_mmc_on_off_emce_v4p6x(struct sunxi_mmc_host *host,
					u32 en_crypt, u32 ac_mode, u32 en_emce,
					int data_len, int bypass, int task_load)
{
	u32 rval = 0;

#if IS_ENABLED(CONFIG_SUNXI_EMCE)
	sunxi_emce_set_task_des(data_len, bypass);
#endif
	rval = mmc_readl(host, REG_EMCE);
	rval &= 0x0000FFFF;
	rval |= (0x200 << 16);
	mmc_writel(host, REG_EMCE, rval);
	rval &= ~(SDXC_EMCE_ENB | SDXC_EMCE_ENCR | SDXC_EMCE_AC_MD);
	if (en_emce)
		rval |= SDXC_EMCE_ENB;
	if (en_crypt)
		rval |= SDXC_EMCE_ENCR;
	if (ac_mode)
		rval |= SDXC_EMCE_AC_MD;
	mmc_writel(host, REG_EMCE, rval);
	SM_DBG(mmc_dev(host->mmc), "%s REG_EMCE:%x\n", __func__,
	       mmc_readl(host, REG_EMCE));
#if IS_ENABLED(CONFIG_SUNXI_EMCE)
	sunxi_emce_set_task_load(task_load);
#endif
}

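/*
 * Gate or ungate the card clock. The CLKCR register is updated with the
 * requested clock-on/power-save/mask-data0 bits and an "update clock only"
 * command is issued; the function waits up to 250ms for the controller to
 * complete it and returns -EIO on timeout.
 */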
static int __sunxi_mmc_do_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en,
				     u32 pwr_save, u32 ignore_dat0)
{
	unsigned long expire = jiffies + msecs_to_jiffies(250);
	u32 rval;

	rval = mmc_readl(host, REG_CLKCR);
	rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0);

	if (oclk_en)
		rval |= SDXC_CARD_CLOCK_ON;
	if (pwr_save)
		rval |= SDXC_LOW_POWER_ON;
	if (ignore_dat0)
		rval |= SDXC_MASK_DATA0;

	mmc_writel(host, REG_CLKCR, rval);

	SM_DBG(mmc_dev(host->mmc), "%s REG_CLKCR:%x\n", __func__,
	       mmc_readl(host, REG_CLKCR));

	rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER;
	mmc_writel(host, REG_CMDR, rval);

	do {
		rval = mmc_readl(host, REG_CMDR);
	} while (time_before(jiffies, expire) && (rval & SDXC_START));

	/* clear irq status bits set by the command */
	mmc_writel(host, REG_RINTR,
		   mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT);

	if (rval & SDXC_START) {
		SM_ERR(mmc_dev(host->mmc), "fatal err update clk timeout\n");
		return -EIO;
	}

	/* DATA0 is only masked while updating the clock; clear the mask again afterwards */
	if (ignore_dat0)
		mmc_writel(host, REG_CLKCR,
			   mmc_readl(host, REG_CLKCR) & ~SDXC_MASK_DATA0);

	return 0;
}

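/*
 * Wrapper around __sunxi_mmc_do_oclk_onoff() that enables the low-power
 * (clock gating) option when the "sunxi-power-save-mode" property is present
 * in the device tree.
 */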
static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
{
	struct device_node *np = NULL;
	struct mmc_host *mmc = host->mmc;
	int pwr_save = 0;
	int len = 0;

	if (!mmc->parent || !mmc->parent->of_node) {
		SM_ERR(mmc_dev(host->mmc),
		       "no dts to parse power save mode\n");
		return -EIO;
	}

	np = mmc->parent->of_node;
	if (of_find_property(np, "sunxi-power-save-mode", &len))
		pwr_save = 1;

	return __sunxi_mmc_do_oclk_onoff(host, oclk_en, pwr_save, 1);
}
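
/*
 * Set the module clock for the requested ios: pick osc24m or pll_periph as
 * parent, run the controller in 1x timing mode (2:1 module-to-card clock
 * ratio, or 4:1 for 8-bit DDR52), program the internal divider, switch HS400
 * mode and CRC status detection on or off, and finally apply the delay-chain
 * settings via sunxi_mmc_set_clk_dly().
 */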
int sunxi_mmc_clk_set_rate_for_sdmmc_v4p5x(struct sunxi_mmc_host *host,
					   struct mmc_ios *ios)
{
	u32 mod_clk = 0;
	u32 src_clk = 0;
	u32 rval = 0;
	s32 err = 0;
	u32 rate = 0;
	char *sclk_name = NULL;
	struct clk *mclk = host->clk_mmc;
	struct clk *sclk = NULL;
	struct device *dev = mmc_dev(host->mmc);
	int div = 0;

	if (ios->clock == 0) {
		__sunxi_mmc_do_oclk_onoff(host, 0, 0, 1);
		return 0;
	}

	if ((ios->bus_width == MMC_BUS_WIDTH_8)
	    && (ios->timing == MMC_TIMING_MMC_DDR52)) {
		mod_clk = ios->clock << 2;
		div = 1;
	} else {
		mod_clk = ios->clock << 1;
		div = 0;
	}

	sclk = clk_get(dev, "osc24m");
	sclk_name = "osc24m";
	if (IS_ERR(sclk)) {
		SM_ERR(mmc_dev(host->mmc), "Error to get source clock %s\n",
		       sclk_name);
		return -1;
	}

	src_clk = clk_get_rate(sclk);
	if (mod_clk > src_clk) {
		clk_put(sclk);
		sclk = clk_get(dev, "pll_periph");
		sclk_name = "pll_periph";
	}
	if (IS_ERR(sclk)) {
		SM_ERR(mmc_dev(host->mmc), "Error to get source clock %s\n",
		       sclk_name);
		return -1;
	}

	sunxi_mmc_oclk_onoff(host, 0);

	err = clk_set_parent(mclk, sclk);
	if (err) {
		SM_ERR(mmc_dev(host->mmc), "set parent failed\n");
		clk_put(sclk);
		return -1;
	}

	rate = clk_round_rate(mclk, mod_clk);

	SM_DBG(mmc_dev(host->mmc), "get round rate %d\n", rate);

	clk_disable_unprepare(host->clk_mmc);

	err = clk_set_rate(mclk, rate);
	if (err) {
		SM_ERR(mmc_dev(host->mmc), "set mclk rate error, rate %dHz\n",
		       rate);
		clk_put(sclk);
		return -1;
	}

	rval = clk_prepare_enable(host->clk_mmc);
	if (rval) {
		SM_ERR(mmc_dev(host->mmc), "Enable mmc clk err %d\n", rval);
		return -1;
	}

	src_clk = clk_get_rate(sclk);
	clk_put(sclk);

	SM_DBG(mmc_dev(host->mmc), "set round clock %d, source clk is %d\n",
	       rate, src_clk);

#ifdef MMC_FPGA
	if ((ios->bus_width == MMC_BUS_WIDTH_8)
	    && (ios->timing == MMC_TIMING_MMC_DDR52)) {
		/* clear internal divider */
		rval = mmc_readl(host, REG_CLKCR);
		rval &= ~0xff;
		rval |= 1;
	} else {
		/* support internal divide clock under fpga environment */
		rval = mmc_readl(host, REG_CLKCR);
		rval &= ~0xff;
		rval |= 24000000 / mod_clk / 2;	/* = 24M/400K/2 = 0x1E */
	}
	mmc_writel(host, REG_CLKCR, rval);
	SM_INFO(mmc_dev(host->mmc), "--FPGA REG_CLKCR: 0x%08x\n",
		mmc_readl(host, REG_CLKCR));
#else
	/* clear internal divider */
	rval = mmc_readl(host, REG_CLKCR);
	rval &= ~0xff;
	rval |= div;
	mmc_writel(host, REG_CLKCR, rval);
#endif

	/* enable 1x timing mode */
	rval = mmc_readl(host, REG_SD_NTSR);
	rval &= ~(SDXC_2X_TIMING_MODE);
	mmc_writel(host, REG_SD_NTSR, rval);

	if ((ios->bus_width == MMC_BUS_WIDTH_8)
	    && (ios->timing == MMC_TIMING_MMC_HS400)) {
		rval = mmc_readl(host, REG_EDSD);
		rval |= SDXC_HS400_MD_EN;
		mmc_writel(host, REG_EDSD, rval);
		rval = mmc_readl(host, REG_CSDC);
		rval &= ~SDXC_CRC_DET_PARA_MASK;
		rval |= SDXC_CRC_DET_PARA_HS400;
		mmc_writel(host, REG_CSDC, rval);
	} else {
		rval = mmc_readl(host, REG_EDSD);
		rval &= ~SDXC_HS400_MD_EN;
		mmc_writel(host, REG_EDSD, rval);
		rval = mmc_readl(host, REG_CSDC);
		rval &= ~SDXC_CRC_DET_PARA_MASK;
		rval |= SDXC_CRC_DET_PARA_OTHER;
		mmc_writel(host, REG_CSDC, rval);
	}
	SM_DBG(mmc_dev(host->mmc), "SDXC_REG_EDSD: 0x%08x\n",
	       mmc_readl(host, REG_EDSD));
	SM_DBG(mmc_dev(host->mmc), "SDXC_REG_CSDC: 0x%08x\n",
	       mmc_readl(host, REG_CSDC));

	/* sunxi_of_parse_clk_dly(host); */
	if ((ios->bus_width == MMC_BUS_WIDTH_8)
	    && (ios->timing == MMC_TIMING_MMC_DDR52)) {
		ios->clock = rate >> 2;
	} else {
		ios->clock = rate >> 1;
	}

	sunxi_mmc_set_clk_dly(host, ios->clock, ios->bus_width, ios->timing);

	return sunxi_mmc_oclk_onoff(host, 1);
}

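/*
 * Configure the card read/write FIFO thresholds for a transfer. The write
 * threshold is enabled whenever the block size fits into the TX threshold,
 * while the read threshold is only enabled for HS200/HS400 transfers whose
 * block size fits into the remaining FIFO space.
 */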
void sunxi_mmc_thld_ctl_for_sdmmc_v4p5x(struct sunxi_mmc_host *host,
					struct mmc_ios *ios,
					struct mmc_data *data)
{
	u32 bsz = data->blksz;
	u32 tdtl = (host->dma_tl & SDXC_TX_TL_MASK) << 2;		/* unit: byte */
	u32 rdtl = ((host->dma_tl & SDXC_RX_TL_MASK) >> 16) << 2;	/* unit: byte */
	u32 rval = 0;

	if ((data->flags & MMC_DATA_WRITE)
	    && (bsz <= SDXC_CARD_RD_THLD_SIZE)
	    && (bsz <= tdtl)) {
		rval = mmc_readl(host, REG_THLD);
		rval &= ~SDXC_CARD_RD_THLD_MASK;
		rval |= data->blksz << SDXC_CARD_RD_THLD_SIZE_SHIFT;
		rval |= SDXC_CARD_WR_THLD_ENB;
		mmc_writel(host, REG_THLD, rval);
	} else {
		rval = mmc_readl(host, REG_THLD);
		rval &= ~(SDXC_CARD_WR_THLD_ENB);
		mmc_writel(host, REG_THLD, rval);
	}

	if ((data->flags & MMC_DATA_READ)
	    && (bsz <= SDXC_CARD_RD_THLD_SIZE)
	    /* ((SDXC_FIFO_DETH<<2)-bsz) >= (rdtl) */
	    && ((SDXC_FIFO_DETH << 2) >= (rdtl + bsz))
	    && ((ios->timing == MMC_TIMING_MMC_HS200)
		|| (ios->timing == MMC_TIMING_MMC_HS400))) {
		rval = mmc_readl(host, REG_THLD);
		rval &= ~SDXC_CARD_RD_THLD_MASK;
		rval |= data->blksz << SDXC_CARD_RD_THLD_SIZE_SHIFT;
		rval |= SDXC_CARD_RD_THLD_ENB;
		mmc_writel(host, REG_THLD, rval);
	} else {
		rval = mmc_readl(host, REG_THLD);
		rval &= ~SDXC_CARD_RD_THLD_ENB;
		mmc_writel(host, REG_THLD, rval);
	}

	SM_DBG(mmc_dev(host->mmc), "--SDXC_REG_THLD: 0x%08x\n",
	       mmc_readl(host, REG_THLD));
}

void sunxi_mmc_save_spec_reg_v4p5x(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_spec_regs *spec_regs =
		&((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))->bak_spec_regs;

	spec_regs->drv_dl = mmc_readl(host, REG_DRV_DL);
	spec_regs->samp_dl = mmc_readl(host, REG_SAMP_DL);
	spec_regs->ds_dl = mmc_readl(host, REG_DS_DL);
	spec_regs->sd_ntsr = mmc_readl(host, REG_SD_NTSR);
	spec_regs->edsd = mmc_readl(host, REG_EDSD);
	spec_regs->csdc = mmc_readl(host, REG_CSDC);
}

void sunxi_mmc_restore_spec_reg_v4p5x(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_spec_regs *spec_regs =
		&((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))->bak_spec_regs;

	sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, spec_regs->drv_dl));
	mmc_writel(host, REG_SAMP_DL, spec_regs->samp_dl);
	mmc_writel(host, REG_DS_DL, spec_regs->ds_dl);
	mmc_writel(host, REG_SD_NTSR, spec_regs->sd_ntsr);
	mmc_writel(host, REG_EDSD, spec_regs->edsd);
	mmc_writel(host, REG_CSDC, spec_regs->csdc);
}

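/*
 * Auto CMD23 support for controller versions >= 4.9: when 'set' is true the
 * block-count argument is written to REG_A23A and the A23 enable bit is set
 * in REG_ECMD; when 'set' is false the CMD23 response is read back from
 * REG_ERESP into *rep.
 */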
bool sunxi_mmc_opacmd23_v4p9(struct sunxi_mmc_host *host, bool set, u32 arg, u32 *rep)
{
	if (set) {
		mmc_writel(host, REG_A23A, arg);
		mmc_writel(host, REG_ECMD, mmc_readl(host, REG_ECMD) | SDXC_A23_EN);
		SM_DBG(mmc_dev(host->mmc), "REG_ECMD %x, REG_A23A %x\n",
		       mmc_readl(host, REG_ECMD), mmc_readl(host, REG_A23A));
	} else {
		if (rep)
			*rep = mmc_readl(host, REG_ERESP);
		else
			SM_ERR(mmc_dev(host->mmc), "invalid response pointer\n");
	}
	return set;
}

void sunxi_mmc_set_ds_dl_raw(struct sunxi_mmc_host *host, int sunxi_ds_dl)
{
	u32 rval;

	rval = mmc_readl(host, REG_DS_DL);
	rval &= ~SDXC_DS_DL_SW_MASK;
	rval |= sunxi_ds_dl & SDXC_DS_DL_SW_MASK;
	rval |= SDXC_DS_DL_SW_EN;
	mmc_writel(host, REG_DS_DL, rval);

	SM_INFO(mmc_dev(host->mmc), "RETRY: REG_DS_DL: 0x%08x\n",
		mmc_readl(host, REG_DS_DL));
}

void sunxi_mmc_set_samp_dl_raw(struct sunxi_mmc_host *host, int sunxi_samp_dl)
{
	u32 rval;

	rval = mmc_readl(host, REG_SAMP_DL);
	rval &= ~SDXC_SAMP_DL_SW_MASK;
	rval |= sunxi_samp_dl & SDXC_SAMP_DL_SW_MASK;
	rval |= SDXC_SAMP_DL_SW_EN;
	mmc_writel(host, REG_SAMP_DL, rval);

	SM_INFO(mmc_dev(host->mmc), "RETRY: REG_SAMP_DL: 0x%08x\n",
		mmc_readl(host, REG_SAMP_DL));
}

/* #define SUNXI_RETRY_TEST 1 */

#ifdef SUNXI_RETRY_TEST
#define SUNXI_MAX_RETRY_INTERVAL_V4P5X	32
#else
#define SUNXI_MAX_RETRY_INTERVAL_V4P5X	2
#endif
#define SUNXI_MAX_DELAY_POINT_V4P5X	64
#define SUNXI_MAX_RETRY_CNT_V4P5X	(SUNXI_MAX_DELAY_POINT_V4P5X / SUNXI_MAX_RETRY_INTERVAL_V4P5X)

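/*
 * Tuning-by-retry policy. On a failed request the sample delay (command
 * errors and timeouts) or data strobe delay (data errors in HS400) is
 * stepped by SUNXI_MAX_RETRY_INTERVAL_V4P5X and the request is retried; in
 * HS400 mode, once all retry budgets are exhausted, the HS200/HS400/DDR
 * capability bits are cleared so the card is re-negotiated in plain HS mode.
 */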
static int sunxi_mmc_judge_retry_v4p6x(struct sunxi_mmc_host *host,
				       struct mmc_command *cmd, u32 rcnt,
				       u32 herrno, void *other)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_card *card = mmc->card;

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400) {
		if (rcnt < SUNXI_MAX_RETRY_CNT_V4P5X * 2) {
			if (herrno) {
				SM_DBG(mmc_dev(host->mmc), "<error retry>\n");
				if (herrno & SDXC_INTERRUPT_CMD_ERROR_BIT) {
					if (host->sunxi_samp_dl_cnt >= SUNXI_MAX_RETRY_CNT_V4P5X)
						goto TO_HS;
					sunxi_mmc_set_samp_dl_raw(host, (host->sunxi_samp_dl) % SUNXI_MAX_DELAY_POINT_V4P5X);
#ifdef SUNXI_RETRY_TEST
					sunxi_mmc_set_samp_dl_raw(host, 52);
#endif
					host->sunxi_samp_dl += SUNXI_MAX_RETRY_INTERVAL_V4P5X;
					host->sunxi_samp_dl_cnt++;
					return 0;
				} else if (host->sunxi_ds_dl_cnt < SUNXI_MAX_RETRY_CNT_V4P5X) {
					sunxi_mmc_set_ds_dl_raw(host, (host->sunxi_ds_dl) % SUNXI_MAX_DELAY_POINT_V4P5X);
#ifdef SUNXI_RETRY_TEST
					sunxi_mmc_set_ds_dl_raw(host, 55);
#endif
					host->sunxi_ds_dl += SUNXI_MAX_RETRY_INTERVAL_V4P5X;
					host->sunxi_ds_dl_cnt++;
					return 0;
				}
			} else {
				SM_DBG(mmc_dev(host->mmc), "<timeout retry>\n");
				if (host->sunxi_samp_dl_cnt < SUNXI_MAX_RETRY_CNT_V4P5X) {
					sunxi_mmc_set_samp_dl_raw(host, (host->sunxi_samp_dl) % SUNXI_MAX_DELAY_POINT_V4P5X);
#ifdef SUNXI_RETRY_TEST
					sunxi_mmc_set_samp_dl_raw(host, 52);
#endif
					host->sunxi_samp_dl_cnt++;
					host->sunxi_samp_dl += SUNXI_MAX_RETRY_INTERVAL_V4P5X;
					return 0;
				} else if (host->sunxi_ds_dl_cnt < SUNXI_MAX_RETRY_CNT_V4P5X) {
					sunxi_mmc_set_ds_dl_raw(host, (host->sunxi_ds_dl) % SUNXI_MAX_DELAY_POINT_V4P5X);
#ifdef SUNXI_RETRY_TEST
					sunxi_mmc_set_ds_dl_raw(host, 55);
#endif
					host->sunxi_ds_dl_cnt++;
					host->sunxi_ds_dl += SUNXI_MAX_RETRY_INTERVAL_V4P5X;
					return 0;
				}
			}
		}
TO_HS:
		/* Clear the HS200/HS400/DDR bits in mmc_avail_type so the speed mode drops back to HS */
		SM_INFO(mmc_dev(host->mmc), "sunxi v4p5x/v4p6x retry give up, return to HS\n");
		card->mmc_avail_type &= ~(EXT_CSD_CARD_TYPE_HS200 | EXT_CSD_CARD_TYPE_HS400
					  | EXT_CSD_CARD_TYPE_HS400ES | EXT_CSD_CARD_TYPE_DDR_52);
		return -1;
	} else {
		if (rcnt < SUNXI_MAX_RETRY_CNT_V4P5X) {
			sunxi_mmc_set_samp_dl_raw(host, (host->sunxi_samp_dl) % SUNXI_MAX_DELAY_POINT_V4P5X);
			host->sunxi_samp_dl += SUNXI_MAX_RETRY_INTERVAL_V4P5X;
			return 0;
		}

		SM_INFO(mmc_dev(host->mmc), "sunxi v4p5x/v4p6x retry give up!\n");
		return -1;
	}
}

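/*
 * Enable or disable hardware write-busy waiting for write transfers by
 * toggling SDXC_CARD_BCIG_ENB in the card threshold register.
 */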
static void sunxi_mmc_hw_wbusy_wait_v4p5x(struct sunxi_mmc_host *host, struct mmc_data *data, bool set)
{
	u32 rval = 0;

	rval = mmc_readl(host, REG_THLD);
	if (data && (data->flags & MMC_DATA_WRITE) && set) {
		rval |= SDXC_CARD_BCIG_ENB;
	} else {
		rval &= ~SDXC_CARD_BCIG_ENB;
	}

	mmc_writel(host, REG_THLD, rval);
	SM_DBG(mmc_dev(host->mmc), "SDXC_REG_THLD: 0x%08x\n",
	       mmc_readl(host, REG_THLD));
}

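/*
 * Set up the version-private data and callbacks for v4.5x controllers:
 * delay-table property names and defaults for every speed mode, plus the
 * clock, threshold, register save/restore, retry and card-clock callbacks
 * used by the core sunxi-mmc driver.
 */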
void sunxi_mmc_init_priv_v4p5x(struct sunxi_mmc_host *host,
			       struct platform_device *pdev, int phy_index)
{
	struct sunxi_mmc_ver_priv *ver_priv =
		devm_kzalloc(&pdev->dev, sizeof(struct sunxi_mmc_ver_priv),
			     GFP_KERNEL);

	host->version_priv_dat = ver_priv;

	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].spm = SM0_DS26_SDR12;
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].mod_str = "DS26_SDR12";
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm_str[0] = "sdc_tm4_sm0_freq0";
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm_str[1] = "sdc_tm4_sm0_freq1";
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm[1] = 0;
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm_def[1] = 0;

	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].spm = SM1_HSSDR52_SDR25;
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].mod_str = "HSSDR52_SDR25";
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm_str[0] = "sdc_tm4_sm1_freq0";
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm_str[1] = "sdc_tm4_sm1_freq1";
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm[1] = 0;
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm_def[1] = 0;

	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].spm = SM2_HSDDR52_DDR50;
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].mod_str = "HSDDR52_DDR50";
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm_str[0] = "sdc_tm4_sm2_freq0";
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm_str[1] = "sdc_tm4_sm2_freq1";
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm[1] = 0;
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm_def[1] = 0;

	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].spm = SM3_HS200_SDR104;
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].mod_str = "HS200_SDR104";
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_str[0] = "sdc_tm4_sm3_freq0";
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_str[1] = "sdc_tm4_sm3_freq1";
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm[1] = 0;
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_def[1] = 0x00000405;

	ver_priv->mmc_clk_dly[SM4_HS400].spm = SM4_HS400;
	ver_priv->mmc_clk_dly[SM4_HS400].mod_str = "HS400";
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm_str[0] = "sdc_tm4_sm4_freq0";
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm_str[1] = "sdc_tm4_sm4_freq1";
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm[1] = 0x00000608;
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm_def[1] = 0x00000408;

	ver_priv->mmc_clk_dly[SM4_HS400_CMD].spm = SM4_HS400_CMD;
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].mod_str = "HS400_cmd";
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm_str[0] = "sdc_tm4_sm4_freq0_cmd";
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm_str[1] = "sdc_tm4_sm4_freq1_cmd";
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm[0] = 0x0;
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm[1] = 0x0;
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm_def[0] = 0x2520ffff;
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm_def[1] = 0xffffff11;

	host->sunxi_mmc_clk_set_rate = sunxi_mmc_clk_set_rate_for_sdmmc_v4p5x;
	/* host->dma_tl = (0x2<<28)|(7<<16)|248; */
	host->dma_tl = SUNXI_DMA_TL_SDMMC_V4P5X;
	/* host->idma_des_size_bits = 15; */
	host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC_V4P5X;
	host->sunxi_mmc_thld_ctl = sunxi_mmc_thld_ctl_for_sdmmc_v4p5x;
	host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg_v4p5x;
	host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg_v4p5x;
	sunxi_mmc_reg_ex_res_inter(host, phy_index);
	host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
	host->sunxi_mmc_dump_dly_table = sunxi_mmc_dump_dly2;
	host->phy_index = phy_index;
	host->sunxi_mmc_judge_retry = sunxi_mmc_judge_retry_v4p6x;
	if (mmc_readl(host, REG_SMCV) == SMHC_VERSION_V4P5P1)
		host->sfc_dis = true;

	host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff;
}
EXPORT_SYMBOL_GPL(sunxi_mmc_init_priv_v4p5x);

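/*
 * Same as sunxi_mmc_init_priv_v4p5x() but for v4.6x and later controllers;
 * additionally hooks up hardware write-busy waiting and, depending on the
 * SMHC version register, EMCE control, auto CMD23, SFC bypass and the
 * descriptor address shift.
 */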
void sunxi_mmc_init_priv_v4p6x(struct sunxi_mmc_host *host,
			       struct platform_device *pdev, int phy_index)
{
	struct sunxi_mmc_ver_priv *ver_priv =
		devm_kzalloc(&pdev->dev, sizeof(struct sunxi_mmc_ver_priv),
			     GFP_KERNEL);

	host->version_priv_dat = ver_priv;

	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].spm = SM0_DS26_SDR12;
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].mod_str = "DS26_SDR12";
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm_str[0] = "sdc_tm4_sm0_freq0";
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm_str[1] = "sdc_tm4_sm0_freq1";
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm[1] = 0;
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM0_DS26_SDR12].raw_tm_sm_def[1] = 0;

	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].spm = SM1_HSSDR52_SDR25;
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].mod_str = "HSSDR52_SDR25";
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm_str[0] = "sdc_tm4_sm1_freq0";
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm_str[1] = "sdc_tm4_sm1_freq1";
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm[1] = 0;
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM1_HSSDR52_SDR25].raw_tm_sm_def[1] = 0;

	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].spm = SM2_HSDDR52_DDR50;
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].mod_str = "HSDDR52_DDR50";
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm_str[0] = "sdc_tm4_sm2_freq0";
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm_str[1] = "sdc_tm4_sm2_freq1";
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm[1] = 0;
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM2_HSDDR52_DDR50].raw_tm_sm_def[1] = 0;

	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].spm = SM3_HS200_SDR104;
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].mod_str = "HS200_SDR104";
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_str[0] = "sdc_tm4_sm3_freq0";
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_str[1] = "sdc_tm4_sm3_freq1";
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm[1] = 0;
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM3_HS200_SDR104].raw_tm_sm_def[1] = 0x00000405;

	ver_priv->mmc_clk_dly[SM4_HS400].spm = SM4_HS400;
	ver_priv->mmc_clk_dly[SM4_HS400].mod_str = "HS400";
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm_str[0] = "sdc_tm4_sm4_freq0";
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm_str[1] = "sdc_tm4_sm4_freq1";
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm[0] = 0;
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm[1] = 0x00000608;
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm_def[0] = 0;
	ver_priv->mmc_clk_dly[SM4_HS400].raw_tm_sm_def[1] = 0x00000408;

	ver_priv->mmc_clk_dly[SM4_HS400_CMD].spm = SM4_HS400_CMD;
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].mod_str = "HS400_cmd";
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm_str[0] = "sdc_tm4_sm4_freq0_cmd";
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm_str[1] = "sdc_tm4_sm4_freq1_cmd";
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm[0] = 0x0;
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm[1] = 0x0;
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm_def[0] = 0x2520ffff;
	ver_priv->mmc_clk_dly[SM4_HS400_CMD].raw_tm_sm_def[1] = 0xffffff11;

	host->sunxi_mmc_clk_set_rate = sunxi_mmc_clk_set_rate_for_sdmmc_v4p5x;
	/* host->dma_tl = (0x2<<28)|(7<<16)|248; */
	host->dma_tl = SUNXI_DMA_TL_SDMMC_V4P5X;
	/* host->idma_des_size_bits = 15; */
	host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC_V4P5X;
	host->sunxi_mmc_thld_ctl = sunxi_mmc_thld_ctl_for_sdmmc_v4p5x;
	host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg_v4p5x;
	host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg_v4p5x;
	sunxi_mmc_reg_ex_res_inter(host, phy_index);
	host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
	host->sunxi_mmc_dump_dly_table = sunxi_mmc_dump_dly2;
	host->phy_index = phy_index;
	host->sunxi_mmc_judge_retry = sunxi_mmc_judge_retry_v4p6x;
	host->sunxi_mmc_hw_wbusy_wait = sunxi_mmc_hw_wbusy_wait_v4p5x;

	if (mmc_readl(host, REG_SMCV) >= SMHC_VERSION_V4P7)
		host->sunxi_mmc_on_off_emce = sunxi_mmc_on_off_emce_v4p6x;
	if (mmc_readl(host, REG_SMCV) >= SMHC_VERSION_V4P9)
		host->sunxi_mmc_opacmd23 = sunxi_mmc_opacmd23_v4p9;
	if (mmc_readl(host, REG_SMCV) == SMHC_VERSION_V4P9)
		host->sfc_dis = true;
	if (mmc_readl(host, REG_SMCV) == SMHC_VERSION_V4P5P2)
		host->des_addr_shift = 2;
	if (mmc_readl(host, REG_SMCV) >= SMHC_VERSION_V5P3)
		host->des_addr_shift = 2;

	host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff;
}
EXPORT_SYMBOL_GPL(sunxi_mmc_init_priv_v4p6x);