/*
 * Sunxi SD/MMC host driver
 *
 * Copyright (C) 2015 AllWinnertech Ltd.
 * Author: lixiang <lixiang@allwinnertech>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/reset/sunxi.h>

#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/reset.h>

#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "sunxi-mmc.h"
#include "sunxi-mmc-v5p3x.h"
#include "sunxi-mmc-export.h"
#include "sunxi-mmc-debug.h"

#define SUNXI_RETRY_CNT_PER_PHA_V5P3X 3

/* DMA trigger level setting */
#define SUNXI_DMA_TL_SDMMC_V5P3X ((0x2<<28)|(7<<16)|248)

/* one DMA descriptor can transfer at most 1 << SUNXI_DES_SIZE_SDMMC_V5P3X bytes */
#define SUNXI_DES_SIZE_SDMMC_V5P3X (12)
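
/*
 * Field layout of SUNXI_DMA_TL_SDMMC_V5P3X, inferred from the TX/RX
 * threshold masks below and their use in
 * sunxi_mmc_thld_ctl_for_sdmmc_v5p3x() (not from a datasheet):
 * 0x2<<28 selects the IDMA burst size, 7<<16 the RX FIFO trigger level
 * and 248 the TX FIFO trigger level, both apparently in 4-byte words.
 */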

/* registers */
/* SMHC eMMC4.5 DDR Start Bit Detection Control Register */
#define SDXC_REG_EDSD (0x010C)
/* SMHC CRC Status Detect Control Register */
#define SDXC_REG_CSDC (0x0054)
/* SMHC Card Threshold Control Register */
#define SDXC_REG_THLD (0x0100)
/* SMHC Drive Delay Control Register */
#define SDXC_REG_DRV_DL (0x0140)
/* SMHC Sample Delay Control Register */
#define SDXC_REG_SAMP_DL (0x0144)
/* SMHC Data Strobe Delay Control Register */
#define SDXC_REG_DS_DL (0x0148)
/* SMHC NewTiming Set Register */
#define SDXC_REG_SD_NTSR (0x005C)
/* SMHC Version Register */
#define SDXC_REG_SMCV (0x300)
/* SMHC HS400 New Timing Delay Control Register */
#define SDXC_REG_NTDL_HS400 (0x800)

#define SDXC_SFC_BP BIT(0)

/* bits */
#define SDXC_HS400_MD_EN (1U<<31)
#define SDXC_CARD_WR_THLD_ENB (1U<<2)
#define SDXC_CARD_RD_THLD_ENB (1U)

#define SDXC_DAT_DRV_PH_SEL (1U<<17)
#define SDXC_CMD_DRV_PH_SEL (1U<<16)
#define SDXC_SAMP_DL_SW_EN (1U<<7)
#define SDXC_DS_DL_SW_EN (1U<<7)

#define SDXC_2X_TIMING_MODE (1U<<31)
#define SDXC_HS400_NEW_SAMPLE_EN (1U<<0)

/* masks */
#define SDXC_CRC_DET_PARA_MASK (0xf)
#define SDXC_CARD_RD_THLD_MASK (0x0FFF0000)
#define SDXC_TX_TL_MASK (0xff)
#define SDXC_RX_TL_MASK (0x00FF0000)

#define SDXC_HS400_SAMP_DL_SW_MASK (0x0000000F)
#define SDXC_SAMP_DL_SW_MASK (0x0000003F)
#define SDXC_DS_DL_SW_MASK (0x0000003F)

#define SDXC_STIMING_CMD_PH_MASK (0x00000030)
#define SDXC_STIMING_DAT_PH_MASK (0x00000300)

/* values */
#define SDXC_CRC_DET_PARA_HS400 (6)
#define SDXC_CRC_DET_PARA_OTHER (3)
#define SDXC_FIFO_DETH (1024>>2)

/* size */
#define SDXC_CARD_RD_THLD_SIZE (0x00000FFF)

/* shift */
#define SDXC_CARD_RD_THLD_SIZE_SHIFT (16)

#define SDXC_STIMING_CMD_PH_SHIFT (4)
#define SDXC_STIMING_DAT_PH_SHIFT (8)

/* Sunxi MMC Host Controller version */
#define SMHC_VERSION_V5P3 0x50300
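
/*
 * Controllers that report at least SMHC_VERSION_V5P3 in SDXC_REG_SMCV get
 * host->des_addr_shift = 2 in sunxi_mmc_init_priv_v5p3x(), presumably
 * because DMA descriptor/buffer addresses are then programmed as word
 * (4-byte) addresses on those parts.
 */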

enum sunxi_mmc_clk_mode {
        mmc_clk_400k = 0,
        mmc_clk_26M,
        mmc_clk_52M,
        mmc_clk_52M_DDR4,
        mmc_clk_52M_DDR8,
        mmc_clk_104M,
        mmc_clk_208M,
        mmc_clk_104M_DDR,
        mmc_clk_208M_DDR,
        mmc_clk_mod_num,
};

struct sunxi_mmc_clk_dly {
        char *mod_str;
        /* only used for 2X mode */
        enum sunxi_mmc_clk_mode cmod;
        u32 cmd_drv_ph;
        u32 dat_drv_ph;
        u32 sam_dly;
        u32 ds_dly;
        u32 sam_ph_dat;
        u32 sam_ph_cmd;
};

struct sunxi_mmc_spec_regs {
        u32 drv_dl;     /* REG_DRV_DL */
        u32 samp_dl;    /* REG_SAMP_DL */
        u32 ds_dl;      /* REG_DS_DL */
        u32 sd_ntsr;    /* REG_SD_NTSR */
};

struct sunxi_mmc_ver_priv {
        struct sunxi_mmc_spec_regs bak_spec_regs;
        struct sunxi_mmc_clk_dly mmc_clk_dly[mmc_clk_mod_num];
};

static void sunxi_mmc_set_clk_dly(struct sunxi_mmc_host *host, int clk,
                                  int bus_width, int timing)
{
        struct mmc_host *mhost = host->mmc;
        u32 rval = 0;
        enum sunxi_mmc_clk_mode cmod = mmc_clk_400k;
        u32 in_clk_dly[6] = { 0 };
        int ret = 0;
        struct device_node *np = NULL;
        struct sunxi_mmc_clk_dly *mmc_clk_dly =
            ((struct sunxi_mmc_ver_priv *)host->version_priv_dat)->mmc_clk_dly;

        if (!mhost->parent || !mhost->parent->of_node) {
                SM_ERR(mmc_dev(host->mmc),
                       "no dts to parse clk dly, use default\n");
                return;
        }

        np = mhost->parent->of_node;

        if (clk <= 400 * 1000) {
                cmod = mmc_clk_400k;
        } else if (clk <= 26 * 1000 * 1000) {
                cmod = mmc_clk_26M;
        } else if (clk <= 52 * 1000 * 1000) {
                if ((bus_width == MMC_BUS_WIDTH_4)
                    && sunxi_mmc_ddr_timing(timing)) {
                        cmod = mmc_clk_52M_DDR4;
                } else if ((bus_width == MMC_BUS_WIDTH_8)
                           && (timing == MMC_TIMING_MMC_DDR52)) {
                        cmod = mmc_clk_52M_DDR8;
                } else {
                        cmod = mmc_clk_52M;
                }
        } else if (clk <= 104 * 1000 * 1000) {
                if ((bus_width == MMC_BUS_WIDTH_8)
                    && (timing == MMC_TIMING_MMC_HS400)) {
                        cmod = mmc_clk_104M_DDR;
                } else {
                        cmod = mmc_clk_104M;
                }
        } else if (clk <= 208 * 1000 * 1000) {
                if ((bus_width == MMC_BUS_WIDTH_8)
                    && (timing == MMC_TIMING_MMC_HS400)) {
                        cmod = mmc_clk_208M_DDR;
                } else {
                        cmod = mmc_clk_208M;
                }
        } else {
                SM_ERR(mmc_dev(mhost), "clk %d is out of range\n", clk);
                return;
        }

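        /*
         * Optional per-mode delay override from DT; the expected layout,
         * inferred from the indices used below, is six u32 cells:
         * <cmd_drv_ph dat_drv_ph sam_dly ds_dly sam_ph_dat sam_ph_cmd>,
         * e.g. (illustrative only): sunxi-dly-52M = <1 1 0 0 1 1>;
         * Cells [2] and [3] are currently not applied here.
         */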
        ret = of_property_read_u32_array(np, mmc_clk_dly[cmod].mod_str,
                                         in_clk_dly, ARRAY_SIZE(in_clk_dly));
        if (ret) {
                SM_DBG(mmc_dev(host->mmc), "failed to get %s, use default\n",
                       mmc_clk_dly[cmod].mod_str);
        } else {
                mmc_clk_dly[cmod].cmd_drv_ph = in_clk_dly[0];
                mmc_clk_dly[cmod].dat_drv_ph = in_clk_dly[1];
                /* mmc_clk_dly[cmod].sam_dly = in_clk_dly[2]; */
                /* mmc_clk_dly[cmod].ds_dly = in_clk_dly[3]; */
                mmc_clk_dly[cmod].sam_ph_dat = in_clk_dly[4];
                mmc_clk_dly[cmod].sam_ph_cmd = in_clk_dly[5];
                SM_DBG(mmc_dev(host->mmc), "Get %s clk dly ok\n",
                       mmc_clk_dly[cmod].mod_str);
        }

        SM_DBG(mmc_dev(host->mmc), "Try to set %s clk dly\n",
               mmc_clk_dly[cmod].mod_str);
        SM_DBG(mmc_dev(host->mmc), "cmd_drv_ph %d\n",
               mmc_clk_dly[cmod].cmd_drv_ph);
        SM_DBG(mmc_dev(host->mmc), "dat_drv_ph %d\n",
               mmc_clk_dly[cmod].dat_drv_ph);
        SM_DBG(mmc_dev(host->mmc), "sam_ph_dat %d\n",
               mmc_clk_dly[cmod].sam_ph_dat);
        SM_DBG(mmc_dev(host->mmc), "sam_ph_cmd %d\n",
               mmc_clk_dly[cmod].sam_ph_cmd);

        rval = mmc_readl(host, REG_DRV_DL);
        if (mmc_clk_dly[cmod].cmd_drv_ph)
                rval |= SDXC_CMD_DRV_PH_SEL;    /* 180 phase */
        else
                rval &= ~SDXC_CMD_DRV_PH_SEL;   /* 90 phase */

        if (mmc_clk_dly[cmod].dat_drv_ph)
                rval |= SDXC_DAT_DRV_PH_SEL;    /* 180 phase */
        else
                rval &= ~SDXC_DAT_DRV_PH_SEL;   /* 90 phase */

        sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, rval));

        /*
         * rval = mmc_readl(host, REG_SAMP_DL);
         * rval &= ~SDXC_SAMP_DL_SW_MASK;
         * rval |= mmc_clk_dly[cmod].sam_dly & SDXC_SAMP_DL_SW_MASK;
         * rval |= SDXC_SAMP_DL_SW_EN;
         * mmc_writel(host, REG_SAMP_DL, rval);
         *
         * rval = mmc_readl(host, REG_DS_DL);
         * rval &= ~SDXC_DS_DL_SW_MASK;
         * rval |= mmc_clk_dly[cmod].ds_dly & SDXC_DS_DL_SW_MASK;
         * rval |= SDXC_DS_DL_SW_EN;
         * mmc_writel(host, REG_DS_DL, rval);
         */

        rval = mmc_readl(host, REG_SD_NTSR);
        rval &= ~SDXC_STIMING_DAT_PH_MASK;
        rval |= (mmc_clk_dly[cmod].sam_ph_dat << SDXC_STIMING_DAT_PH_SHIFT) &
                SDXC_STIMING_DAT_PH_MASK;
        mmc_writel(host, REG_SD_NTSR, rval);

        rval = mmc_readl(host, REG_SD_NTSR);
        rval &= ~SDXC_STIMING_CMD_PH_MASK;
        rval |= (mmc_clk_dly[cmod].sam_ph_cmd << SDXC_STIMING_CMD_PH_SHIFT) &
                SDXC_STIMING_CMD_PH_MASK;
        mmc_writel(host, REG_SD_NTSR, rval);

        SM_DBG(mmc_dev(host->mmc), " REG_DRV_DL %08x\n",
               mmc_readl(host, REG_DRV_DL));
        SM_DBG(mmc_dev(host->mmc), " REG_SAMP_DL %08x\n",
               mmc_readl(host, REG_SAMP_DL));
        SM_DBG(mmc_dev(host->mmc), " REG_DS_DL %08x\n",
               mmc_readl(host, REG_DS_DL));
        SM_DBG(mmc_dev(host->mmc), " REG_SD_NTSR %08x\n",
               mmc_readl(host, REG_SD_NTSR));
}

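/*
 * Program SDXC_CARD_CLOCK_ON/SDXC_LOW_POWER_ON/SDXC_MASK_DATA0 in REG_CLKCR,
 * then issue an "update clock only" command (SDXC_START | SDXC_UPCLK_ONLY)
 * so the controller latches the new clock setting; completion is polled for
 * up to 250 ms.
 */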
static int __sunxi_mmc_do_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en,
                                     u32 pwr_save, u32 ignore_dat0)
{
        unsigned long expire = jiffies + msecs_to_jiffies(250);
        u32 rval;

        rval = mmc_readl(host, REG_CLKCR);
        rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0);

        if (oclk_en)
                rval |= SDXC_CARD_CLOCK_ON;
        if (pwr_save && host->voltage_switching == 0)
                rval |= SDXC_LOW_POWER_ON;
        if (ignore_dat0)
                rval |= SDXC_MASK_DATA0;

        mmc_writel(host, REG_CLKCR, rval);

        SM_DBG(mmc_dev(host->mmc), "%s REG_CLKCR:%x\n", __func__,
               mmc_readl(host, REG_CLKCR));

        if (host->voltage_switching == 1) {
                rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER |
                       SDXC_VOLTAGE_SWITCH;
        } else {
                rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER;
        }
        mmc_writel(host, REG_CMDR, rval);

        do {
                rval = mmc_readl(host, REG_CMDR);
        } while (time_before(jiffies, expire) && (rval & SDXC_START));

        /* clear irq status bits set by the command */
        mmc_writel(host, REG_RINTR,
                   mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT);

        if (rval & SDXC_START) {
                SM_ERR(mmc_dev(host->mmc), "fatal err update clk timeout\n");
                return -EIO;
        }

        /* mask data0 only while updating the clock; clear it again afterwards */
        if (ignore_dat0)
                mmc_writel(host, REG_CLKCR,
                           mmc_readl(host, REG_CLKCR) & ~SDXC_MASK_DATA0);

        return 0;
}

static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
{
        struct device_node *np = NULL;
        struct mmc_host *mmc = host->mmc;
        int pwr_save = 0;
        int len = 0;

        if (!mmc->parent || !mmc->parent->of_node) {
                SM_ERR(mmc_dev(host->mmc),
                       "no dts to parse power save mode\n");
                return -EIO;
        }

        np = mmc->parent->of_node;
        if (of_find_property(np, "sunxi-power-save-mode", &len))
                pwr_save = 1;
        return __sunxi_mmc_do_oclk_onoff(host, oclk_en, pwr_save, 1);
}

static void sunxi_mmc_2xmod_onoff(struct sunxi_mmc_host *host, u32 newmode_en)
{
        u32 rval = mmc_readl(host, REG_SD_NTSR);

        if (newmode_en)
                rval |= SDXC_2X_TIMING_MODE;
        else
                rval &= ~SDXC_2X_TIMING_MODE;

        mmc_writel(host, REG_SD_NTSR, rval);

        SM_DBG(mmc_dev(host->mmc), "REG_SD_NTSR: 0x%08x, val %x\n",
               mmc_readl(host, REG_SD_NTSR), rval);
}

static int sunxi_mmc_clk_set_rate_for_sdmmc_v5p3x(struct sunxi_mmc_host *host,
                                                  struct mmc_ios *ios)
{
        u32 mod_clk = 0;
        u32 src_clk = 0;
        u32 rval = 0;
        s32 err = 0;
        u32 rate = 0;
        char *sclk_name = NULL;
        struct clk *mclk = host->clk_mmc;
        struct clk *sclk = NULL;
        struct device *dev = mmc_dev(host->mmc);
        int div = 0;
#ifdef MMC_FPGA
        void __iomem *ctl_2x_en = ioremap(0x03000024, 0x4);
#endif

        if (ios->clock == 0) {
                __sunxi_mmc_do_oclk_onoff(host, 0, 0, 1);
                return 0;
        }
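
        /*
         * The controller runs in 2x timing mode here, so the module clock is
         * requested at twice the card clock, and at four times the card clock
         * for DDR/HS400 timings where data toggles on both clock edges;
         * ios->clock is scaled back accordingly once the rate is set.
         */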
        if (sunxi_mmc_ddr_timing(ios->timing) ||
            (ios->timing == MMC_TIMING_MMC_HS400)) {
                mod_clk = ios->clock << 2;
                div = 1;
        } else {
                mod_clk = ios->clock << 1;
                div = 0;
        }

        sclk = clk_get(dev, "osc24m");
        sclk_name = "osc24m";
        if (IS_ERR(sclk)) {
                SM_ERR(mmc_dev(host->mmc), "Failed to get source clock %s\n",
                       sclk_name);
                return -1;
        }

        src_clk = clk_get_rate(sclk);
        if (mod_clk > src_clk) {
                clk_put(sclk);
                sclk = clk_get(dev, "pll_periph");
                sclk_name = "pll_periph";
        }
        if (IS_ERR(sclk)) {
                SM_ERR(mmc_dev(host->mmc), "Failed to get source clock %s\n",
                       sclk_name);
                return -1;
        }

        sunxi_mmc_oclk_onoff(host, 0);

        err = clk_set_parent(mclk, sclk);
        if (err) {
                SM_ERR(mmc_dev(host->mmc), "set parent failed\n");
                clk_put(sclk);
                return -1;
        }

        rate = clk_round_rate(mclk, mod_clk);

        SM_DBG(mmc_dev(host->mmc), "get round rate %d\n", rate);

        clk_disable_unprepare(host->clk_mmc);

        err = clk_set_rate(mclk, rate);
        if (err) {
                SM_ERR(mmc_dev(host->mmc), "set mclk rate error, rate %dHz\n",
                       rate);
                clk_put(sclk);
                return -1;
        }

        rval = clk_prepare_enable(host->clk_mmc);
        if (rval) {
                SM_ERR(mmc_dev(host->mmc), "Enable mmc clk err %d\n", rval);
                return -1;
        }

        src_clk = clk_get_rate(sclk);
        clk_put(sclk);

        SM_DBG(mmc_dev(host->mmc), "set round clock %d, source clk is %d\n",
               rate, src_clk);

#ifdef MMC_FPGA
        /* clear the internal divider; keep it at 1 for DDR timings */
        rval = mmc_readl(host, REG_CLKCR);
        rval &= ~0xff;
        if (sunxi_mmc_ddr_timing(ios->timing))
                rval |= 1;
        mmc_writel(host, REG_CLKCR, rval);

        rval = mmc_readl(host, REG_DRV_DL);
        if (ios->clock > 400 * 1000) {
                rval |= (1 << 7);
                mmc_writel(host, REG_DRV_DL, rval);
        } else {
                if (sunxi_mmc_ddr_timing(ios->timing))
                        SM_INFO(mmc_dev(host->mmc),
                                "Warning: 400KHz DDR mode\n");
                rval &= ~(1 << 7);
                sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, rval));
        }
        SM_INFO(mmc_dev(host->mmc), "FPGA REG_CLKCR: 0x%08x\n",
                mmc_readl(host, REG_CLKCR));
#else
        /* clear internal divider */
        rval = mmc_readl(host, REG_CLKCR);
        rval &= ~0xff;
        rval |= div;
        mmc_writel(host, REG_CLKCR, rval);
#endif

#ifdef MMC_FPGA
        /*
         * The V7 FPGA board only supports 1x mode; bit 3 of register
         * 0x03000024 selects between 1x and 2x mode.
         */
        rval = readl(ctl_2x_en);
        /* sunxi_of_parse_clk_dly(host); */
        if (rval & (0x1 << 3))
                sunxi_mmc_2xmod_onoff(host, 1);
        else
                sunxi_mmc_2xmod_onoff(host, 0);

        iounmap(ctl_2x_en);
#else
        sunxi_mmc_2xmod_onoff(host, 1);
#endif

        if ((ios->bus_width == MMC_BUS_WIDTH_8)
            && (ios->timing == MMC_TIMING_MMC_HS400)) {
                rval = mmc_readl(host, REG_EDSD);
                rval |= SDXC_HS400_MD_EN;
                mmc_writel(host, REG_EDSD, rval);

                rval = mmc_readl(host, REG_CSDC);
                rval &= ~SDXC_CRC_DET_PARA_MASK;
                rval |= SDXC_CRC_DET_PARA_HS400;
                mmc_writel(host, REG_CSDC, rval);

                rval = mmc_readl(host, REG_SD_NTSR);
                rval |= SDXC_HS400_NEW_SAMPLE_EN;
                mmc_writel(host, REG_SD_NTSR, rval);
        } else {
                rval = mmc_readl(host, REG_EDSD);
                rval &= ~SDXC_HS400_MD_EN;
                mmc_writel(host, REG_EDSD, rval);

                rval = mmc_readl(host, REG_CSDC);
                rval &= ~SDXC_CRC_DET_PARA_MASK;
                rval |= SDXC_CRC_DET_PARA_OTHER;
                mmc_writel(host, REG_CSDC, rval);
        }
        SM_DBG(mmc_dev(host->mmc), "SDXC_REG_EDSD: 0x%08x\n",
               mmc_readl(host, REG_EDSD));
        SM_DBG(mmc_dev(host->mmc), "SDXC_REG_CSDC: 0x%08x\n",
               mmc_readl(host, REG_CSDC));

        if (sunxi_mmc_ddr_timing(ios->timing) ||
            (ios->timing == MMC_TIMING_MMC_HS400))
                ios->clock = rate >> 2;
        else
                ios->clock = rate >> 1;

        sunxi_mmc_set_clk_dly(host, ios->clock, ios->bus_width, ios->timing);

        return sunxi_mmc_oclk_onoff(host, 1);
}

static void sunxi_mmc_thld_ctl_for_sdmmc_v5p3x(struct sunxi_mmc_host *host,
                                               struct mmc_ios *ios,
                                               struct mmc_data *data)
{
        u32 bsz = data->blksz;
        /* u32 tdtl = (host->dma_tl & SDXC_TX_TL_MASK) << 2; */ /* unit: byte */
        /* unit: byte */
        u32 rdtl = ((host->dma_tl & SDXC_RX_TL_MASK) >> 16) << 2;
        u32 rval = 0;

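        /*
         * Enable the card read threshold only when a whole block plus the
         * DMA RX trigger level fits in the FIFO and the bus timing is
         * HS200/SDR50/SDR104; otherwise the threshold is disabled.
         */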
        if ((data->flags & MMC_DATA_READ)
            && (bsz <= SDXC_CARD_RD_THLD_SIZE)
            /* ((SDXC_FIFO_DETH << 2) - bsz) >= (rdtl) */
            && ((SDXC_FIFO_DETH << 2) >= (rdtl + bsz))
            && ((ios->timing == MMC_TIMING_MMC_HS200)
                || (ios->timing == MMC_TIMING_UHS_SDR50)
                || (ios->timing == MMC_TIMING_UHS_SDR104))) {
                rval = mmc_readl(host, REG_THLD);
                rval &= ~SDXC_CARD_RD_THLD_MASK;
                rval |= data->blksz << SDXC_CARD_RD_THLD_SIZE_SHIFT;
                rval |= SDXC_CARD_RD_THLD_ENB;
                mmc_writel(host, REG_THLD, rval);
        } else {
                rval = mmc_readl(host, REG_THLD);
                rval &= ~SDXC_CARD_RD_THLD_ENB;
                mmc_writel(host, REG_THLD, rval);
        }

        SM_DBG(mmc_dev(host->mmc), "SDXC_REG_THLD: 0x%08x\n",
               mmc_readl(host, REG_THLD));
}

static void sunxi_mmc_save_spec_reg_v5p3x(struct sunxi_mmc_host *host)
{
        struct sunxi_mmc_spec_regs *spec_regs =
            &((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))->
            bak_spec_regs;

        spec_regs->drv_dl = mmc_readl(host, REG_DRV_DL);
        spec_regs->samp_dl = mmc_readl(host, REG_SAMP_DL);
        spec_regs->ds_dl = mmc_readl(host, REG_DS_DL);
        spec_regs->sd_ntsr = mmc_readl(host, REG_SD_NTSR);
}

static void sunxi_mmc_restore_spec_reg_v5p3x(struct sunxi_mmc_host *host)
{
        struct sunxi_mmc_spec_regs *spec_regs =
            &((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))->
            bak_spec_regs;

        sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, spec_regs->drv_dl));
        mmc_writel(host, REG_SAMP_DL, spec_regs->samp_dl);
        mmc_writel(host, REG_DS_DL, spec_regs->ds_dl);
        mmc_writel(host, REG_SD_NTSR, spec_regs->sd_ntsr);
}

static inline void sunxi_mmc_set_dly_raw(struct sunxi_mmc_host *host,
                                         s32 opha_cmd, s32 ipha_cmd,
                                         s32 opha_dat, s32 ipha_dat,
                                         s32 samp_dl)
{
        struct mmc_host *mmc = host->mmc;
        u32 rval = mmc_readl(host, REG_DRV_DL);

        if (opha_cmd > 0)
                rval |= SDXC_CMD_DRV_PH_SEL;    /* 180 phase */
        else if (opha_cmd == 0)
                rval &= ~SDXC_CMD_DRV_PH_SEL;   /* 90 phase */

        if (opha_dat > 0)
                rval |= SDXC_DAT_DRV_PH_SEL;    /* 180 phase */
        else if (opha_dat == 0)
                rval &= ~SDXC_DAT_DRV_PH_SEL;   /* 90 phase */

        sunxi_r_op(host, mmc_writel(host, REG_DRV_DL, rval));

        rval = mmc_readl(host, REG_SD_NTSR);

        if (ipha_cmd >= 0) {
                rval &= ~SDXC_STIMING_CMD_PH_MASK;
                rval |= (ipha_cmd << SDXC_STIMING_CMD_PH_SHIFT) &
                        SDXC_STIMING_CMD_PH_MASK;
        }

        if (ipha_dat >= 0) {
                rval &= ~SDXC_STIMING_DAT_PH_MASK;
                rval |= (ipha_dat << SDXC_STIMING_DAT_PH_SHIFT) &
                        SDXC_STIMING_DAT_PH_MASK;
        }

        rval &= ~SDXC_2X_TIMING_MODE;
        mmc_writel(host, REG_SD_NTSR, rval);

        /* select the 2x mode */
        rval |= SDXC_2X_TIMING_MODE;
        mmc_writel(host, REG_SD_NTSR, rval);

        /* only SDC2 with HS400 supports the HS400 new-sample enable in 2x mode */
        if ((mmc->ios.timing == MMC_TIMING_MMC_HS400) &&
            (rval & SDXC_HS400_NEW_SAMPLE_EN)) {
                rval = mmc_readl(host, REG_NTDL_HS400);
                rval &= ~SDXC_HS400_SAMP_DL_SW_MASK;
                rval |= samp_dl & SDXC_HS400_SAMP_DL_SW_MASK;
                rval |= SDXC_DS_DL_SW_EN;
                mmc_writel(host, REG_NTDL_HS400, rval);
        }

        SM_INFO(mmc_dev(host->mmc), "REG_DRV_DL: 0x%08x\n",
                mmc_readl(host, REG_DRV_DL));
        SM_INFO(mmc_dev(host->mmc), "REG_SD_NTSR: 0x%08x\n",
                mmc_readl(host, REG_SD_NTSR));
        SM_INFO(mmc_dev(host->mmc), "REG_NTDL_HS400: 0x%08x\n",
                mmc_readl(host, REG_NTDL_HS400));
}

static int sunxi_mmc_judge_retry_v5p3x(struct sunxi_mmc_host *host,
                                       struct mmc_command *cmd, u32 rcnt,
                                       u32 errno, void *other)
{
        struct mmc_host *mmc = host->mmc;
        /* -1 means "keep the current setting" */
        /*
         * {-1, -1} is the first entry because we want to retry with the
         * current delay first; only if the current delay fails do we move
         * on to a new one.
         */
        const s32 sunxi_phase[][2] = {
                {-1, -1}, {1, 1}, {0, 0}, {1, 0}, {0, 1},
                {1, 2}, {0, 2}, {1, 3}, {0, 3}
        };
        u32 phase_num = ARRAY_SIZE(sunxi_phase);
        /*
         * Only SDC2 with HS400 supports the 0 degree phase and the
         * HS400_NEW_TIMING_MODE_BY_2X.
         */
        u32 rcnt_max = (host->phy_index == 2 &&
                        mmc->ios.timing == MMC_TIMING_MMC_HS400)
                        ? (phase_num * 16) : (phase_num - 1);
        u32 samp_unit = (rcnt_max + 1) / phase_num;

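        /*
         * Retry schedule, as implemented below: each phase pair in
         * sunxi_phase[] is kept for SUNXI_RETRY_CNT_PER_PHA_V5P3X * samp_unit
         * retries.  In the HS400-on-SDC2 case samp_unit is 16, so every phase
         * pair additionally sweeps the 16 HS400 sample-delay taps (repeated
         * SUNXI_RETRY_CNT_PER_PHA_V5P3X times); otherwise samp_unit is 1 and
         * only the phase pair changes every few retries.
         */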
        if (rcnt < (SUNXI_RETRY_CNT_PER_PHA_V5P3X * rcnt_max)) {
                u32 pha_idx = rcnt /
                    (SUNXI_RETRY_CNT_PER_PHA_V5P3X * samp_unit);

                sunxi_mmc_set_dly_raw(host,
                                      sunxi_phase[pha_idx][0],
                                      sunxi_phase[pha_idx][1],
                                      sunxi_phase[pha_idx][0],
                                      sunxi_phase[pha_idx][1],
                                      rcnt % samp_unit);
                return 0;
        }

        sunxi_mmc_set_dly_raw(host, sunxi_phase[0][0],
                              sunxi_phase[0][1],
                              sunxi_phase[0][0], sunxi_phase[0][1], 0);
        SM_INFO(mmc_dev(host->mmc), "sunxi v5p3x retry give up\n");
        return -1;
}

void sunxi_mmc_init_priv_v5p3x(struct sunxi_mmc_host *host,
                               struct platform_device *pdev, int phy_index)
{
        struct sunxi_mmc_ver_priv *ver_priv =
            devm_kzalloc(&pdev->dev, sizeof(struct sunxi_mmc_ver_priv),
                         GFP_KERNEL);

        if (!ver_priv) {
                SM_ERR(&pdev->dev, "alloc sunxi_mmc_ver_priv failed\n");
                return;
        }

        host->version_priv_dat = ver_priv;

        ver_priv->mmc_clk_dly[mmc_clk_400k].cmod = mmc_clk_400k;
        ver_priv->mmc_clk_dly[mmc_clk_400k].mod_str = "sunxi-dly-400k";
        ver_priv->mmc_clk_dly[mmc_clk_400k].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_400k].dat_drv_ph = 0;
        ver_priv->mmc_clk_dly[mmc_clk_400k].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_400k].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_400k].sam_ph_dat = 0;
        ver_priv->mmc_clk_dly[mmc_clk_400k].sam_ph_cmd = 0;

        ver_priv->mmc_clk_dly[mmc_clk_26M].cmod = mmc_clk_26M;
        ver_priv->mmc_clk_dly[mmc_clk_26M].mod_str = "sunxi-dly-26M";
        ver_priv->mmc_clk_dly[mmc_clk_26M].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_26M].dat_drv_ph = 0;
        ver_priv->mmc_clk_dly[mmc_clk_26M].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_26M].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_26M].sam_ph_dat = 0;
        ver_priv->mmc_clk_dly[mmc_clk_26M].sam_ph_cmd = 0;

        ver_priv->mmc_clk_dly[mmc_clk_52M].cmod = mmc_clk_52M;
        ver_priv->mmc_clk_dly[mmc_clk_52M].mod_str = "sunxi-dly-52M";
        ver_priv->mmc_clk_dly[mmc_clk_52M].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M].dat_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_52M].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_52M].sam_ph_dat = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M].sam_ph_cmd = 1;

        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].cmod = mmc_clk_52M_DDR4;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].mod_str = "sunxi-dly-52M-ddr4";
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].dat_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].sam_ph_dat = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].sam_ph_cmd = 1;

        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].cmod = mmc_clk_52M_DDR8;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].mod_str = "sunxi-dly-52M-ddr8";
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].dat_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].sam_ph_dat = 1;
        ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].sam_ph_cmd = 1;

        ver_priv->mmc_clk_dly[mmc_clk_104M].cmod = mmc_clk_104M;
        ver_priv->mmc_clk_dly[mmc_clk_104M].mod_str = "sunxi-dly-104M";
        ver_priv->mmc_clk_dly[mmc_clk_104M].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_104M].dat_drv_ph = 0;
        ver_priv->mmc_clk_dly[mmc_clk_104M].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_104M].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_104M].sam_ph_dat = 0;
        ver_priv->mmc_clk_dly[mmc_clk_104M].sam_ph_cmd = 0;

        ver_priv->mmc_clk_dly[mmc_clk_208M].cmod = mmc_clk_208M;
        ver_priv->mmc_clk_dly[mmc_clk_208M].mod_str = "sunxi-dly-208M";
        ver_priv->mmc_clk_dly[mmc_clk_208M].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_208M].dat_drv_ph = 0;
        ver_priv->mmc_clk_dly[mmc_clk_208M].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_208M].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_208M].sam_ph_dat = 0;
        ver_priv->mmc_clk_dly[mmc_clk_208M].sam_ph_cmd = 0;

        ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].cmod = mmc_clk_104M_DDR;
        ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].mod_str = "sunxi-dly-104M-ddr";
        ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].dat_drv_ph = 0;
        ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].sam_ph_dat = 0;
        ver_priv->mmc_clk_dly[mmc_clk_104M_DDR].sam_ph_cmd = 0;

        ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].cmod = mmc_clk_208M_DDR;
        ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].mod_str = "sunxi-dly-208M-ddr";
        ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].cmd_drv_ph = 1;
        ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].dat_drv_ph = 0;
        ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].sam_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].ds_dly = 0;
        ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].sam_ph_dat = 0;
        ver_priv->mmc_clk_dly[mmc_clk_208M_DDR].sam_ph_cmd = 0;

        host->sunxi_mmc_clk_set_rate = sunxi_mmc_clk_set_rate_for_sdmmc_v5p3x;
        host->dma_tl = SUNXI_DMA_TL_SDMMC_V5P3X;
        host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC_V5P3X;
        host->sunxi_mmc_thld_ctl = sunxi_mmc_thld_ctl_for_sdmmc_v5p3x;
        host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg_v5p3x;
        host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg_v5p3x;
        sunxi_mmc_reg_ex_res_inter(host, phy_index);
        host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
        host->phy_index = phy_index;
        host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff;
        host->sunxi_mmc_judge_retry = sunxi_mmc_judge_retry_v5p3x;
        /* sunxi_of_parse_clk_dly(host); */
        if (mmc_readl(host, REG_SMCV) >= SMHC_VERSION_V5P3)
                host->des_addr_shift = 2;
}
EXPORT_SYMBOL_GPL(sunxi_mmc_init_priv_v5p3x);