1 /*
2 * SUNXI EMMC/SD driver
3 *
4 * Copyright (C) 2015 AllWinnertech Ltd.
5 * Author: lijuan <lijuan@allwinnertech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17 #include <linux/clk.h>
18 #include <linux/reset/sunxi.h>
19
20 #include <linux/gpio.h>
21 #include <linux/platform_device.h>
22 #include <linux/spinlock.h>
23 #include <linux/scatterlist.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/slab.h>
26 #include <linux/reset.h>
27
28 #include <linux/of_address.h>
29 #include <linux/of_gpio.h>
30 #include <linux/of_platform.h>
31
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/sd.h>
34 #include <linux/mmc/sdio.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/core.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/slot-gpio.h>
39
40 #include "sunxi-mmc.h"
41 #include "sunxi-mmc-v4p00x.h"
42 #include "sunxi-mmc-export.h"
43 #include "sunxi-mmc-debug.h"
44
45 #define MMC_SRCCLK_PLL "pll_periph"
46 #define MMC_SRCCLK_HOSC "hosc"
47 #define SUNXI_RETRY_CNT_PER_PHA_V4P00X 3
48
/* DMA trigger level setting */
50 #define SUNXI_DMA_TL_SDMMC_V4P0X ((0x2<<28)|(7<<16)|16)
/* one DMA descriptor can transfer up to 1 << SUNXI_DES_SIZE_SDMMC_V4P0X bytes */
52 #define SUNXI_DES_SIZE_SDMMC_V4P0X (15)
53
54 /*reg*/
55 #define CCMU_BASE_ADDR (0x1c20000)
56
57 /*bit*/
58 #define SDXC_2X_TIMING_MODE (1U<<31)
59
60 /*mask*/
61 #define SDXC_TX_TL_MASK (0x0f)
62 #define SDXC_RX_TL_MASK (0x000F0000)
63 #define SDXC_STIMING_PH_MASK (0x00700000)
64 #define SDXC_DRV_PH_MASK (0x00000700)
65
66 /*shift*/
67 #define SDXC_STIMING_PH_SHIFT (20)
68 #define SDXC_DRV_PH_SHIFT (8)
69
/* Clock/timing operating modes; used as the index into the per-mode
 * delay table (struct sunxi_mmc_clk_dly array of size mmc_clk_mod_num).
 */
enum sunxi_mmc_clk_mode {
	mmc_clk_400k = 0,	/* identification/init clock, <= 400 kHz */
	mmc_clk_26M,		/* <= 26 MHz */
	mmc_clk_52M,		/* <= 52 MHz SDR */
	mmc_clk_52M_DDR4,	/* <= 52 MHz DDR, 4-bit bus */
	mmc_clk_52M_DDR8,	/* <= 52 MHz DDR, 8-bit bus */
	mmc_clk_104M,		/* <= 104 MHz SDR */
	mmc_clk_208M,		/* <= 208 MHz SDR */
	mmc_clk_104M_DDR,	/* <= 104 MHz, HS400 timing, 8-bit bus */
	mmc_clk_208M_DDR,	/* <= 208 MHz, HS400 timing, 8-bit bus */
	mmc_clk_mod_num,	/* number of modes; sizes the delay table */
};
82
/* Per-mode clock delay (phase) settings, optionally overridden from DT. */
struct sunxi_mmc_clk_dly {
	enum sunxi_mmc_clk_mode cmod;	/* mode this entry belongs to */
	char *mod_str;			/* DT property name holding the delays */
	u32 drv_ph;			/* output driver phase */
	u32 sam_ph;			/* sample phase */
};
89
90
/* Version-specific register backup used across suspend/resume. */
struct sunxi_mmc_spec_regs {
	u32 sd_ccmu;	/* saved CCMU mmc clock register value */
};
94
/* Private data for this controller version, hung off host->version_priv_dat. */
struct sunxi_mmc_ver_priv {
	struct sunxi_mmc_spec_regs bak_spec_regs;	/* suspend backup */
	struct sunxi_mmc_clk_dly mmc_clk_dly[mmc_clk_mod_num];	/* delay table */
};
99
sunxi_mmc_set_clk_dly(struct sunxi_mmc_host * host,int clk,int bus_width,int timing)100 static void sunxi_mmc_set_clk_dly(struct sunxi_mmc_host *host, int clk,
101 int bus_width, int timing)
102 {
103 struct mmc_host *mhost = host->mmc;
104 u32 rval = 0;
105 enum sunxi_mmc_clk_mode cmod = mmc_clk_400k;
106 u32 in_clk_dly[5] = { 0 };
107 int ret = 0;
108 struct device_node *np = NULL;
109 void __iomem *ccmu_ptr = NULL;
110 struct sunxi_mmc_clk_dly *mmc_clk_dly =
111 ((struct sunxi_mmc_ver_priv *)host->version_priv_dat)->mmc_clk_dly;
112
113 if (!mhost->parent || !mhost->parent->of_node) {
114 SM_ERR(mmc_dev(host->mmc),
115 "no dts to parse clk dly,use default\n");
116 return;
117 }
118
119 np = mhost->parent->of_node;
120
121 if (clk <= 400 * 1000) {
122 cmod = mmc_clk_400k;
123 } else if (clk <= 26 * 1000 * 1000) {
124 cmod = mmc_clk_26M;
125 } else if (clk <= 52 * 1000 * 1000) {
126 if ((bus_width == MMC_BUS_WIDTH_4)
127 && (timing == MMC_TIMING_UHS_DDR50)) {
128 cmod = mmc_clk_52M_DDR4;
129 } else if ((bus_width == MMC_BUS_WIDTH_8)
130 && (timing == MMC_TIMING_UHS_DDR50)) {
131 cmod = mmc_clk_52M_DDR8;
132 } else {
133 cmod = mmc_clk_52M;
134 }
135 } else if (clk <= 104 * 1000 * 1000) {
136 if ((bus_width == MMC_BUS_WIDTH_8)
137 && (timing == MMC_TIMING_MMC_HS400)) {
138 cmod = mmc_clk_104M_DDR;
139 } else {
140 cmod = mmc_clk_104M;
141 }
142 } else if (clk <= 208 * 1000 * 1000) {
143 if ((bus_width == MMC_BUS_WIDTH_8)
144 && (timing == MMC_TIMING_MMC_HS400)) {
145 cmod = mmc_clk_208M_DDR;
146 } else {
147 cmod = mmc_clk_208M;
148 }
149 } else {
150 SM_ERR(mmc_dev(mhost), "clk %d is out of range\n", clk);
151 return;
152 }
153
154 ret = of_property_read_u32_array(np, mmc_clk_dly[cmod].mod_str,
155 in_clk_dly, ARRAY_SIZE(in_clk_dly));
156 if (ret) {
157 SM_DBG(mmc_dev(host->mmc), "failed to get %s used default\n",
158 mmc_clk_dly[cmod].mod_str);
159 } else {
160 mmc_clk_dly[cmod].drv_ph = in_clk_dly[0];
161 /*mmc_clk_dly[cmod].sam_dly = in_clk_dly[2];*/
162 /*mmc_clk_dly[cmod].ds_dly =in_clk_dly[3];*/
163 mmc_clk_dly[cmod].sam_ph = in_clk_dly[4];
164 SM_DBG(mmc_dev(host->mmc), "Get %s clk dly ok\n",
165 mmc_clk_dly[cmod].mod_str);
166 }
167 SM_DBG(mmc_dev(host->mmc), "Try set %s clk dly ok\n",
168 mmc_clk_dly[cmod].mod_str);
169 SM_DBG(mmc_dev(host->mmc), "drv_ph %d\n",
170 mmc_clk_dly[cmod].drv_ph);
171 SM_DBG(mmc_dev(host->mmc), "sam_ph %d\n",
172 mmc_clk_dly[cmod].sam_ph);
173
174 /*
175 * rval = mmc_readl(host,REG_SAMP_DL);
176 * rval &= ~SDXC_SAMP_DL_SW_MASK;
177 * rval |= mmc_clk_dly[cmod].sam_dly & SDXC_SAMP_DL_SW_MASK;
178 * rval |= SDXC_SAMP_DL_SW_EN;
179 * mmc_writel(host,REG_SAMP_DL,rval);
180
181 * rval = mmc_readl(host,REG_DS_DL);
182 * rval &= ~SDXC_DS_DL_SW_MASK;
183 * rval |= mmc_clk_dly[cmod].ds_dly & SDXC_DS_DL_SW_MASK;
184 * rval |= SDXC_DS_DL_SW_EN;
185 * mmc_writel(host,REG_DS_DL,rval);
186 */
187 ccmu_ptr = ioremap(CCMU_BASE_ADDR+0x88+0x4*
188 (host->phy_index), 0x4);
189 rval = readl(ccmu_ptr);
190 rval &= ~SDXC_DRV_PH_MASK;
191 rval |=
192 (mmc_clk_dly[cmod].
193 drv_ph << SDXC_DRV_PH_SHIFT) & SDXC_DRV_PH_MASK;
194 writel(rval, ccmu_ptr);
195 rval = readl(ccmu_ptr);
196 rval &= ~SDXC_STIMING_PH_MASK;
197 rval |=
198 (mmc_clk_dly[cmod].
199 sam_ph << SDXC_STIMING_PH_SHIFT) & SDXC_STIMING_PH_MASK;
200 writel(rval, ccmu_ptr);
201 SM_DBG(mmc_dev(host->mmc), " CCMU_BASE_ADDR %08x\n",
202 readl(ccmu_ptr));
203 iounmap(ccmu_ptr);
204
205 }
206
/*
 * Switch the card output clock on/off, optionally enabling low-power
 * clock gating and data0 masking, then issue an internal "update clock"
 * command so the controller latches the new CLKCR setting.  Polls up to
 * 250 ms for the command to complete.
 *
 * Returns 0 on success, -EIO if the update-clock command times out.
 */
static int __sunxi_mmc_do_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en,
				     u32 pwr_save, u32 ignore_dat0)
{
	unsigned long expire = jiffies + msecs_to_jiffies(250);
	u32 rval;

	rval = mmc_readl(host, REG_CLKCR);
	rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0);

	if (oclk_en)
		rval |= SDXC_CARD_CLOCK_ON;
	if (pwr_save)
		rval |= SDXC_LOW_POWER_ON;
	if (ignore_dat0)
		rval |= SDXC_MASK_DATA0;

	mmc_writel(host, REG_CLKCR, rval);

	SM_DBG(mmc_dev(host->mmc), "%s REG_CLKCR:%x\n", __func__,
	       mmc_readl(host, REG_CLKCR));

	/* Internal command: update clock only, wait for prior transfer. */
	rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER;
	mmc_writel(host, REG_CMDR, rval);

	/* SDXC_START self-clears once the controller accepts the command. */
	do {
		rval = mmc_readl(host, REG_CMDR);
	} while (time_before(jiffies, expire) && (rval & SDXC_START));

	/* clear irq status bits set by the command */
	mmc_writel(host, REG_RINTR,
		   mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT);

	if (rval & SDXC_START) {
		SM_ERR(mmc_dev(host->mmc), "fatal err update clk timeout\n");
		return -EIO;
	}

	/*only use mask data0 when update clk,clear it when not update clk */
	if (ignore_dat0)
		mmc_writel(host, REG_CLKCR,
			   mmc_readl(host, REG_CLKCR) & ~SDXC_MASK_DATA0);

	return 0;
}
251
/*
 * Gate the card output clock on or off.  Low-power clock gating is
 * enabled when the parent DT node carries "sunxi-power-save-mode".
 * Returns 0 on success, -EIO when no DT node is available or the
 * underlying clock update times out.
 */
static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
{
	struct mmc_host *mmc = host->mmc;
	struct device_node *node;
	int prop_len = 0;
	int save_mode;

	if (!mmc->parent || !mmc->parent->of_node) {
		SM_ERR(mmc_dev(host->mmc),
		       "no dts to parse power save mode\n");
		return -EIO;
	}

	node = mmc->parent->of_node;
	save_mode = of_find_property(node, "sunxi-power-save-mode",
				     &prop_len) ? 1 : 0;

	return __sunxi_mmc_do_oclk_onoff(host, oclk_en, save_mode, 1);
}
270
sunxi_mmc_clk_set_rate_for_sdmmc_v4p00x(struct sunxi_mmc_host * host,struct mmc_ios * ios)271 static int sunxi_mmc_clk_set_rate_for_sdmmc_v4p00x(
272 struct sunxi_mmc_host *host, struct mmc_ios *ios)
273 {
274 u32 mod_clk = 0;
275 u32 src_clk = 0;
276 u32 rval1 = 0;
277 s32 err = 0;
278 u32 rate = 0;
279 char *sclk_name = NULL;
280 struct clk *sclk = NULL;
281 struct device *dev = mmc_dev(host->mmc);
282
283 if (ios->clock == 0) {
284 __sunxi_mmc_do_oclk_onoff(host, 0, 0, 1);
285 return 0;
286 }
287
288 mod_clk = ios->clock;
289
290 if (ios->clock <= 400000) {
291 sclk_name = MMC_SRCCLK_HOSC;
292 sclk = clk_get(dev, sclk_name);
293 } else {
294 sclk_name = MMC_SRCCLK_PLL;
295 sclk = clk_get(dev, sclk_name);
296 }
297 if ((sclk == NULL) || IS_ERR(sclk)) {
298 SM_ERR(mmc_dev(host->mmc),
299 "Error to get source clock %s %ld\n",
300 sclk_name, (long)sclk);
301 return -1;
302 }
303
304
305 err = clk_set_parent(host->clk_mmc, sclk);
306
307 rate = clk_round_rate(host->clk_mmc, mod_clk);
308
309 SM_DBG(mmc_dev(host->mmc), "get round rate %d\n", rate);
310
311 /*clk_disable_unprepare(host->clk_mmc); */
312 /*sunxi_dump_reg(NULL);*/
313 err = clk_set_rate(host->clk_mmc, rate);
314 if (err) {
315 SM_ERR(mmc_dev(host->mmc),
316 "set mclk rate error, rate %dHz\n", rate);
317 clk_put(sclk);
318 return -1;
319 }
320
321 /*
322 *rval1 = clk_prepare_enable(host->clk_mmc);
323 *if (rval1) {
324 * SM_ERR(mmc_dev(host->mmc), "Enable mmc clk err %d\n", rval1);
325 * return -1;
326 *}
327 */
328 /*sunxi_dump_reg(NULL);*/
329 src_clk = clk_get_rate(sclk);
330 clk_put(sclk);
331
332 rval1 = mmc_readl(host, REG_CLKCR);
333 rval1 &= ~0xff;
334 mmc_writel(host, REG_CLKCR, rval1);
335
336 SM_DBG(mmc_dev(host->mmc),
337 "set round clock %d, soure clk is %d\n",
338 rate, src_clk);
339
340 sunxi_mmc_set_clk_dly(host, ios->clock, ios->bus_width, ios->timing);
341 return sunxi_mmc_oclk_onoff(host, 1);
342 }
343
sunxi_mmc_save_spec_reg_v4p00x(struct sunxi_mmc_host * host)344 static void sunxi_mmc_save_spec_reg_v4p00x(struct sunxi_mmc_host *host)
345 {
346 struct sunxi_mmc_spec_regs *spec_regs =
347 &((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))->
348 bak_spec_regs;
349 void __iomem *ccmu_ptr = ioremap(CCMU_BASE_ADDR + 0x88
350 + 0x4 * (host->phy_index), 0x4);
351
352 spec_regs->sd_ccmu = readl(ccmu_ptr);
353 iounmap(ccmu_ptr);
354 }
355
sunxi_mmc_restore_spec_reg_v4p00x(struct sunxi_mmc_host * host)356 static void sunxi_mmc_restore_spec_reg_v4p00x(struct sunxi_mmc_host *host)
357 {
358 struct sunxi_mmc_spec_regs *spec_regs =
359 &((struct sunxi_mmc_ver_priv *)(host->version_priv_dat))->
360 bak_spec_regs;
361 void __iomem *ccmu_ptr = ioremap(CCMU_BASE_ADDR + 0x88
362 + 0x4 * (host->phy_index), 0x4);
363
364 writel(spec_regs->sd_ccmu, ccmu_ptr);
365 iounmap(ccmu_ptr);
366 }
367
/*
 * Directly program raw output (opha) and sample (ipha) phase values in
 * the controller's CCMU mmc clock register.  A negative phase argument
 * leaves the corresponding field untouched.
 */
static inline void sunxi_mmc_set_dly_raw(struct sunxi_mmc_host *host,
					 s32 opha, s32 ipha)
{
	void __iomem *ccmu_ptr = ioremap(CCMU_BASE_ADDR + 0x88
					 + 0x4 * (host->phy_index), 0x4);
	u32 rval;

	if (!ccmu_ptr) {
		/* Fix: original read through a NULL mapping on failure. */
		SM_ERR(mmc_dev(host->mmc),
		       "failed to map CCMU clk register\n");
		return;
	}

	rval = readl(ccmu_ptr);

	if (ipha >= 0) {
		rval &= ~SDXC_STIMING_PH_MASK;
		rval |= (ipha << SDXC_STIMING_PH_SHIFT) & SDXC_STIMING_PH_MASK;
	}

	if (opha >= 0) {
		rval &= ~SDXC_DRV_PH_MASK;
		rval |= (opha << SDXC_DRV_PH_SHIFT) & SDXC_DRV_PH_MASK;
	}

	writel(rval, ccmu_ptr);
	SM_INFO(mmc_dev(host->mmc), "CCMU_BASE_ADDR: 0x%08x\n",
		readl(ccmu_ptr));
	iounmap(ccmu_ptr);
}
390
391
/*
 * Tuning-retry hook: step through a fixed table of {drv_ph, sam_ph}
 * phase pairs, spending SUNXI_RETRY_CNT_PER_PHA_V4P00X attempts on each
 * pair ({-1, -1} leaves the current phases untouched).  Returns 0 to
 * retry with the newly-programmed phases, -1 to give up after restoring
 * the first table entry.
 *
 * Fix: the table was declared [10][2] with only 7 initializers while the
 * retry bound assumed 10 entries, so attempts 21..29 silently reused the
 * zero-filled pair {0, 0} (a duplicate of entry 2).  The bound is now
 * derived from the actual table size, and the table is static const so
 * it is not rebuilt on every call.
 */
static int sunxi_mmc_judge_retry_v4p00x(
	struct sunxi_mmc_host *host, struct mmc_command *cmd,
	u32 rcnt, u32 errno, void *other)
{
	static const s32 sunxi_phase[][2] = { {-1, -1}, {1, 1},
		{0, 0}, {1, 0}, {0, 1}, {1, 2}, {0, 2} };

	if (rcnt < (SUNXI_RETRY_CNT_PER_PHA_V4P00X * ARRAY_SIZE(sunxi_phase))) {
		u32 idx = rcnt / SUNXI_RETRY_CNT_PER_PHA_V4P00X;

		sunxi_mmc_set_dly_raw(host, sunxi_phase[idx][0],
				      sunxi_phase[idx][1]);
		return 0;
	}

	sunxi_mmc_set_dly_raw(host, sunxi_phase[0][0], sunxi_phase[0][1]);
	SM_INFO(mmc_dev(host->mmc), "sunxi v4p00x retry give up\n");
	return -1;
}
412
sunxi_mmc_init_priv_v4p00x(struct sunxi_mmc_host * host,struct platform_device * pdev,int phy_index)413 void sunxi_mmc_init_priv_v4p00x(struct sunxi_mmc_host *host,
414 struct platform_device *pdev, int phy_index)
415 {
416 struct sunxi_mmc_ver_priv *ver_priv =
417 devm_kzalloc(&pdev->dev, sizeof(struct sunxi_mmc_ver_priv),
418 GFP_KERNEL);
419 host->version_priv_dat = ver_priv;
420 ver_priv->mmc_clk_dly[mmc_clk_400k].cmod = mmc_clk_400k;
421 ver_priv->mmc_clk_dly[mmc_clk_400k].mod_str = "sunxi-dly-400k";
422 ver_priv->mmc_clk_dly[mmc_clk_400k].drv_ph = 0;
423 ver_priv->mmc_clk_dly[mmc_clk_400k].sam_ph = 0;
424 ver_priv->mmc_clk_dly[mmc_clk_26M].cmod = mmc_clk_26M;
425 ver_priv->mmc_clk_dly[mmc_clk_26M].mod_str = "sunxi-dly-26M";
426 ver_priv->mmc_clk_dly[mmc_clk_26M].drv_ph = 0;
427 ver_priv->mmc_clk_dly[mmc_clk_26M].sam_ph = 5;
428 ver_priv->mmc_clk_dly[mmc_clk_52M].cmod = mmc_clk_52M,
429 ver_priv->mmc_clk_dly[mmc_clk_52M].mod_str = "sunxi-dly-52M";
430 ver_priv->mmc_clk_dly[mmc_clk_52M].drv_ph = 3;
431 ver_priv->mmc_clk_dly[mmc_clk_52M].sam_ph = 4;
432 ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].cmod = mmc_clk_52M_DDR4;
433 ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].mod_str =
434 "sunxi-dly-52M-ddr4";
435 ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].drv_ph = 2;
436 ver_priv->mmc_clk_dly[mmc_clk_52M_DDR4].sam_ph = 4;
437 ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].cmod = mmc_clk_52M_DDR8;
438 ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].mod_str =
439 "sunxi-dly-52M-ddr8";
440 ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].drv_ph = 2;
441 ver_priv->mmc_clk_dly[mmc_clk_52M_DDR8].sam_ph = 4;
442 ver_priv->mmc_clk_dly[mmc_clk_104M].cmod = mmc_clk_104M;
443 ver_priv->mmc_clk_dly[mmc_clk_104M].mod_str =
444 "sunxi-dly-104M";
445 ver_priv->mmc_clk_dly[mmc_clk_104M].drv_ph = 1;
446 ver_priv->mmc_clk_dly[mmc_clk_104M].sam_ph = 4;
447 ver_priv->mmc_clk_dly[mmc_clk_208M].cmod = mmc_clk_208M;
448 ver_priv->mmc_clk_dly[mmc_clk_208M].mod_str =
449 "sunxi-dly-208M";
450 ver_priv->mmc_clk_dly[mmc_clk_208M].drv_ph = 1;
451 ver_priv->mmc_clk_dly[mmc_clk_208M].sam_ph = 4;
452 host->sunxi_mmc_clk_set_rate = sunxi_mmc_clk_set_rate_for_sdmmc_v4p00x;
453 host->dma_tl = SUNXI_DMA_TL_SDMMC_V4P0X;
454 host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC_V4P0X;
455 host->sunxi_mmc_thld_ctl = NULL;
456 host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg_v4p00x;
457 host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg_v4p00x;
458 sunxi_mmc_reg_ex_res_inter(host, phy_index);
459 host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
460 host->phy_index = phy_index;
461 host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff;
462 host->sunxi_mmc_judge_retry = sunxi_mmc_judge_retry_v4p00x;
463 /*sunxi_of_parse_clk_dly(host);*/
464 }
465 EXPORT_SYMBOL_GPL(sunxi_mmc_init_priv_v4p00x);
466