1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * (C) Copyright 2012 SAMSUNG Electronics
4 * Jaehoon Chung <jh80.chung@samsung.com>
5 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
6 */
7
8 #include <bouncebuf.h>
9 #include <common.h>
10 #include <cpu_func.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <memalign.h>
14 #include <mmc.h>
15 #include <dwmmc.h>
16 #include <wait_bit.h>
17 #include <power/regulator.h>
18
19 #define PAGE_SIZE 4096
20
/*
 * Request a controller reset and poll until the reset bits self-clear.
 *
 * @host:  controller instance
 * @value: reset flags to write into DWMCI_CTRL
 *
 * Returns 1 when the reset completed, 0 if the bits never cleared.
 */
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long tries = 1000;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (tries--) {
		if (!(dwmci_readl(host, DWMCI_CTRL) & DWMCI_RESET_ALL))
			return 1;
	}

	return 0;
}
35
/*
 * Fill one internal-DMA (IDMAC) descriptor and chain it to its
 * successor, which is assumed to sit immediately after it in memory.
 *
 * @idmac: descriptor to fill
 * @desc0: flags word
 * @desc1: byte count
 * @desc2: buffer address
 */
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	idmac->flags = desc0;
	idmac->cnt = desc1;
	idmac->addr = desc2;
	/* Chain to the physically-next descriptor in the array. */
	idmac->next_addr = (ulong)(idmac + 1);
}
46
/*
 * Build the IDMAC descriptor chain for a transfer and arm the internal
 * DMA engine.
 *
 * @host:          controller instance
 * @data:          MMC transfer description (block size/count)
 * @cur_idmac:     first descriptor of a contiguous, cache-aligned array
 * @bounce_buffer: DMA-safe buffer holding/receiving the payload
 *
 * Each descriptor covers at most 8 blocks (one PAGE_SIZE chunk of the
 * bounce buffer); the last descriptor carries the LD flag.
 */
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	/* Drain any stale FIFO contents before a fresh transfer. */
	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);

	/* Point the controller at the start of the descriptor chain. */
	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		/* OWN: owned by DMA engine; CH: chained descriptor mode. */
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0; /* first segment */
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD; /* last segment */
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		cur_idmac++;
		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		i++;
	} while(1);

	/* Make the descriptors visible to the (non-coherent) DMA engine. */
	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));

	/* Enable DMA and the internal DMA controller. */
	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	/* Program transfer geometry. */
	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
100
/*
 * Poll DWMCI_STATUS until the given FIFO condition bit clears.
 *
 * @host: controller instance
 * @bit:  status bit to wait on (e.g. DWMCI_FIFO_EMPTY / DWMCI_FIFO_FULL)
 * @len:  out: last raw DWMCI_STATUS value read
 *
 * Returns 0 once the bit is clear, -ETIMEDOUT if it stays set.
 */
static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 timeout = 20000;

	for (*len = dwmci_readl(host, DWMCI_STATUS);
	     (*len & bit) && --timeout;
	     *len = dwmci_readl(host, DWMCI_STATUS))
		udelay(200);

	if (!timeout) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}
118
dwmci_get_timeout(struct mmc * mmc,const unsigned int size)119 static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
120 {
121 unsigned int timeout;
122
123 timeout = size * 8; /* counting in bits */
124 timeout *= 10; /* wait 10 times as long */
125 timeout /= mmc->clock;
126 timeout /= mmc->bus_width;
127 timeout /= mmc->ddr_mode ? 2 : 1;
128 timeout *= 1000; /* counting in msec */
129 timeout = (timeout < 1000) ? 1000 : timeout;
130
131 return timeout;
132 }
133
/*
 * Wait for a data transfer to complete, servicing the data FIFO by PIO
 * when host->fifo_mode is set (DMA transfers only need the completion
 * poll here).
 *
 * @host: controller instance
 * @data: transfer description; data->dest/src is read or filled in
 *        FIFO mode
 *
 * Returns 0 on success, -EINVAL on a controller-reported data error,
 * -ETIMEDOUT if the transfer does not finish within the computed window.
 */
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	/* FIFO depth is recovered from the RX watermark (depth/2 - 1). */
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_timeout(mmc, size);

	/* FIFO is accessed one 32-bit word at a time. */
	size /= 4;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				/* Drain whatever the FIFO has accumulated. */
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_EMPTY,
							&len);
					if (ret < 0)
						break;

					/* Words currently held in the FIFO. */
					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				/* Refill the FIFO as slots free up. */
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_FULL,
							&len);
					if (ret < 0)
						break;

					/* Free word slots in the FIFO. */
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	/* Acknowledge the interrupt bits we observed. */
	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}
226
dwmci_set_transfer_mode(struct dwmci_host * host,struct mmc_data * data)227 static int dwmci_set_transfer_mode(struct dwmci_host *host,
228 struct mmc_data *data)
229 {
230 unsigned long mode;
231
232 mode = DWMCI_CMD_DATA_EXP;
233 if (data->flags & MMC_DATA_WRITE)
234 mode |= DWMCI_CMD_RW;
235
236 return mode;
237 }
238
#ifdef CONFIG_DM_MMC
/*
 * Send one MMC command and run its optional data phase.
 *
 * DM and non-DM builds differ only in how struct mmc is obtained; the
 * function body is shared across the #ifdef.
 *
 * Returns 0 on success or a negative error code (-ETIMEDOUT, -EIO, ...).
 */
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* One IDMAC descriptor per 8 blocks; only used in DMA mode. */
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;	/* ms to wait for card-busy to drop */
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	/* Wait for any previous transaction to release the data lines. */
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	/* Start from a clean interrupt state. */
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			/* PIO path: program sizes, reset the FIFO. */
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			/* DMA path: stage the payload in a DMA-safe buffer. */
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void*)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void*)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	/* 136-bit responses with busy signalling are not supported. */
	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n",cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	/* Busy-poll for command-done. */
	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			/* With data pending, status is cleared later. */
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			/* RESP3..RESP0 map to response[0]..response[3]. */
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only dma mode need it */
		if (!host->fifo_mode) {
			/* Wait for the IDMAC receive/transmit interrupt. */
			if (data->flags == MMC_DATA_READ)
				mask = DWMCI_IDINTEN_RI;
			else
				mask = DWMCI_IDINTEN_TI;
			ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
						mask, true, 1000, false);
			if (ret)
				debug("%s: DWMCI_IDINTEN mask 0x%x timeout.\n",
				      __func__, mask);
			/* clear interrupts */
			dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

			/* Stop DMA and copy data back out of the bounce buf. */
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}
397
398 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
399 {
400 u32 div, status;
401 int timeout = 10000;
402 unsigned long sclk;
403
404 if ((freq == host->clock) || (freq == 0))
405 return 0;
406 /*
407 * If host->get_mmc_clk isn't defined,
408 * then assume that host->bus_hz is source clock value.
409 * host->bus_hz should be set by user.
410 */
411 if (host->get_mmc_clk)
412 sclk = host->get_mmc_clk(host, freq);
413 else if (host->bus_hz)
414 sclk = host->bus_hz;
415 else {
416 debug("%s: Didn't get source clock value.\n", __func__);
417 return -EINVAL;
418 }
419
420 if (sclk == freq)
421 div = 0; /* bypass mode */
422 else
423 div = DIV_ROUND_UP(sclk, 2 * freq);
424
425 dwmci_writel(host, DWMCI_CLKENA, 0);
426 dwmci_writel(host, DWMCI_CLKSRC, 0);
427
428 dwmci_writel(host, DWMCI_CLKDIV, div);
429 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
430 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
431
432 do {
433 status = dwmci_readl(host, DWMCI_CMD);
434 if (timeout-- < 0) {
435 debug("%s: Timeout!\n", __func__);
436 return -ETIMEDOUT;
437 }
438 } while (status & DWMCI_CMD_START);
439
440 dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
441 DWMCI_CLKEN_LOW_PWR);
442
443 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
444 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
445
446 timeout = 10000;
447 do {
448 status = dwmci_readl(host, DWMCI_CMD);
449 if (timeout-- < 0) {
450 debug("%s: Timeout!\n", __func__);
451 return -ETIMEDOUT;
452 }
453 } while (status & DWMCI_CMD_START);
454
455 host->clock = freq;
456
457 return 0;
458 }
459
#ifdef CONFIG_DM_MMC
/*
 * Apply the current bus settings (clock, width, DDR mode, signalling
 * voltage) to the controller. DM and non-DM builds share the body.
 *
 * Returns 0 on success or a negative error from the regulator core.
 */
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	/* NOTE(review): dwmci_setup_bus() errors are ignored here. */
	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	/* Mirror the DDR setting into the UHS register. */
	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	/* Board hook for SoC-specific clock-select tuning, if any. */
	if (host->clksel)
		host->clksel(host);

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vqmmc_supply) {
		int ret;

		/* Match I/O voltage to the negotiated signalling level. */
		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}
516
/*
 * Power up and reset the controller, mask all interrupts and program
 * the FIFO thresholds.
 *
 * Returns 0 on success, -EIO if the full reset never completes.
 */
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	/* Board-specific setup (pinmux, clocks, ...) first, if provided. */
	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	/* Clear and mask all slot interrupts. */
	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	/* Maximum data/response timeout values. */
	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	/* Reset the internal DMA controller; keep its interrupts off. */
	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		/*
		 * Derive the FIFO depth from the power-on watermark value
		 * (depth/2 - 1) and set the watermarks to half depth.
		 */
		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	/* DMA mode relies on the IDMAC interrupt status bits. */
	if (!host->fifo_mode)
		dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);

	return 0;
}
560
#ifdef CONFIG_DM_MMC
/* Driver-model probe: initialise the controller behind this udevice. */
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

/* Driver-model MMC operations. */
const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
/* Legacy (non-DM) MMC operations; init is called via the ops table. */
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif
581
582 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
583 u32 max_clk, u32 min_clk)
584 {
585 cfg->name = host->name;
586 #ifndef CONFIG_DM_MMC
587 cfg->ops = &dwmci_ops;
588 #endif
589 cfg->f_min = min_clk;
590 cfg->f_max = max_clk;
591
592 cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
593
594 cfg->host_caps = host->caps;
595
596 if (host->buswidth == 8) {
597 cfg->host_caps |= MMC_MODE_8BIT;
598 cfg->host_caps &= ~MMC_MODE_4BIT;
599 } else {
600 cfg->host_caps |= MMC_MODE_4BIT;
601 cfg->host_caps &= ~MMC_MODE_8BIT;
602 }
603 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
604
605 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
606 }
607
#ifdef CONFIG_BLK
/* Driver-model build: bind the MMC device to the block layer. */
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
/*
 * Legacy registration: fill the host's config and create the mmc
 * device. Returns 0 on success, -1 if the device could not be created.
 */
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif
625