1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * (C) Copyright 2012 SAMSUNG Electronics
4 * Jaehoon Chung <jh80.chung@samsung.com>
5 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
6 */
7
8 #include <bouncebuf.h>
9 #include <common.h>
10 #include <errno.h>
11 #include <malloc.h>
12 #include <memalign.h>
13 #include <mmc.h>
14 #include <dwmmc.h>
15
16 #define PAGE_SIZE 4096
17
dwmci_wait_reset(struct dwmci_host * host,u32 value)18 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
19 {
20 unsigned long timeout = 1000;
21 u32 ctrl;
22
23 dwmci_writel(host, DWMCI_CTRL, value);
24
25 while (timeout--) {
26 ctrl = dwmci_readl(host, DWMCI_CTRL);
27 if (!(ctrl & DWMCI_RESET_ALL))
28 return 1;
29 }
30 return 0;
31 }
32
dwmci_set_idma_desc(struct dwmci_idmac * idmac,u32 desc0,u32 desc1,u32 desc2)33 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
34 u32 desc0, u32 desc1, u32 desc2)
35 {
36 struct dwmci_idmac *desc = idmac;
37
38 desc->flags = desc0;
39 desc->cnt = desc1;
40 desc->addr = desc2;
41 desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
42 }
43
dwmci_prepare_data(struct dwmci_host * host,struct mmc_data * data,struct dwmci_idmac * cur_idmac,void * bounce_buffer)44 static void dwmci_prepare_data(struct dwmci_host *host,
45 struct mmc_data *data,
46 struct dwmci_idmac *cur_idmac,
47 void *bounce_buffer)
48 {
49 unsigned long ctrl;
50 unsigned int i = 0, flags, cnt, blk_cnt;
51 ulong data_start, data_end;
52
53
54 blk_cnt = data->blocks;
55
56 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
57
58 data_start = (ulong)cur_idmac;
59 dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);
60
61 do {
62 flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
63 flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
64 if (blk_cnt <= 8) {
65 flags |= DWMCI_IDMAC_LD;
66 cnt = data->blocksize * blk_cnt;
67 } else
68 cnt = data->blocksize * 8;
69
70 dwmci_set_idma_desc(cur_idmac, flags, cnt,
71 (ulong)bounce_buffer + (i * PAGE_SIZE));
72
73 if (blk_cnt <= 8)
74 break;
75 blk_cnt -= 8;
76 cur_idmac++;
77 i++;
78 } while(1);
79
80 data_end = (ulong)cur_idmac;
81 flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);
82
83 ctrl = dwmci_readl(host, DWMCI_CTRL);
84 ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
85 dwmci_writel(host, DWMCI_CTRL, ctrl);
86
87 ctrl = dwmci_readl(host, DWMCI_BMOD);
88 ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
89 dwmci_writel(host, DWMCI_BMOD, ctrl);
90
91 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
92 dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
93 }
94
dwmci_data_transfer(struct dwmci_host * host,struct mmc_data * data)95 static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
96 {
97 int ret = 0;
98 u32 timeout = 240000;
99 u32 mask, size, i, len = 0;
100 u32 *buf = NULL;
101 ulong start = get_timer(0);
102 u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
103 RX_WMARK_SHIFT) + 1) * 2;
104
105 size = data->blocksize * data->blocks / 4;
106 if (data->flags == MMC_DATA_READ)
107 buf = (unsigned int *)data->dest;
108 else
109 buf = (unsigned int *)data->src;
110
111 for (;;) {
112 mask = dwmci_readl(host, DWMCI_RINTSTS);
113 /* Error during data transfer. */
114 if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
115 debug("%s: DATA ERROR!\n", __func__);
116 ret = -EINVAL;
117 break;
118 }
119
120 if (host->fifo_mode && size) {
121 len = 0;
122 if (data->flags == MMC_DATA_READ &&
123 (mask & DWMCI_INTMSK_RXDR)) {
124 while (size) {
125 len = dwmci_readl(host, DWMCI_STATUS);
126 len = (len >> DWMCI_FIFO_SHIFT) &
127 DWMCI_FIFO_MASK;
128 len = min(size, len);
129 for (i = 0; i < len; i++)
130 *buf++ =
131 dwmci_readl(host, DWMCI_DATA);
132 size = size > len ? (size - len) : 0;
133 }
134 dwmci_writel(host, DWMCI_RINTSTS,
135 DWMCI_INTMSK_RXDR);
136 } else if (data->flags == MMC_DATA_WRITE &&
137 (mask & DWMCI_INTMSK_TXDR)) {
138 while (size) {
139 len = dwmci_readl(host, DWMCI_STATUS);
140 len = fifo_depth - ((len >>
141 DWMCI_FIFO_SHIFT) &
142 DWMCI_FIFO_MASK);
143 len = min(size, len);
144 for (i = 0; i < len; i++)
145 dwmci_writel(host, DWMCI_DATA,
146 *buf++);
147 size = size > len ? (size - len) : 0;
148 }
149 dwmci_writel(host, DWMCI_RINTSTS,
150 DWMCI_INTMSK_TXDR);
151 }
152 }
153
154 /* Data arrived correctly. */
155 if (mask & DWMCI_INTMSK_DTO) {
156 ret = 0;
157 break;
158 }
159
160 /* Check for timeout. */
161 if (get_timer(start) > timeout) {
162 debug("%s: Timeout waiting for data!\n",
163 __func__);
164 ret = -ETIMEDOUT;
165 break;
166 }
167 }
168
169 dwmci_writel(host, DWMCI_RINTSTS, mask);
170
171 return ret;
172 }
173
dwmci_set_transfer_mode(struct dwmci_host * host,struct mmc_data * data)174 static int dwmci_set_transfer_mode(struct dwmci_host *host,
175 struct mmc_data *data)
176 {
177 unsigned long mode;
178
179 mode = DWMCI_CMD_DATA_EXP;
180 if (data->flags & MMC_DATA_WRITE)
181 mode |= DWMCI_CMD_RW;
182
183 return mode;
184 }
185
186 #ifdef CONFIG_DM_MMC
dwmci_send_cmd(struct udevice * dev,struct mmc_cmd * cmd,struct mmc_data * data)187 static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
188 struct mmc_data *data)
189 {
190 struct mmc *mmc = mmc_get_mmc_dev(dev);
191 #else
192 static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
193 struct mmc_data *data)
194 {
195 #endif
196 struct dwmci_host *host = mmc->priv;
197 ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
198 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
199 int ret = 0, flags = 0, i;
200 unsigned int timeout = 500;
201 u32 retry = 100000;
202 u32 mask, ctrl;
203 ulong start = get_timer(0);
204 struct bounce_buffer bbstate;
205
206 while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
207 if (get_timer(start) > timeout) {
208 debug("%s: Timeout on data busy\n", __func__);
209 return -ETIMEDOUT;
210 }
211 }
212
213 dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
214
215 if (data) {
216 if (host->fifo_mode) {
217 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
218 dwmci_writel(host, DWMCI_BYTCNT,
219 data->blocksize * data->blocks);
220 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
221 } else {
222 if (data->flags == MMC_DATA_READ) {
223 bounce_buffer_start(&bbstate, (void*)data->dest,
224 data->blocksize *
225 data->blocks, GEN_BB_WRITE);
226 } else {
227 bounce_buffer_start(&bbstate, (void*)data->src,
228 data->blocksize *
229 data->blocks, GEN_BB_READ);
230 }
231 dwmci_prepare_data(host, data, cur_idmac,
232 bbstate.bounce_buffer);
233 }
234 }
235
236 dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
237
238 if (data)
239 flags = dwmci_set_transfer_mode(host, data);
240
241 if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
242 return -1;
243
244 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
245 flags |= DWMCI_CMD_ABORT_STOP;
246 else
247 flags |= DWMCI_CMD_PRV_DAT_WAIT;
248
249 if (cmd->resp_type & MMC_RSP_PRESENT) {
250 flags |= DWMCI_CMD_RESP_EXP;
251 if (cmd->resp_type & MMC_RSP_136)
252 flags |= DWMCI_CMD_RESP_LENGTH;
253 }
254
255 if (cmd->resp_type & MMC_RSP_CRC)
256 flags |= DWMCI_CMD_CHECK_CRC;
257
258 flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
259
260 debug("Sending CMD%d\n",cmd->cmdidx);
261
262 dwmci_writel(host, DWMCI_CMD, flags);
263
264 for (i = 0; i < retry; i++) {
265 mask = dwmci_readl(host, DWMCI_RINTSTS);
266 if (mask & DWMCI_INTMSK_CDONE) {
267 if (!data)
268 dwmci_writel(host, DWMCI_RINTSTS, mask);
269 break;
270 }
271 }
272
273 if (i == retry) {
274 debug("%s: Timeout.\n", __func__);
275 return -ETIMEDOUT;
276 }
277
278 if (mask & DWMCI_INTMSK_RTO) {
279 /*
280 * Timeout here is not necessarily fatal. (e)MMC cards
281 * will splat here when they receive CMD55 as they do
282 * not support this command and that is exactly the way
283 * to tell them apart from SD cards. Thus, this output
284 * below shall be debug(). eMMC cards also do not favor
285 * CMD8, please keep that in mind.
286 */
287 debug("%s: Response Timeout.\n", __func__);
288 return -ETIMEDOUT;
289 } else if (mask & DWMCI_INTMSK_RE) {
290 debug("%s: Response Error.\n", __func__);
291 return -EIO;
292 }
293
294
295 if (cmd->resp_type & MMC_RSP_PRESENT) {
296 if (cmd->resp_type & MMC_RSP_136) {
297 cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
298 cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
299 cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
300 cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
301 } else {
302 cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
303 }
304 }
305
306 if (data) {
307 ret = dwmci_data_transfer(host, data);
308
309 /* only dma mode need it */
310 if (!host->fifo_mode) {
311 ctrl = dwmci_readl(host, DWMCI_CTRL);
312 ctrl &= ~(DWMCI_DMA_EN);
313 dwmci_writel(host, DWMCI_CTRL, ctrl);
314 bounce_buffer_stop(&bbstate);
315 }
316 }
317
318 udelay(100);
319
320 return ret;
321 }
322
323 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
324 {
325 u32 div, status;
326 int timeout = 10000;
327 unsigned long sclk;
328
329 if ((freq == host->clock) || (freq == 0))
330 return 0;
331 /*
332 * If host->get_mmc_clk isn't defined,
333 * then assume that host->bus_hz is source clock value.
334 * host->bus_hz should be set by user.
335 */
336 if (host->get_mmc_clk)
337 sclk = host->get_mmc_clk(host, freq);
338 else if (host->bus_hz)
339 sclk = host->bus_hz;
340 else {
341 debug("%s: Didn't get source clock value.\n", __func__);
342 return -EINVAL;
343 }
344
345 if (sclk == freq)
346 div = 0; /* bypass mode */
347 else
348 div = DIV_ROUND_UP(sclk, 2 * freq);
349
350 dwmci_writel(host, DWMCI_CLKENA, 0);
351 dwmci_writel(host, DWMCI_CLKSRC, 0);
352
353 dwmci_writel(host, DWMCI_CLKDIV, div);
354 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
355 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
356
357 do {
358 status = dwmci_readl(host, DWMCI_CMD);
359 if (timeout-- < 0) {
360 debug("%s: Timeout!\n", __func__);
361 return -ETIMEDOUT;
362 }
363 } while (status & DWMCI_CMD_START);
364
365 dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
366 DWMCI_CLKEN_LOW_PWR);
367
368 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
369 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
370
371 timeout = 10000;
372 do {
373 status = dwmci_readl(host, DWMCI_CMD);
374 if (timeout-- < 0) {
375 debug("%s: Timeout!\n", __func__);
376 return -ETIMEDOUT;
377 }
378 } while (status & DWMCI_CMD_START);
379
380 host->clock = freq;
381
382 return 0;
383 }
384
385 #ifdef CONFIG_DM_MMC
386 static int dwmci_set_ios(struct udevice *dev)
387 {
388 struct mmc *mmc = mmc_get_mmc_dev(dev);
389 #else
390 static int dwmci_set_ios(struct mmc *mmc)
391 {
392 #endif
393 struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
394 u32 ctype, regs;
395
396 debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
397
398 dwmci_setup_bus(host, mmc->clock);
399 switch (mmc->bus_width) {
400 case 8:
401 ctype = DWMCI_CTYPE_8BIT;
402 break;
403 case 4:
404 ctype = DWMCI_CTYPE_4BIT;
405 break;
406 default:
407 ctype = DWMCI_CTYPE_1BIT;
408 break;
409 }
410
411 dwmci_writel(host, DWMCI_CTYPE, ctype);
412
413 regs = dwmci_readl(host, DWMCI_UHS_REG);
414 if (mmc->ddr_mode)
415 regs |= DWMCI_DDR_MODE;
416 else
417 regs &= ~DWMCI_DDR_MODE;
418
419 dwmci_writel(host, DWMCI_UHS_REG, regs);
420
421 if (host->clksel)
422 host->clksel(host);
423
424 return 0;
425 }
426
427 static int dwmci_init(struct mmc *mmc)
428 {
429 struct dwmci_host *host = mmc->priv;
430
431 if (host->board_init)
432 host->board_init(host);
433
434 dwmci_writel(host, DWMCI_PWREN, 1);
435
436 if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
437 debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
438 return -EIO;
439 }
440
441 /* Enumerate at 400KHz */
442 dwmci_setup_bus(host, mmc->cfg->f_min);
443
444 dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
445 dwmci_writel(host, DWMCI_INTMASK, 0);
446
447 dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);
448
449 dwmci_writel(host, DWMCI_IDINTEN, 0);
450 dwmci_writel(host, DWMCI_BMOD, 1);
451
452 if (!host->fifoth_val) {
453 uint32_t fifo_size;
454
455 fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
456 fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
457 host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
458 TX_WMARK(fifo_size / 2);
459 }
460 dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
461
462 dwmci_writel(host, DWMCI_CLKENA, 0);
463 dwmci_writel(host, DWMCI_CLKSRC, 0);
464
465 return 0;
466 }
467
468 #ifdef CONFIG_DM_MMC
469 int dwmci_probe(struct udevice *dev)
470 {
471 struct mmc *mmc = mmc_get_mmc_dev(dev);
472
473 return dwmci_init(mmc);
474 }
475
476 const struct dm_mmc_ops dm_dwmci_ops = {
477 .send_cmd = dwmci_send_cmd,
478 .set_ios = dwmci_set_ios,
479 };
480
481 #else
482 static const struct mmc_ops dwmci_ops = {
483 .send_cmd = dwmci_send_cmd,
484 .set_ios = dwmci_set_ios,
485 .init = dwmci_init,
486 };
487 #endif
488
489 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
490 u32 max_clk, u32 min_clk)
491 {
492 cfg->name = host->name;
493 #ifndef CONFIG_DM_MMC
494 cfg->ops = &dwmci_ops;
495 #endif
496 cfg->f_min = min_clk;
497 cfg->f_max = max_clk;
498
499 cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
500
501 cfg->host_caps = host->caps;
502
503 if (host->buswidth == 8) {
504 cfg->host_caps |= MMC_MODE_8BIT;
505 cfg->host_caps &= ~MMC_MODE_4BIT;
506 } else {
507 cfg->host_caps |= MMC_MODE_4BIT;
508 cfg->host_caps &= ~MMC_MODE_8BIT;
509 }
510 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
511
512 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
513 }
514
515 #ifdef CONFIG_BLK
516 int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
517 {
518 return mmc_bind(dev, mmc, cfg);
519 }
520 #else
521 int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
522 {
523 dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);
524
525 host->mmc = mmc_create(&host->cfg, host);
526 if (host->mmc == NULL)
527 return -1;
528
529 return 0;
530 }
531 #endif
532