1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2011, Marvell Semiconductor Inc.
4 * Lei Wen <leiwen@marvell.com>
5 *
6 * Back ported to the 8xx platform (from the 8260 platform) by
7 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
8 */
9
10 #include <common.h>
11 #include <cpu_func.h>
12 #include <dm.h>
13 #include <errno.h>
14 #include <malloc.h>
15 #include <mmc.h>
16 #include <sdhci.h>
17 #include <dm.h>
18
#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
/* Board-provided, fixed-address bounce buffer used for SDMA transfers */
void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
#else
/*
 * Lazily allocated in sdhci_init() when the controller needs 8-byte
 * aligned SDMA addresses (SDHCI_QUIRK_32BIT_DMA_ADDR).
 */
void *aligned_buffer;
#endif
24
/*
 * Issue a software reset for the bits in @mask and poll until the
 * controller clears them again.  Gives up (with a message) after
 * roughly 100 ms; reset failure is not propagated to the caller.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long remaining = 100;	/* 100 polls x 1 ms */

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	for (;;) {
		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (!remaining) {
			printf("%s: Reset 0x%x never completed.\n",
			       __func__, (int)mask);
			return;
		}
		remaining--;
		udelay(1000);
	}
}
42
/*
 * Copy the command response out of the controller's response registers
 * into cmd->response[].
 *
 * For 136-bit (R2) responses the controller strips the CRC byte, so
 * each 32-bit word is shifted left by 8 and stitched together with the
 * top byte read from the next-lower response register.  Short responses
 * are a single 32-bit read.
 */
static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
	int i;
	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
		}
	} else {
		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
	}
}
59
sdhci_transfer_pio(struct sdhci_host * host,struct mmc_data * data)60 static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
61 {
62 int i;
63 char *offs;
64 for (i = 0; i < data->blocksize; i += 4) {
65 offs = data->dest + i;
66 if (data->flags == MMC_DATA_READ)
67 *(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
68 else
69 sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
70 }
71 }
72
73 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
/*
 * Fill the next free slot of the ADMA2 descriptor table.
 *
 * @buf: address of the data chunk to transfer
 * @len: chunk length in bytes (at most ADMA_MAX_LEN)
 * @end: true for the final descriptor; sets the END attribute instead
 *       of advancing host->desc_slot to the next entry
 */
static void sdhci_adma_desc(struct sdhci_host *host, char *buf, u16 len,
			    bool end)
{
	struct sdhci_adma_desc *desc;
	u8 attr;

	desc = &host->adma_desc_table[host->desc_slot];

	attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
	if (!end)
		host->desc_slot++;
	else
		attr |= ADMA_DESC_ATTR_END;

	desc->attr = attr;
	desc->len = len;
	desc->reserved = 0;
	desc->addr_lo = (dma_addr_t)buf;
#ifdef CONFIG_DMA_ADDR_T_64BIT
	/* Upper 32 address bits for 64-bit ADMA descriptors */
	desc->addr_hi = (u64)buf >> 32;
#endif
}
96
/*
 * Build the ADMA2 descriptor table describing @data.
 *
 * The transfer is split into desc_count chunks of at most ADMA_MAX_LEN
 * bytes: desc_count - 1 full-size descriptors followed by one final
 * descriptor carrying the remainder with the END attribute set.  The
 * table is then flushed from the data cache so the controller reads a
 * coherent copy from memory.
 */
static void sdhci_prepare_adma_table(struct sdhci_host *host,
				     struct mmc_data *data)
{
	uint trans_bytes = data->blocksize * data->blocks;
	uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN);
	int i = desc_count;
	char *buf;

	host->desc_slot = 0;

	/* dest/src share storage; pick the one matching the direction */
	if (data->flags & MMC_DATA_READ)
		buf = data->dest;
	else
		buf = (char *)data->src;

	/* All but the last descriptor cover ADMA_MAX_LEN bytes each */
	while (--i) {
		sdhci_adma_desc(host, buf, ADMA_MAX_LEN, false);
		buf += ADMA_MAX_LEN;
		trans_bytes -= ADMA_MAX_LEN;
	}

	/* Final descriptor: remaining bytes, END attribute set */
	sdhci_adma_desc(host, buf, trans_bytes, true);

	flush_cache((dma_addr_t)host->adma_desc_table,
		    ROUND(desc_count * sizeof(struct sdhci_adma_desc),
			  ARCH_DMA_MINALIGN));
}
124 #elif defined(CONFIG_MMC_SDHCI_SDMA)
/* No-op stub: with SDMA only, no ADMA descriptor table is required */
static void sdhci_prepare_adma_table(struct sdhci_host *host,
				     struct mmc_data *data)
{}
128 #endif
129 #if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
/*
 * Program the controller's DMA engine (SDMA or ADMA) for the upcoming
 * transfer and flush the data buffer from the cache.
 *
 * @is_aligned is cleared when the SDMA bounce buffer has to be used, so
 * that the caller knows to copy read data back out of aligned_buffer
 * after the transfer completes.
 */
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{
	unsigned char ctrl;

	if (data->flags == MMC_DATA_READ)
		host->start_addr = (dma_addr_t)data->dest;
	else
		host->start_addr = (dma_addr_t)data->src;

	/* Select the DMA mode bits in the host control register */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & USE_ADMA64)
		ctrl |= SDHCI_CTRL_ADMA64;
	else if (host->flags & USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	if (host->flags & USE_SDMA) {
		/*
		 * Quirky controllers require 8-byte aligned SDMA
		 * addresses; divert through the bounce buffer if the
		 * caller's buffer is misaligned.
		 */
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    (host->start_addr & 0x7) != 0x0) {
			*is_aligned = 0;
			host->start_addr = (unsigned long)aligned_buffer;
			if (data->flags != MMC_DATA_READ)
				memcpy(aligned_buffer, data->src, trans_bytes);
		}

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
		/*
		 * Always use this bounce-buffer when
		 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
		 */
		*is_aligned = 0;
		host->start_addr = (unsigned long)aligned_buffer;
		if (data->flags != MMC_DATA_READ)
			memcpy(aligned_buffer, data->src, trans_bytes);
#endif
		sdhci_writel(host, host->start_addr, SDHCI_DMA_ADDRESS);

	} else if (host->flags & (USE_ADMA | USE_ADMA64)) {
		sdhci_prepare_adma_table(host, data);

		sdhci_writel(host, (u32)host->adma_addr, SDHCI_ADMA_ADDRESS);
		if (host->flags & USE_ADMA64)
			sdhci_writel(host, (u64)host->adma_addr >> 32,
				     SDHCI_ADMA_ADDRESS_HI);
	}

	/* Make the data buffer visible to the DMA engine */
	flush_cache(host->start_addr, ROUND(trans_bytes, ARCH_DMA_MINALIGN));
}
180 #else
/* No-op stub: PIO-only configuration, nothing to program for DMA */
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{}
184 #endif
/*
 * Wait for a data transfer to finish, servicing it as needed.
 *
 * In PIO mode each ready block is moved via sdhci_transfer_pio(); in
 * SDMA mode the DMA address register is re-armed at every boundary
 * interrupt.  Polls the interrupt status until SDHCI_INT_DATA_END.
 *
 * Return: 0 on success, -EIO on a controller-reported error,
 * -ETIMEDOUT if the transfer never completes.
 */
static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
{
	dma_addr_t start_addr = host->start_addr;
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		/* PIO path: move the next block when the FIFO is ready */
		if (!transfer_done && (stat & rdy)) {
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/* Keep looping until the SDHCI_INT_DATA_END is
				 * cleared, even if we finished sending all the
				 * blocks.
				 */
				transfer_done = true;
				continue;
			}
		}
		/* SDMA path: restart DMA at the next boundary address */
		if ((host->flags & USE_DMA) && !transfer_done &&
		    (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			if (host->flags & USE_SDMA) {
				start_addr &=
					~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
				start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
				sdhci_writel(host, start_addr,
					     SDHCI_DMA_ADDRESS);
			}
		}
		if (timeout-- > 0)
			udelay(10);
		else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));
	return 0;
}
236
/*
 * No command will be sent by the driver while the card is busy, so the
 * driver must wait for the card-ready state first.
 * Each time the card is still busy when the current timeout expires, the
 * (last) timeout value is doubled, but only while it stays within the
 * globally defined maximum.
 * Every call to this function starts from the last timeout value used.
 */
244 #define SDHCI_CMD_MAX_TIMEOUT 3200
245 #define SDHCI_CMD_DEFAULT_TIMEOUT 100
246 #define SDHCI_READ_STATUS_TIMEOUT 1000
247
#ifdef CONFIG_DM_MMC
/*
 * Send one MMC command (and optional data transfer) to the card.
 *
 * Waits for the CMD/DAT inhibit bits to clear (with an adaptive,
 * persistent busy timeout), programs the transfer mode and DMA engine
 * when @data is given, issues the command, then collects the response
 * and runs the data phase.
 *
 * Return: 0 on success, -ECOMM on busy/command failure, -ETIMEDOUT on
 * timeout, or the error from sdhci_transfer_data().
 */
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode;
	unsigned int time = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	host->start_addr = 0;
	/* Timeout unit - ms; static so the doubled value persists across calls */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
		mask &= ~SDHCI_DATA_INHIBIT;

	/* Busy-wait for the card; double the timeout on expiry, up to the max */
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	/* Clear any stale interrupt status before issuing the command */
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	mask = SDHCI_INT_RESPONSE;
	/* Tuning commands complete on data-available, not command-complete */
	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
		mask = SDHCI_INT_DATA_AVAIL;

	/* Translate the MMC response type into SDHCI command flags */
	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		if (data)
			mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Set Transfer mode regarding to data flag */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

		if (host->flags & USE_DMA) {
			mode |= SDHCI_TRNS_DMA;
			sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
		}

		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
						    data->blocksize),
			     SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	/* Writing SDHCI_COMMAND fires the command on the bus */
	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
				/* Controller never signals R1b done; assume OK */
				return 0;
			} else {
				printf("%s: Timeout for status update!\n",
				       __func__);
				return -ETIMEDOUT;
			}
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	if (!ret) {
		/* Copy read data back out of the SDMA bounce buffer */
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    !is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, aligned_buffer, trans_bytes);
		return 0;
	}

	/* On failure, reset the CMD and DAT lines before returning */
	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}
395
#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
/*
 * Run the platform-specific tuning procedure when the host driver
 * provides one; without a hook, tuning is a successful no-op.
 */
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning)
		return host->ops->platform_execute_tuning(mmc, opcode);

	return 0;
}
#endif
/*
 * Program the SD clock to at most @clock Hz.
 *
 * Waits for the CMD/DAT lines to go idle, stops the clock, computes the
 * divider (programmable-clock mode, even v3 dividers, or power-of-two
 * v2 dividers depending on controller version and clk_mul), then waits
 * for the internal clock to stabilise before enabling the card clock.
 *
 * Return: 0 on success (including clock == 0, which leaves the clock
 * stopped), -EBUSY on inhibit or stabilisation timeout.
 */
int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
	       (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout to wait cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}

		timeout--;
		udelay(100);
	}

	/* Stop the clock before reprogramming the divider */
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	if (host->ops && host->ops->set_delay)
		host->ops->set_delay(host);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	/* Divider is split across low and high bit fields */
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	return 0;
}
507
508 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
509 {
510 u8 pwr = 0;
511
512 if (power != (unsigned short)-1) {
513 switch (1 << power) {
514 case MMC_VDD_165_195:
515 pwr = SDHCI_POWER_180;
516 break;
517 case MMC_VDD_29_30:
518 case MMC_VDD_30_31:
519 pwr = SDHCI_POWER_300;
520 break;
521 case MMC_VDD_32_33:
522 case MMC_VDD_33_34:
523 pwr = SDHCI_POWER_330;
524 break;
525 }
526 }
527
528 if (pwr == 0) {
529 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
530 return;
531 }
532
533 pwr |= SDHCI_POWER_ON;
534
535 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
536 }
537
538 void sdhci_set_uhs_timing(struct sdhci_host *host)
539 {
540 struct mmc *mmc = (struct mmc *)host->mmc;
541 u32 reg;
542
543 reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
544 reg &= ~SDHCI_CTRL_UHS_MASK;
545
546 switch (mmc->selected_mode) {
547 case UHS_SDR50:
548 case MMC_HS_52:
549 reg |= SDHCI_CTRL_UHS_SDR50;
550 break;
551 case UHS_DDR50:
552 case MMC_DDR_52:
553 reg |= SDHCI_CTRL_UHS_DDR50;
554 break;
555 case UHS_SDR104:
556 case MMC_HS_200:
557 reg |= SDHCI_CTRL_UHS_SDR104;
558 break;
559 default:
560 reg |= SDHCI_CTRL_UHS_SDR12;
561 }
562
563 sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
564 }
565
#ifdef CONFIG_DM_MMC
/*
 * Apply the current mmc->* settings (clock, bus width, high-speed bit)
 * to the controller, then give the host driver a post-set_ios hook.
 *
 * Return: 0 on success, or the host driver's set_ios_post() result.
 */
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		/* 8-bit bus needs v3.0 or the WIDE8 quirk */
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}

	if (mmc->clock > 26000000)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	/* Some controllers lack or misimplement the high-speed bit */
	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		return host->ops->set_ios_post(host);

	return 0;
}
620
/*
 * One-time controller initialisation: full software reset, optional
 * SDMA bounce-buffer allocation (32-bit DMA quirk), bus power-up at the
 * highest supported voltage, and interrupt enable configuration.
 *
 * Return: 0 on success, -ENOMEM if the bounce buffer cannot be
 * allocated.
 */
static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
	struct udevice *dev = mmc->dev;

	/* Optional card-detect GPIO from the device tree */
	gpio_request_by_name(dev, "cd-gpios", 0,
			     &host->cd_gpio, GPIOD_IS_IN);
#endif

	sdhci_reset(host, SDHCI_RESET_ALL);

	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
		/* 512 KiB, 8-byte aligned bounce buffer for SDMA */
		aligned_buffer = memalign(8, 512*1024);
		if (!aligned_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -ENOMEM;
		}
	}

	/* Power up at the highest voltage the configuration supports */
	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}
655
656 #ifdef CONFIG_DM_MMC
/* Driver-model probe hook: run the common SDHCI init for this device. */
int sdhci_probe(struct udevice *dev)
{
	return sdhci_init(mmc_get_mmc_dev(dev));
}
663
/*
 * Report card presence (1 = present, 0 = absent).
 *
 * Order of precedence: non-removable and polled hosts are always
 * "present"; otherwise a card-detect GPIO is consulted if available;
 * finally the controller's PRESENT_STATE register is used.  The
 * MMC_CAP_CD_ACTIVE_HIGH capability inverts the raw reading.
 */
static int sdhci_get_cd(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;
	int value;

	/* If nonremovable, assume that the card is always present. */
	if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
		return 1;
	/* If polling, assume that the card is always present. */
	if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
		return 1;

#if CONFIG_IS_ENABLED(DM_GPIO)
	value = dm_gpio_get_value(&host->cd_gpio);
	if (value >= 0) {
		if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
			return !value;
		else
			return value;
	}
#endif
	/* Fall back to the controller's own card-detect status bit */
	value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
		   SDHCI_CARD_PRESENT);
	if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
		return !value;
	else
		return value;
}
693
/* Driver-model MMC operations shared by all generic SDHCI hosts */
const struct dm_mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning	= sdhci_execute_tuning,
#endif
};
#else
/* Legacy (non-driver-model) MMC operations table */
static const struct mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.init		= sdhci_init,
};
#endif
709
710 int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
711 u32 f_max, u32 f_min)
712 {
713 u32 caps, caps_1 = 0;
714 #if CONFIG_IS_ENABLED(DM_MMC)
715 u64 dt_caps, dt_caps_mask;
716
717 dt_caps_mask = dev_read_u64_default(host->mmc->dev,
718 "sdhci-caps-mask", 0);
719 dt_caps = dev_read_u64_default(host->mmc->dev,
720 "sdhci-caps", 0);
721 caps = ~(u32)dt_caps_mask &
722 sdhci_readl(host, SDHCI_CAPABILITIES);
723 caps |= (u32)dt_caps;
724 #else
725 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
726 #endif
727 debug("%s, caps: 0x%x\n", __func__, caps);
728
729 #ifdef CONFIG_MMC_SDHCI_SDMA
730 if (!(caps & SDHCI_CAN_DO_SDMA)) {
731 printf("%s: Your controller doesn't support SDMA!!\n",
732 __func__);
733 return -EINVAL;
734 }
735
736 host->flags |= USE_SDMA;
737 #endif
738 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
739 if (!(caps & SDHCI_CAN_DO_ADMA2)) {
740 printf("%s: Your controller doesn't support SDMA!!\n",
741 __func__);
742 return -EINVAL;
743 }
744 host->adma_desc_table = (struct sdhci_adma_desc *)
745 memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ);
746
747 host->adma_addr = (dma_addr_t)host->adma_desc_table;
748 #ifdef CONFIG_DMA_ADDR_T_64BIT
749 host->flags |= USE_ADMA64;
750 #else
751 host->flags |= USE_ADMA;
752 #endif
753 #endif
754 if (host->quirks & SDHCI_QUIRK_REG32_RW)
755 host->version =
756 sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
757 else
758 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
759
760 cfg->name = host->name;
761 #ifndef CONFIG_DM_MMC
762 cfg->ops = &sdhci_ops;
763 #endif
764
765 /* Check whether the clock multiplier is supported or not */
766 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
767 #if CONFIG_IS_ENABLED(DM_MMC)
768 caps_1 = ~(u32)(dt_caps_mask >> 32) &
769 sdhci_readl(host, SDHCI_CAPABILITIES_1);
770 caps_1 |= (u32)(dt_caps >> 32);
771 #else
772 caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
773 #endif
774 debug("%s, caps_1: 0x%x\n", __func__, caps_1);
775 host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
776 SDHCI_CLOCK_MUL_SHIFT;
777 }
778
779 if (host->max_clk == 0) {
780 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
781 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
782 SDHCI_CLOCK_BASE_SHIFT;
783 else
784 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
785 SDHCI_CLOCK_BASE_SHIFT;
786 host->max_clk *= 1000000;
787 if (host->clk_mul)
788 host->max_clk *= host->clk_mul;
789 }
790 if (host->max_clk == 0) {
791 printf("%s: Hardware doesn't specify base clock frequency\n",
792 __func__);
793 return -EINVAL;
794 }
795 if (f_max && (f_max < host->max_clk))
796 cfg->f_max = f_max;
797 else
798 cfg->f_max = host->max_clk;
799 if (f_min)
800 cfg->f_min = f_min;
801 else {
802 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
803 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
804 else
805 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
806 }
807 cfg->voltages = 0;
808 if (caps & SDHCI_CAN_VDD_330)
809 cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
810 if (caps & SDHCI_CAN_VDD_300)
811 cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
812 if (caps & SDHCI_CAN_VDD_180)
813 cfg->voltages |= MMC_VDD_165_195;
814
815 if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
816 cfg->voltages |= host->voltages;
817
818 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;
819
820 /* Since Host Controller Version3.0 */
821 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
822 if (!(caps & SDHCI_CAN_DO_8BIT))
823 cfg->host_caps &= ~MMC_MODE_8BIT;
824 }
825
826 if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
827 cfg->host_caps &= ~MMC_MODE_HS;
828 cfg->host_caps &= ~MMC_MODE_HS_52MHz;
829 }
830
831 if (!(cfg->voltages & MMC_VDD_165_195) ||
832 (host->quirks & SDHCI_QUIRK_NO_1_8_V))
833 caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
834 SDHCI_SUPPORT_DDR50);
835
836 if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
837 SDHCI_SUPPORT_DDR50))
838 cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);
839
840 if (caps_1 & SDHCI_SUPPORT_SDR104) {
841 cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
842 /*
843 * SD3.0: SDR104 is supported so (for eMMC) the caps2
844 * field can be promoted to support HS200.
845 */
846 cfg->host_caps |= MMC_CAP(MMC_HS_200);
847 } else if (caps_1 & SDHCI_SUPPORT_SDR50) {
848 cfg->host_caps |= MMC_CAP(UHS_SDR50);
849 }
850
851 if (caps_1 & SDHCI_SUPPORT_DDR50)
852 cfg->host_caps |= MMC_CAP(UHS_DDR50);
853
854 if (host->host_caps)
855 cfg->host_caps |= host->host_caps;
856
857 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
858
859 return 0;
860 }
861
862 #ifdef CONFIG_BLK
/* Register this SDHCI device with the MMC block layer (CONFIG_BLK). */
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
867 #else
868 int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
869 {
870 int ret;
871
872 ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
873 if (ret)
874 return ret;
875
876 host->mmc = mmc_create(&host->cfg, host);
877 if (host->mmc == NULL) {
878 printf("%s: mmc create fail!\n", __func__);
879 return -ENOMEM;
880 }
881
882 return 0;
883 }
884 #endif
885