1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2008, Freescale Semiconductor, Inc
4 * Andy Fleming
5 *
6 * Based vaguely on the Linux code
7 */
8
9 #include <config.h>
10 #include <common.h>
11 #include <command.h>
12 #include <dm.h>
13 #include <dm/device-internal.h>
14 #include <errno.h>
15 #include <mmc.h>
16 #include <part.h>
17 #include <power/regulator.h>
18 #include <malloc.h>
19 #include <memalign.h>
20 #include <linux/list.h>
21 #include <div64.h>
22 #include "mmc_private.h"
23
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
28 #endif
29
30 #if !CONFIG_IS_ENABLED(DM_MMC)
31
32 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
mmc_wait_dat0(struct mmc * mmc,int state,int timeout)33 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
34 {
35 return -ENOSYS;
36 }
37 #endif
38
board_mmc_getwp(struct mmc * mmc)39 __weak int board_mmc_getwp(struct mmc *mmc)
40 {
41 return -1;
42 }
43
mmc_getwp(struct mmc * mmc)44 int mmc_getwp(struct mmc *mmc)
45 {
46 int wp;
47
48 wp = board_mmc_getwp(mmc);
49
50 if (wp < 0) {
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
53 else
54 wp = 0;
55 }
56
57 return wp;
58 }
59
board_mmc_getcd(struct mmc * mmc)60 __weak int board_mmc_getcd(struct mmc *mmc)
61 {
62 return -1;
63 }
64 #endif
65
66 #ifdef CONFIG_MMC_TRACE
/* Trace helper: print command index and argument before it is sent */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}
72
/*
 * Trace helper: print the response (or error code) of a command that
 * was just sent.  For R2 responses the full 128-bit payload is also
 * hex-dumped byte by byte, most significant byte first.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
			       cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* dump each response word byte-wise, MSB first */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
			       cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
124
/* Trace helper: decode and print the current-state field of an R1 status */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	/* CURR_STATE occupies bits [12:9] of the card status word */
	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
132 #endif
133
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
mmc_mode_name(enum bus_mode mode)135 const char *mmc_mode_name(enum bus_mode mode)
136 {
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 };
151
152 if (mode >= MMC_MODES_END)
153 return "Unknown mode";
154 else
155 return names[mode];
156 }
157 #endif
158
mmc_mode2freq(struct mmc * mmc,enum bus_mode mode)159 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
160 {
161 static const int freqs[] = {
162 [MMC_LEGACY] = 25000000,
163 [SD_LEGACY] = 25000000,
164 [MMC_HS] = 26000000,
165 [SD_HS] = 50000000,
166 [MMC_HS_52] = 52000000,
167 [MMC_DDR_52] = 52000000,
168 [UHS_SDR12] = 25000000,
169 [UHS_SDR25] = 50000000,
170 [UHS_SDR50] = 100000000,
171 [UHS_DDR50] = 50000000,
172 [UHS_SDR104] = 208000000,
173 [MMC_HS_200] = 200000000,
174 };
175
176 if (mode == MMC_LEGACY)
177 return mmc->legacy_speed;
178 else if (mode >= MMC_MODES_END)
179 return 0;
180 else
181 return freqs[mode];
182 }
183
mmc_select_mode(struct mmc * mmc,enum bus_mode mode)184 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
185 {
186 mmc->selected_mode = mode;
187 mmc->tran_speed = mmc_mode2freq(mmc, mode);
188 mmc->ddr_mode = mmc_is_mode_ddr(mode);
189 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
190 mmc->tran_speed / 1000000);
191 return 0;
192 }
193
194 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_send_cmd(struct mmc * mmc,struct mmc_cmd * cmd,struct mmc_data * data)195 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
196 {
197 int ret;
198
199 mmmc_trace_before_send(mmc, cmd);
200 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
201 mmmc_trace_after_send(mmc, cmd, ret);
202
203 return ret;
204 }
205 #endif
206
/*
 * Poll the card with CMD13 (SEND_STATUS) until it is ready for data
 * and no longer in the programming state.
 *
 * @timeout: maximum poll duration in milliseconds (one poll per ms)
 * Return: 0 when ready, -ECOMM if the card reports a status error,
 * -ETIMEDOUT on expiry, or the CMD13 error once the retry budget
 * (5 command failures) is exhausted.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	/* the RCA argument only applies in native (non-SPI) mode */
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			/* done: ready-for-data and out of PRG state */
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			     MMC_STATE_PRG)
				break;

			/* any error bit in the status word is fatal */
			if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				pr_err("Status Error: 0x%08X\n",
				       cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
251
mmc_set_blocklen(struct mmc * mmc,int len)252 int mmc_set_blocklen(struct mmc *mmc, int len)
253 {
254 struct mmc_cmd cmd;
255 int err;
256
257 if (mmc->ddr_mode)
258 return 0;
259
260 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
261 cmd.resp_type = MMC_RSP_R1;
262 cmd.cmdarg = len;
263
264 err = mmc_send_cmd(mmc, &cmd, NULL);
265
266 #ifdef CONFIG_MMC_QUIRKS
267 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
268 int retries = 4;
269 /*
270 * It has been seen that SET_BLOCKLEN may fail on the first
271 * attempt, let's try a few more time
272 */
273 do {
274 err = mmc_send_cmd(mmc, &cmd, NULL);
275 if (!err)
276 break;
277 } while (retries--);
278 }
279 #endif
280
281 return err;
282 }
283
284 #ifdef MMC_SUPPORTS_TUNING
/*
 * Fixed tuning block patterns: mmc_send_tuning() below reads a tuning
 * block from the card and compares it against these reference bytes
 * (64 bytes for 4-bit bus width, 128 bytes for 8-bit).
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
314
/*
 * Execute one tuning command: read a tuning block from the card and
 * verify it against the expected pattern for the current bus width.
 *
 * @opcode: tuning command index to issue
 * Return: 0 on match, -EINVAL for an unsupported bus width, -EIO on
 * pattern mismatch, or the command error.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *pattern;
	int len, ret;

	switch (mmc->bus_width) {
	case 8:
		pattern = tuning_blk_pattern_8bit;
		len = sizeof(tuning_blk_pattern_8bit);
		break;
	case 4:
		pattern = tuning_blk_pattern_4bit;
		len = sizeof(tuning_blk_pattern_4bit);
		break;
	default:
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, len);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = len;
	data.flags = MMC_DATA_READ;

	ret = mmc_send_cmd(mmc, &cmd, &data);
	if (ret)
		return ret;

	return memcmp(data_buf, pattern, len) ? -EIO : 0;
}
352 #endif
353
/*
 * Issue a single- or multi-block read.
 *
 * Return: the number of blocks read, or 0 on any command failure
 * (including a failed STOP_TRANSMISSION after a multi-block read).
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	cmd.cmdidx = (blkcnt > 1) ? MMC_CMD_READ_MULTIPLE_BLOCK :
				    MMC_CMD_READ_SINGLE_BLOCK;
	/* high-capacity cards address by block, others by byte offset */
	cmd.cmdarg = mmc->high_capacity ? start : start * mmc->read_bl_len;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	/* a multi-block read must be terminated with CMD12 */
	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
394
#if CONFIG_IS_ENABLED(BLK)
/*
 * Block-device read entry point: read @blkcnt blocks at @start into
 * @dst, honoring the controller's per-transfer limit (b_max).
 *
 * Return: number of blocks read, or 0 on any failure (bad device,
 * hwpart selection, out-of-range request, SET_BLOCKLEN or read error).
 */
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* reject reads extending past the end of the device */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* transfer in chunks no larger than the controller's b_max */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
451
mmc_go_idle(struct mmc * mmc)452 static int mmc_go_idle(struct mmc *mmc)
453 {
454 struct mmc_cmd cmd;
455 int err;
456
457 udelay(1000);
458
459 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
460 cmd.cmdarg = 0;
461 cmd.resp_type = MMC_RSP_NONE;
462
463 err = mmc_send_cmd(mmc, &cmd, NULL);
464
465 if (err)
466 return err;
467
468 udelay(2000);
469
470 return 0;
471 }
472
473 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Switch an SD card's signalling voltage via CMD11.
 *
 * The sequence — CMD11, gate the clock, change the host signal
 * voltage, ungate, then check DAT[0:3] — and its delays are
 * timing-sensitive; do not reorder these steps.
 *
 * Return: 0 on success, -EIO if the card flags an error, -ETIMEDOUT
 * if DAT0 does not reach the expected level, or a command error.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
533 #endif
534
/*
 * SD initialization handshake: poll ACMD41 until the card leaves the
 * busy state, record the OCR, detect the SD version / high capacity,
 * and optionally start the UHS 1.8V signalling switch.
 *
 * @uhs_en: request 1.8V signalling support (S18R) from the card
 * Return: 0 on success, -EOPNOTSUPP on timeout, or a command error.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* card is done initializing once the busy bit clears */
		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* card accepted both HCS (bit 30) and S18A (bit 24): switch to 1.8V */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
614
mmc_send_op_cond_iter(struct mmc * mmc,int use_arg)615 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
616 {
617 struct mmc_cmd cmd;
618 int err;
619
620 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
621 cmd.resp_type = MMC_RSP_R3;
622 cmd.cmdarg = 0;
623 if (use_arg && !mmc_host_is_spi(mmc))
624 cmd.cmdarg = OCR_HCS |
625 (mmc->cfg->voltages &
626 (mmc->ocr & OCR_VOLTAGE_MASK)) |
627 (mmc->ocr & OCR_ACCESS_MODE);
628
629 err = mmc_send_cmd(mmc, &cmd, NULL);
630 if (err)
631 return err;
632 mmc->ocr = cmd.response[0];
633 return 0;
634 }
635
mmc_send_op_cond(struct mmc * mmc)636 static int mmc_send_op_cond(struct mmc *mmc)
637 {
638 int err, i;
639
640 /* Some cards seem to need this */
641 mmc_go_idle(mmc);
642
643 /* Asking to the card its capabilities */
644 for (i = 0; i < 2; i++) {
645 err = mmc_send_op_cond_iter(mmc, i != 0);
646 if (err)
647 return err;
648
649 /* exit if not busy (flag seems to be inverted) */
650 if (mmc->ocr & OCR_BUSY)
651 break;
652 }
653 mmc->op_cond_pending = 1;
654 return 0;
655 }
656
/*
 * Finish the CMD1 negotiation started by mmc_send_op_cond(): poll
 * until the card clears its busy flag (up to ~1s), read the OCR over
 * SPI if needed, and derive the high-capacity flag and default RCA.
 *
 * Return: 0 on success, -EOPNOTSUPP on timeout, or a command error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		/* poll CMD1 until the busy bit clears or we time out */
		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* actual version is refined later from the CSD/EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
702
703
/*
 * Read the card's 512-byte EXT_CSD register into @ext_csd via CMD8.
 * The buffer must be at least MMC_MAX_BLOCK_LEN bytes.
 */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}
724
/*
 * Write one EXT_CSD byte with CMD6 (SWITCH) and wait for the card to
 * leave the busy state.  The command itself is retried up to 3 times.
 */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	struct mmc_cmd cmd;
	int attempts = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		     (index << 16) |
		     (value << 8);

	do {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		/* Waiting for the ready status */
		if (!ret)
			return mmc_send_status(mmc, 1000);
	} while (--attempts > 0);

	return ret;
}
753
754 #if !CONFIG_IS_ENABLED(MMC_TINY)
mmc_set_card_speed(struct mmc * mmc,enum bus_mode mode)755 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
756 {
757 int err;
758 int speed_bits;
759
760 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
761
762 switch (mode) {
763 case MMC_HS:
764 case MMC_HS_52:
765 case MMC_DDR_52:
766 speed_bits = EXT_CSD_TIMING_HS;
767 break;
768 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
769 case MMC_HS_200:
770 speed_bits = EXT_CSD_TIMING_HS200;
771 break;
772 #endif
773 case MMC_LEGACY:
774 speed_bits = EXT_CSD_TIMING_LEGACY;
775 break;
776 default:
777 return -EINVAL;
778 }
779 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
780 speed_bits);
781 if (err)
782 return err;
783
784 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
785 /* Now check to see that it worked */
786 err = mmc_send_ext_csd(mmc, test_csd);
787 if (err)
788 return err;
789
790 /* No high-speed support */
791 if (!test_csd[EXT_CSD_HS_TIMING])
792 return -ENOTSUPP;
793 }
794
795 return 0;
796 }
797
mmc_get_capabilities(struct mmc * mmc)798 static int mmc_get_capabilities(struct mmc *mmc)
799 {
800 u8 *ext_csd = mmc->ext_csd;
801 char cardtype;
802
803 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
804
805 if (mmc_host_is_spi(mmc))
806 return 0;
807
808 /* Only version 4 supports high-speed */
809 if (mmc->version < MMC_VERSION_4)
810 return 0;
811
812 if (!ext_csd) {
813 pr_err("No ext_csd found!\n"); /* this should enver happen */
814 return -ENOTSUPP;
815 }
816
817 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
818
819 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
820 mmc->cardtype = cardtype;
821
822 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
823 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
824 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
825 mmc->card_caps |= MMC_MODE_HS200;
826 }
827 #endif
828 if (cardtype & EXT_CSD_CARD_TYPE_52) {
829 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
830 mmc->card_caps |= MMC_MODE_DDR_52MHz;
831 mmc->card_caps |= MMC_MODE_HS_52MHz;
832 }
833 if (cardtype & EXT_CSD_CARD_TYPE_26)
834 mmc->card_caps |= MMC_MODE_HS;
835
836 return 0;
837 }
838 #endif
839
mmc_set_capacity(struct mmc * mmc,int part_num)840 static int mmc_set_capacity(struct mmc *mmc, int part_num)
841 {
842 switch (part_num) {
843 case 0:
844 mmc->capacity = mmc->capacity_user;
845 break;
846 case 1:
847 case 2:
848 mmc->capacity = mmc->capacity_boot;
849 break;
850 case 3:
851 mmc->capacity = mmc->capacity_rpmb;
852 break;
853 case 4:
854 case 5:
855 case 6:
856 case 7:
857 mmc->capacity = mmc->capacity_gp[part_num - 4];
858 break;
859 default:
860 return -1;
861 }
862
863 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
864
865 return 0;
866 }
867
868 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
mmc_boot_part_access_chk(struct mmc * mmc,unsigned int part_num)869 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
870 {
871 int forbidden = 0;
872 bool change = false;
873
874 if (part_num & PART_ACCESS_MASK)
875 forbidden = MMC_CAP(MMC_HS_200);
876
877 if (MMC_CAP(mmc->selected_mode) & forbidden) {
878 pr_debug("selected mode (%s) is forbidden for part %d\n",
879 mmc_mode_name(mmc->selected_mode), part_num);
880 change = true;
881 } else if (mmc->selected_mode != mmc->best_mode) {
882 pr_debug("selected mode is not optimal\n");
883 change = true;
884 }
885
886 if (change)
887 return mmc_select_mode_and_width(mmc,
888 mmc->card_caps & ~forbidden);
889
890 return 0;
891 }
892 #else
/* No HS200 support configured: nothing to check before switching */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
898 #endif
899
mmc_switch_part(struct mmc * mmc,unsigned int part_num)900 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
901 {
902 int ret;
903
904 ret = mmc_boot_part_access_chk(mmc, part_num);
905 if (ret)
906 return ret;
907
908 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
909 (mmc->part_config & ~PART_ACCESS_MASK)
910 | (part_num & PART_ACCESS_MASK));
911
912 /*
913 * Set the capacity if the switch succeeded or was intended
914 * to return to representing the raw device.
915 */
916 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
917 ret = mmc_set_capacity(mmc, part_num);
918 mmc_get_blk_desc(mmc)->hwpart = part_num;
919 }
920
921 return ret;
922 }
923
924 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Check, set or complete an eMMC hardware partition configuration
 * (enhanced user area, GP partitions, write reliability bits).
 *
 * @conf: requested layout; sizes must be multiples of the card's
 *        high-capacity write-protect group size
 * @mode: MMC_HWPART_CONF_CHECK only validates the request;
 *        MMC_HWPART_CONF_SET also writes sizes and attributes;
 *        MMC_HWPART_CONF_COMPLETE additionally writes WR_REL_SET and
 *        sets PARTITION_SETTING_COMPLETED — which only takes effect
 *        after a power cycle and cannot be undone.
 *
 * Return: 0 on success; -EINVAL / -EMEDIUMTYPE / -EPERM on a rejected
 * configuration; a command error otherwise.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a little-endian 24-bit value in EXT_CSD */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1118 #endif
1119
1120 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_getcd(struct mmc * mmc)1121 int mmc_getcd(struct mmc *mmc)
1122 {
1123 int cd;
1124
1125 cd = board_mmc_getcd(mmc);
1126
1127 if (cd < 0) {
1128 if (mmc->cfg->ops->getcd)
1129 cd = mmc->cfg->ops->getcd(mmc);
1130 else
1131 cd = 1;
1132 }
1133
1134 return cd;
1135 }
1136 #endif
1137
1138 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Issue SD CMD6 (SWITCH_FUNC) for one function group and read back
 * the 64-byte switch status block into @resp.
 *
 * @mode: 0 = check, 1 = set (bit 31 of the CMD6 argument)
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	u32 arg;

	/* one nibble per function group; untouched groups stay 0xf */
	arg = (mode << 31) | 0xffffff;
	arg &= ~(0xf << (group * 4));
	arg |= value << (group * 4);

	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = arg;

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}
1158
/*
 * Discover an SD card's capabilities: read the SCR to determine the
 * SD version and 4-bit support, then probe CMD6 in check mode for
 * high-speed and (on SDv3) UHS bus speeds.  Results accumulate in
 * mmc->card_caps.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* the SCR read is retried a few times before giving up */
	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR is transferred big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field: bits [27:24] of the first SCR word */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit promotes the card to version 3 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* probe CMD6 (check mode), retrying while the function is busy */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* record high-speed capability if the card advertises it */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1271
/*
 * Program an SD card's bus speed mode via CMD6 function group 1.
 *
 * @mode: desired bus mode (SD_LEGACY, SD_HS or, when UHS support is
 *        enabled, one of the UHS modes)
 * Return: 0 on success, -EINVAL for an unsupported mode, -ENOTSUPP when
 * the card refused the switch, or a negative command-layer error.
 */
static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;

	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	int speed;

	/* map the bus mode onto its CMD6 group 1 function value */
	switch (mode) {
	case SD_LEGACY:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case SD_HS:
		speed = HIGH_SPEED_BUS_SPEED;
		break;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	case UHS_SDR12:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case UHS_SDR25:
		speed = UHS_SDR25_BUS_SPEED;
		break;
	case UHS_SDR50:
		speed = UHS_SDR50_BUS_SPEED;
		break;
	case UHS_DDR50:
		speed = UHS_DDR50_BUS_SPEED;
		break;
	case UHS_SDR104:
		speed = UHS_SDR104_BUS_SPEED;
		break;
#endif
	default:
		return -EINVAL;
	}

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
	if (err)
		return err;

	/*
	 * The 4-bit group 1 result in the status block must echo the
	 * requested function value, otherwise the card rejected the switch.
	 */
	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
		return -ENOTSUPP;

	return 0;
}
1316
sd_select_bus_width(struct mmc * mmc,int w)1317 static int sd_select_bus_width(struct mmc *mmc, int w)
1318 {
1319 int err;
1320 struct mmc_cmd cmd;
1321
1322 if ((w != 4) && (w != 1))
1323 return -EINVAL;
1324
1325 cmd.cmdidx = MMC_CMD_APP_CMD;
1326 cmd.resp_type = MMC_RSP_R1;
1327 cmd.cmdarg = mmc->rca << 16;
1328
1329 err = mmc_send_cmd(mmc, &cmd, NULL);
1330 if (err)
1331 return err;
1332
1333 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1334 cmd.resp_type = MMC_RSP_R1;
1335 if (w == 4)
1336 cmd.cmdarg = 2;
1337 else if (w == 1)
1338 cmd.cmdarg = 0;
1339 err = mmc_send_cmd(mmc, &cmd, NULL);
1340 if (err)
1341 return err;
1342
1343 return 0;
1344 }
1345 #endif
1346
1347 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the SD Status register (ACMD13) and extract the erase-related
 * fields: allocation unit size, erase timeout and erase offset, which are
 * stored in mmc->ssr for later use by the erase path.
 *
 * Return: 0 on success (an invalid AU size is only logged, not fatal),
 * or a negative command-layer error.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE field -> allocation unit size in 512-byte sectors */
	static const unsigned int sd_au_size[] = {
		0,		SZ_16K / 512,		SZ_32K / 512,
		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD prefix for the following SD_STATUS command */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	/* the SD Status is a single 64-byte data block */
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* transient failures are retried a few times */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* convert the big-endian register image in place */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	/* AU codes above 9 are only defined from SD spec 3.0 onwards */
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE spans a word boundary in the register image */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* per-AU erase timeout in ms */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		pr_debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1411 #endif
/* frequency bases for the CSD TRAN_SPEED field */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1442
/*
 * Translate a single MMC_MODE_*BIT capability flag into the number of
 * data lines it represents.
 *
 * @cap: exactly one of MMC_MODE_8BIT, MMC_MODE_4BIT or MMC_MODE_1BIT
 * Return: 8, 4 or 1, or 0 (after logging a warning) for anything else.
 */
static inline int bus_width(uint cap)
{
	if (cap == MMC_MODE_8BIT)
		return 8;
	if (cap == MMC_MODE_4BIT)
		return 4;
	if (cap == MMC_MODE_1BIT)
		return 1;
	/* fix: warning text previously misspelled "width" as "witdh" */
	pr_warn("invalid bus width capability 0x%x\n", cap);
	return 0;
}
1454
1455 #if !CONFIG_IS_ENABLED(DM_MMC)
1456 #ifdef MMC_SUPPORTS_TUNING
/*
 * Non-DM stub: tuning requires driver-model host support, so report
 * "not supported" here.
 */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1461 #endif
1462
/* Non-DM stub: no init-stream (74-clock-cycle) hook is available here */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1466
mmc_set_ios(struct mmc * mmc)1467 static int mmc_set_ios(struct mmc *mmc)
1468 {
1469 int ret = 0;
1470
1471 if (mmc->cfg->ops->set_ios)
1472 ret = mmc->cfg->ops->set_ios(mmc);
1473
1474 return ret;
1475 }
1476 #endif
1477
/*
 * Record the requested bus clock and program it into the host.
 *
 * @clock:   requested frequency in Hz; clamped to the host's advertised
 *           [f_min, f_max] window unless the clock is being gated off
 * @disable: true to gate the clock, false to enable it
 * Return: result of pushing the new settings to the host driver.
 */
int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
{
	if (!disable) {
		uint f_max = mmc->cfg->f_max;
		uint f_min = mmc->cfg->f_min;

		/* keep the frequency within the host's supported window */
		if (clock > f_max)
			clock = f_max;
		if (clock < f_min)
			clock = f_min;
	}

	mmc->clock = clock;
	mmc->clk_disable = disable;

	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);

	return mmc_set_ios(mmc);
}
1495
/* Record the host-side bus width and push it to the controller driver */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1502
1503 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1504 /*
1505 * helper function to display the capabilities in a human
1506 * friendly manner. The capabilities include bus width and
1507 * supported modes.
1508 */
/*
 * helper function to display the capabilities in a human
 * friendly manner. The capabilities include bus width and
 * supported modes.
 */
void mmc_dump_capabilities(const char *text, uint caps)
{
	static const struct {
		uint flag;
		const char *label;
	} width_caps[] = {
		{ MMC_MODE_8BIT, "8, " },
		{ MMC_MODE_4BIT, "4, " },
		{ MMC_MODE_1BIT, "1, " },
	};
	enum bus_mode mode;
	size_t i;

	pr_debug("%s: widths [", text);
	for (i = 0; i < ARRAY_SIZE(width_caps); i++)
		if (caps & width_caps[i].flag)
			pr_debug("%s", width_caps[i].label);
	/* "\b\b" erases the trailing ", " before closing the bracket */
	pr_debug("\b\b] modes [");
	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
		if (MMC_CAP(mode) & caps)
			pr_debug("%s, ", mmc_mode_name(mode));
	pr_debug("\b\b]\n");
}
1526 #endif
1527
/* One candidate bus configuration tried during mode selection */
struct mode_width_tuning {
	enum bus_mode mode;
	uint widths;	/* bitmask of MMC_MODE_xBIT flags usable in @mode */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;	/* tuning command opcode, 0 when no tuning needed */
#endif
};
1535
1536 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
mmc_voltage_to_mv(enum mmc_voltage voltage)1537 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1538 {
1539 switch (voltage) {
1540 case MMC_SIGNAL_VOLTAGE_000: return 0;
1541 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1542 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1543 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1544 }
1545 return -EINVAL;
1546 }
1547
/*
 * Switch the I/O signalling voltage, if it differs from the current one,
 * by recording it and pushing the new settings to the host driver.
 *
 * Return: 0 on success (or no-op), else the host driver's error.
 */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	int ret;

	/* already at the requested level: nothing to do */
	if (mmc->signal_voltage == signal_voltage)
		return 0;

	mmc->signal_voltage = signal_voltage;

	ret = mmc_set_ios(mmc);
	if (ret)
		pr_debug("unable to set voltage (err %d)\n", ret);

	return ret;
}
1562 #else
/* Stub when I/O voltage switching is disabled: always succeed */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1567 #endif
1568
1569 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* SD bus configurations, tried in decreasing order of preference */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* iterate over sd_modes_by_pref, skipping modes absent from @caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1613
/*
 * Pick the best (mode, width) combination supported by both card and
 * host, configure card and host accordingly, and fall back to slower
 * configurations when one fails.
 *
 * @card_caps: capability mask reported by the card
 * Return: 0 once a configuration works, -ENOTSUPP when none does.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS is only attempted if the card accepted 1.8V signalling */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		/* try the widest bus first within each mode */
		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				/* SSR failure is logged but not fatal */
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
1695
/*
 * Read and compare the part of the ext csd that is constant.
 * This can be used to check that the transfer is working
 * as expected.
 */
mmc_read_and_compare_ext_csd(struct mmc * mmc)1701 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1702 {
1703 int err;
1704 const u8 *ext_csd = mmc->ext_csd;
1705 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1706
1707 if (mmc->version < MMC_VERSION_4)
1708 return 0;
1709
1710 err = mmc_send_ext_csd(mmc, test_csd);
1711 if (err)
1712 return err;
1713
1714 /* Only compare read only fields */
1715 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1716 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1717 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1718 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1719 ext_csd[EXT_CSD_REV]
1720 == test_csd[EXT_CSD_REV] &&
1721 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1722 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1723 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1724 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1725 return 0;
1726
1727 return -EBADMSG;
1728 }
1729
1730 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Pick and apply the lowest signalling voltage that both the card (for
 * the given bus mode) and the caller's @allowed_mask permit.
 *
 * Return: 0 once a voltage is set, -ENOTSUPP when none is accepted.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	/* build the mask of voltages the card supports in this mode */
	switch (mode) {
	case MMC_HS_200:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	/* ffs() finds the lowest set bit, i.e. the lowest voltage first */
	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc, best_match))
			return 0;

		/* that voltage failed; drop it and try the next one up */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1767 #else
/* Stub when I/O voltage switching is disabled: always succeed */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1773 #endif
1774
/* eMMC bus configurations, tried in decreasing order of preference */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* iterate over mmc_modes_by_pref, skipping modes absent from @caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	     mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))

/* maps a width capability onto the EXT_CSD BUS_WIDTH value to program */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability flag */
	bool is_ddr;		/* whether this entry is for a DDR mode */
	uint ext_csd_bits;	/* value for the EXT_CSD_BUS_WIDTH field */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};

/* iterate over the width table entries matching @caps and DDR-ness @ddr */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	     ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	     ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1824
/*
 * Pick the best (mode, width, voltage) combination supported by both the
 * eMMC card and the host, configure both sides, and verify the result by
 * re-reading the EXT_CSD; fall back to slower configurations on failure.
 *
 * @card_caps: capability mask reported by the card
 * Return: 0 once a configuration works, -ENOTSUPP when none does or the
 * EXT_CSD is missing.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start from a known-good slow clock before experimenting */
	mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					pr_debug("tuning failed\n");
					goto error;
				}
			}
#endif

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
1922 #endif
1923
#if CONFIG_IS_ENABLED(MMC_TINY)
/* static EXT_CSD backup buffer shared by the single MMC_TINY device */
DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
#endif
1927
/*
 * Perform the MMC-v4+ specific part of card initialization: read the
 * EXT_CSD, derive the exact spec version, the user/boot/RPMB/GP
 * capacities, partition info, erase group size and write-reliability
 * settings.
 *
 * No-op (returns 0) for SD cards and pre-v4 MMC. On EXT_CSD read or
 * switch failure the cached EXT_CSD is dropped and the error returned.
 */
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	/* indexed by EXT_CSD_REV to get the internal version code */
	static const u32 mmc_versions[] = {
		MMC_VERSION_4,
		MMC_VERSION_4_1,
		MMC_VERSION_4_2,
		MMC_VERSION_4_3,
		MMC_VERSION_4_4,
		MMC_VERSION_4_41,
		MMC_VERSION_4_5,
		MMC_VERSION_5_0,
		MMC_VERSION_5_1
	};

#if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY: use the single static backup buffer, no malloc */
	u8 *ext_csd = ext_csd_bkup;

	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd)
		memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = ext_csd;
#else
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* check ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	if (!mmc->ext_csd)
		return -ENOMEM;
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
#endif
	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
		return -EINVAL;

	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];

	if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		/*
		 * NOTE(review): the u8 bytes are promoted to int here, so a
		 * SEC_CNT top byte >= 0x80 would sign-extend into the u64 —
		 * confirm against the largest devices that must be supported.
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* boot/RPMB sizes are given in 128KiB units (multiplier << 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* decode the four general-purpose partition sizes */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

#ifndef CONFIG_SPL_BUILD
	if (part_completed) {
		/* enhanced user-area size and start, in HC group units */
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* high-capacity devices address in 512-byte sectors */
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}
#endif

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			goto error;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				   (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				   (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				   (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	}
#if CONFIG_IS_ENABLED(MMC_WRITE)
	else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
	/* high-capacity write-protect group size, in 512-byte sectors */
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
#endif

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
error:
	/* drop the (possibly stale) cached EXT_CSD on failure */
	if (mmc->ext_csd) {
#if !CONFIG_IS_ENABLED(MMC_TINY)
		free(mmc->ext_csd);
#endif
		mmc->ext_csd = NULL;
	}
	return err;
}
2125
/*
 * Bring an identified card from Identification to Transfer state and
 * fully initialize it: read CID and CSD, assign/fetch the RCA, derive
 * the legacy speed and capacities, run the v4+ EXT_CSD setup, select
 * the best bus mode/width, and fill in the block-device descriptor.
 *
 * Return: 0 on success or a negative error code.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq;
	u64 cmult, csize;
	struct mmc_cmd cmd;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
		int retries = 4;
		/*
		 * It has been seen that SEND_CID may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* SD cards publish their own RCA in the R6 response */
		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* derive the MMC spec version from the CSD when still unknown */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->legacy_speed = freq * mult;
	mmc_select_mode(mmc, MMC_LEGACY);

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
#if CONFIG_IS_ENABLED(MMC_WRITE)

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
#endif

	/* compute capacity from C_SIZE / C_SIZE_MULT (layout differs) */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif

	/* program the DSR if the card implements one and a value is set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			pr_warn("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
#if CONFIG_IS_ENABLED(MMC_WRITE)
	mmc->erase_grp_size = 1;
#endif
	mmc->part_config = MMCPART_NOAVAILABLE;

	err = mmc_startup_v4(mmc);
	if (err)
		return err;

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY: stay in 1-bit legacy mode, no mode negotiation */
	mmc_set_clock(mmc, mmc->legacy_speed, false);
	mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
#else
	if (IS_SD(mmc)) {
		err = sd_get_capabilities(mmc);
		if (err)
			return err;
		err = sd_select_mode_and_width(mmc, mmc->card_caps);
	} else {
		err = mmc_get_capabilities(mmc);
		if (err)
			return err;
		mmc_select_mode_and_width(mmc, mmc->card_caps);
	}
#endif
	if (err)
		return err;

	mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
#if CONFIG_IS_ENABLED(MMC_WRITE)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	/* decode vendor/product/revision strings from the CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}
2370
mmc_send_if_cond(struct mmc * mmc)2371 static int mmc_send_if_cond(struct mmc *mmc)
2372 {
2373 struct mmc_cmd cmd;
2374 int err;
2375
2376 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2377 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2378 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2379 cmd.resp_type = MMC_RSP_R7;
2380
2381 err = mmc_send_cmd(mmc, &cmd, NULL);
2382
2383 if (err)
2384 return err;
2385
2386 if ((cmd.response[0] & 0xff) != 0xaa)
2387 return -EOPNOTSUPP;
2388 else
2389 mmc->version = SD_VERSION_2;
2390
2391 return 0;
2392 }
2393
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * board-specific MMC power initializations.
 *
 * Weak default: does nothing. Non-DM boards override this to switch on
 * card power before the controller is initialized.
 */
__weak void board_mmc_power_init(void)
{
}
#endif
2400
mmc_power_init(struct mmc * mmc)2401 static int mmc_power_init(struct mmc *mmc)
2402 {
2403 #if CONFIG_IS_ENABLED(DM_MMC)
2404 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2405 int ret;
2406
2407 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2408 &mmc->vmmc_supply);
2409 if (ret)
2410 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2411
2412 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2413 &mmc->vqmmc_supply);
2414 if (ret)
2415 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2416 #endif
2417 #else /* !CONFIG_DM_MMC */
2418 /*
2419 * Driver model should use a regulator, as above, rather than calling
2420 * out to board code.
2421 */
2422 board_mmc_power_init();
2423 #endif
2424 return 0;
2425 }
2426
2427 /*
2428 * put the host in the initial state:
2429 * - turn on Vdd (card power supply)
2430 * - configure the bus width and clock to minimal values
2431 */
mmc_set_initial_state(struct mmc * mmc)2432 static void mmc_set_initial_state(struct mmc *mmc)
2433 {
2434 int err;
2435
2436 /* First try to set 3.3V. If it fails set to 1.8V */
2437 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2438 if (err != 0)
2439 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2440 if (err != 0)
2441 pr_warn("mmc: failed to set signal voltage\n");
2442
2443 mmc_select_mode(mmc, MMC_LEGACY);
2444 mmc_set_bus_width(mmc, 1);
2445 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2446 }
2447
mmc_power_on(struct mmc * mmc)2448 static int mmc_power_on(struct mmc *mmc)
2449 {
2450 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2451 if (mmc->vmmc_supply) {
2452 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2453
2454 if (ret) {
2455 puts("Error enabling VMMC supply\n");
2456 return ret;
2457 }
2458 }
2459 #endif
2460 return 0;
2461 }
2462
mmc_power_off(struct mmc * mmc)2463 static int mmc_power_off(struct mmc *mmc)
2464 {
2465 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2466 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2467 if (mmc->vmmc_supply) {
2468 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2469
2470 if (ret) {
2471 pr_debug("Error disabling VMMC supply\n");
2472 return ret;
2473 }
2474 }
2475 #endif
2476 return 0;
2477 }
2478
mmc_power_cycle(struct mmc * mmc)2479 static int mmc_power_cycle(struct mmc *mmc)
2480 {
2481 int ret;
2482
2483 ret = mmc_power_off(mmc);
2484 if (ret)
2485 return ret;
2486 /*
2487 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2488 * to be on the safer side.
2489 */
2490 udelay(2000);
2491 return mmc_power_on(mmc);
2492 }
2493
mmc_start_init(struct mmc * mmc)2494 int mmc_start_init(struct mmc *mmc)
2495 {
2496 bool no_card;
2497 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2498 int err;
2499
2500 /*
2501 * all hosts are capable of 1 bit bus-width and able to use the legacy
2502 * timings.
2503 */
2504 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2505 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2506
2507 #if !defined(CONFIG_MMC_BROKEN_CD)
2508 /* we pretend there's no card when init is NULL */
2509 no_card = mmc_getcd(mmc) == 0;
2510 #else
2511 no_card = 0;
2512 #endif
2513 #if !CONFIG_IS_ENABLED(DM_MMC)
2514 no_card = no_card || (mmc->cfg->ops->init == NULL);
2515 #endif
2516 if (no_card) {
2517 mmc->has_init = 0;
2518 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2519 pr_err("MMC: no card present\n");
2520 #endif
2521 return -ENOMEDIUM;
2522 }
2523
2524 if (mmc->has_init)
2525 return 0;
2526
2527 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2528 mmc_adapter_card_type_ident();
2529 #endif
2530 err = mmc_power_init(mmc);
2531 if (err)
2532 return err;
2533
2534 #ifdef CONFIG_MMC_QUIRKS
2535 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2536 MMC_QUIRK_RETRY_SEND_CID;
2537 #endif
2538
2539 err = mmc_power_cycle(mmc);
2540 if (err) {
2541 /*
2542 * if power cycling is not supported, we should not try
2543 * to use the UHS modes, because we wouldn't be able to
2544 * recover from an error during the UHS initialization.
2545 */
2546 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2547 uhs_en = false;
2548 mmc->host_caps &= ~UHS_CAPS;
2549 err = mmc_power_on(mmc);
2550 }
2551 if (err)
2552 return err;
2553
2554 #if CONFIG_IS_ENABLED(DM_MMC)
2555 /* The device has already been probed ready for use */
2556 #else
2557 /* made sure it's not NULL earlier */
2558 err = mmc->cfg->ops->init(mmc);
2559 if (err)
2560 return err;
2561 #endif
2562 mmc->ddr_mode = 0;
2563
2564 retry:
2565 mmc_set_initial_state(mmc);
2566 mmc_send_init_stream(mmc);
2567
2568 /* Reset the Card */
2569 err = mmc_go_idle(mmc);
2570
2571 if (err)
2572 return err;
2573
2574 /* The internal partition reset to user partition(0) at every CMD0*/
2575 mmc_get_blk_desc(mmc)->hwpart = 0;
2576
2577 /* Test for SD version 2 */
2578 err = mmc_send_if_cond(mmc);
2579
2580 /* Now try to get the SD card's operating condition */
2581 err = sd_send_op_cond(mmc, uhs_en);
2582 if (err && uhs_en) {
2583 uhs_en = false;
2584 mmc_power_cycle(mmc);
2585 goto retry;
2586 }
2587
2588 /* If the command timed out, we check for an MMC card */
2589 if (err == -ETIMEDOUT) {
2590 err = mmc_send_op_cond(mmc);
2591
2592 if (err) {
2593 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2594 pr_err("Card did not respond to voltage select!\n");
2595 #endif
2596 return -EOPNOTSUPP;
2597 }
2598 }
2599
2600 if (!err)
2601 mmc->init_in_progress = 1;
2602
2603 return err;
2604 }
2605
mmc_complete_init(struct mmc * mmc)2606 static int mmc_complete_init(struct mmc *mmc)
2607 {
2608 int err = 0;
2609
2610 mmc->init_in_progress = 0;
2611 if (mmc->op_cond_pending)
2612 err = mmc_complete_op_cond(mmc);
2613
2614 if (!err)
2615 err = mmc_startup(mmc);
2616 if (err)
2617 mmc->has_init = 0;
2618 else
2619 mmc->has_init = 1;
2620 return err;
2621 }
2622
mmc_init(struct mmc * mmc)2623 int mmc_init(struct mmc *mmc)
2624 {
2625 int err = 0;
2626 __maybe_unused ulong start;
2627 #if CONFIG_IS_ENABLED(DM_MMC)
2628 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2629
2630 upriv->mmc = mmc;
2631 #endif
2632 if (mmc->has_init)
2633 return 0;
2634
2635 start = get_timer(0);
2636
2637 if (!mmc->init_in_progress)
2638 err = mmc_start_init(mmc);
2639
2640 if (!err)
2641 err = mmc_complete_init(mmc);
2642 if (err)
2643 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2644
2645 return err;
2646 }
2647
mmc_set_dsr(struct mmc * mmc,u16 val)2648 int mmc_set_dsr(struct mmc *mmc, u16 val)
2649 {
2650 mmc->dsr = val;
2651 return 0;
2652 }
2653
/*
 * CPU-specific MMC initializations.
 *
 * Weak default: returns -1 (no controllers registered); SoC code may
 * override. Used only by the non-DM probe path.
 */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2659
/*
 * board-specific MMC initializations.
 *
 * Weak default: returns -1, which makes mmc_probe() fall back to
 * cpu_mmc_init(). Boards may override to register their controllers.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2665
mmc_set_preinit(struct mmc * mmc,int preinit)2666 void mmc_set_preinit(struct mmc *mmc, int preinit)
2667 {
2668 mmc->preinit = preinit;
2669 }
2670
#if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Bind and probe every MMC controller known to driver model.
 *
 * Individual probe failures are only logged; the overall call still
 * returns 0 so the remaining controllers stay usable.
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/*
 * Non-DM probe: give the board hook first shot at registering
 * controllers; fall back to the CPU hook if it declines.
 */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2709
mmc_initialize(bd_t * bis)2710 int mmc_initialize(bd_t *bis)
2711 {
2712 static int initialized = 0;
2713 int ret;
2714 if (initialized) /* Avoid initializing mmc multiple times */
2715 return 0;
2716 initialized = 1;
2717
2718 #if !CONFIG_IS_ENABLED(BLK)
2719 #if !CONFIG_IS_ENABLED(MMC_TINY)
2720 mmc_list_init();
2721 #endif
2722 #endif
2723 ret = mmc_probe(bis);
2724 if (ret)
2725 return ret;
2726
2727 #ifndef CONFIG_SPL_BUILD
2728 print_mmc_devices(',');
2729 #endif
2730
2731 mmc_do_preinit();
2732 return 0;
2733 }
2734
2735 #ifdef CONFIG_CMD_BKOPS_ENABLE
mmc_set_bkops_enable(struct mmc * mmc)2736 int mmc_set_bkops_enable(struct mmc *mmc)
2737 {
2738 int err;
2739 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2740
2741 err = mmc_send_ext_csd(mmc, ext_csd);
2742 if (err) {
2743 puts("Could not get ext_csd register values\n");
2744 return err;
2745 }
2746
2747 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2748 puts("Background operations not supported on device\n");
2749 return -EMEDIUMTYPE;
2750 }
2751
2752 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2753 puts("Background operations already enabled\n");
2754 return 0;
2755 }
2756
2757 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2758 if (err) {
2759 puts("Failed to enable manual background operations\n");
2760 return err;
2761 }
2762
2763 puts("Enabled manual background operations\n");
2764
2765 return 0;
2766 }
2767 #endif
2768