• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2008, Freescale Semiconductor, Inc
4  * Andy Fleming
5  *
6  * Based vaguely on the Linux code
7  */
8 
9 #include <config.h>
10 #include <common.h>
11 #include <command.h>
12 #include <dm.h>
13 #include <dm/device-internal.h>
14 #include <errno.h>
15 #include <mmc.h>
16 #include <part.h>
17 #include <power/regulator.h>
18 #include <malloc.h>
19 #include <memalign.h>
20 #include <linux/list.h>
21 #include <div64.h>
22 #include "mmc_private.h"
23 
24 #define DEFAULT_CMD6_TIMEOUT_MS  500
25 
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
27 static int mmc_power_cycle(struct mmc *mmc);
28 #if !CONFIG_IS_ENABLED(MMC_TINY)
29 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
30 #endif
31 
32 #if !CONFIG_IS_ENABLED(DM_MMC)
33 
mmc_wait_dat0(struct mmc * mmc,int state,int timeout_us)34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
35 {
36 	return -ENOSYS;
37 }
38 
board_mmc_getwp(struct mmc * mmc)39 __weak int board_mmc_getwp(struct mmc *mmc)
40 {
41 	return -1;
42 }
43 
mmc_getwp(struct mmc * mmc)44 int mmc_getwp(struct mmc *mmc)
45 {
46 	int wp;
47 
48 	wp = board_mmc_getwp(mmc);
49 
50 	if (wp < 0) {
51 		if (mmc->cfg->ops->getwp)
52 			wp = mmc->cfg->ops->getwp(mmc);
53 		else
54 			wp = 0;
55 	}
56 
57 	return wp;
58 }
59 
board_mmc_getcd(struct mmc * mmc)60 __weak int board_mmc_getcd(struct mmc *mmc)
61 {
62 	return -1;
63 }
64 #endif
65 
66 #ifdef CONFIG_MMC_TRACE
mmmc_trace_before_send(struct mmc * mmc,struct mmc_cmd * cmd)67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
68 {
69 	printf("CMD_SEND:%d\n", cmd->cmdidx);
70 	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
71 }
72 
mmmc_trace_after_send(struct mmc * mmc,struct mmc_cmd * cmd,int ret)73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
74 {
75 	int i;
76 	u8 *ptr;
77 
78 	if (ret) {
79 		printf("\t\tRET\t\t\t %d\n", ret);
80 	} else {
81 		switch (cmd->resp_type) {
82 		case MMC_RSP_NONE:
83 			printf("\t\tMMC_RSP_NONE\n");
84 			break;
85 		case MMC_RSP_R1:
86 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
87 				cmd->response[0]);
88 			break;
89 		case MMC_RSP_R1b:
90 			printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
91 				cmd->response[0]);
92 			break;
93 		case MMC_RSP_R2:
94 			printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
95 				cmd->response[0]);
96 			printf("\t\t          \t\t 0x%08x \n",
97 				cmd->response[1]);
98 			printf("\t\t          \t\t 0x%08x \n",
99 				cmd->response[2]);
100 			printf("\t\t          \t\t 0x%08x \n",
101 				cmd->response[3]);
102 			printf("\n");
103 			printf("\t\t\t\t\tDUMPING DATA\n");
104 			for (i = 0; i < 4; i++) {
105 				int j;
106 				printf("\t\t\t\t\t%03d - ", i*4);
107 				ptr = (u8 *)&cmd->response[i];
108 				ptr += 3;
109 				for (j = 0; j < 4; j++)
110 					printf("%02x ", *ptr--);
111 				printf("\n");
112 			}
113 			break;
114 		case MMC_RSP_R3:
115 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
116 				cmd->response[0]);
117 			break;
118 		default:
119 			printf("\t\tERROR MMC rsp not supported\n");
120 			break;
121 		}
122 	}
123 }
124 
mmc_trace_state(struct mmc * mmc,struct mmc_cmd * cmd)125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
126 {
127 	int status;
128 
129 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 	printf("CURR STATE:%d\n", status);
131 }
132 #endif
133 
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
mmc_mode_name(enum bus_mode mode)135 const char *mmc_mode_name(enum bus_mode mode)
136 {
137 	static const char *const names[] = {
138 	      [MMC_LEGACY]	= "MMC legacy",
139 	      [SD_LEGACY]	= "SD Legacy",
140 	      [MMC_HS]		= "MMC High Speed (26MHz)",
141 	      [SD_HS]		= "SD High Speed (50MHz)",
142 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
143 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
144 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
145 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
146 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
147 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
148 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
149 	      [MMC_HS_200]	= "HS200 (200MHz)",
150 	      [MMC_HS_400]	= "HS400 (200MHz)",
151 	      [MMC_HS_400_ES]	= "HS400ES (200MHz)",
152 	};
153 
154 	if (mode >= MMC_MODES_END)
155 		return "Unknown mode";
156 	else
157 		return names[mode];
158 }
159 #endif
160 
mmc_mode2freq(struct mmc * mmc,enum bus_mode mode)161 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 {
163 	static const int freqs[] = {
164 	      [MMC_LEGACY]	= 25000000,
165 	      [SD_LEGACY]	= 25000000,
166 	      [MMC_HS]		= 26000000,
167 	      [SD_HS]		= 50000000,
168 	      [MMC_HS_52]	= 52000000,
169 	      [MMC_DDR_52]	= 52000000,
170 	      [UHS_SDR12]	= 25000000,
171 	      [UHS_SDR25]	= 50000000,
172 	      [UHS_SDR50]	= 100000000,
173 	      [UHS_DDR50]	= 50000000,
174 	      [UHS_SDR104]	= 208000000,
175 	      [MMC_HS_200]	= 200000000,
176 	      [MMC_HS_400]	= 200000000,
177 	      [MMC_HS_400_ES]	= 200000000,
178 	};
179 
180 	if (mode == MMC_LEGACY)
181 		return mmc->legacy_speed;
182 	else if (mode >= MMC_MODES_END)
183 		return 0;
184 	else
185 		return freqs[mode];
186 }
187 
mmc_select_mode(struct mmc * mmc,enum bus_mode mode)188 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
189 {
190 	mmc->selected_mode = mode;
191 	mmc->tran_speed = mmc_mode2freq(mmc, mode);
192 	mmc->ddr_mode = mmc_is_mode_ddr(mode);
193 	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
194 		 mmc->tran_speed / 1000000);
195 	return 0;
196 }
197 
198 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_send_cmd(struct mmc * mmc,struct mmc_cmd * cmd,struct mmc_data * data)199 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
200 {
201 	int ret;
202 
203 	mmmc_trace_before_send(mmc, cmd);
204 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
205 	mmmc_trace_after_send(mmc, cmd, ret);
206 
207 	return ret;
208 }
209 #endif
210 
mmc_send_status(struct mmc * mmc,unsigned int * status)211 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 {
213 	struct mmc_cmd cmd;
214 	int err, retries = 5;
215 
216 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
217 	cmd.resp_type = MMC_RSP_R1;
218 	if (!mmc_host_is_spi(mmc))
219 		cmd.cmdarg = mmc->rca << 16;
220 
221 	while (retries--) {
222 		err = mmc_send_cmd(mmc, &cmd, NULL);
223 		if (!err) {
224 			mmc_trace_state(mmc, &cmd);
225 			*status = cmd.response[0];
226 			return 0;
227 		}
228 	}
229 	mmc_trace_state(mmc, &cmd);
230 	return -ECOMM;
231 }
232 
mmc_poll_for_busy(struct mmc * mmc,int timeout_ms)233 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
234 {
235 	unsigned int status;
236 	int err;
237 
238 	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
239 	if (err != -ENOSYS)
240 		return err;
241 
242 	while (1) {
243 		err = mmc_send_status(mmc, &status);
244 		if (err)
245 			return err;
246 
247 		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
248 		    (status & MMC_STATUS_CURR_STATE) !=
249 		     MMC_STATE_PRG)
250 			break;
251 
252 		if (status & MMC_STATUS_MASK) {
253 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
254 			pr_err("Status Error: 0x%08x\n", status);
255 #endif
256 			return -ECOMM;
257 		}
258 
259 		if (timeout_ms-- <= 0)
260 			break;
261 
262 		udelay(1000);
263 	}
264 
265 	if (timeout_ms <= 0) {
266 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
267 		pr_err("Timeout waiting card ready\n");
268 #endif
269 		return -ETIMEDOUT;
270 	}
271 
272 	return 0;
273 }
274 
mmc_set_blocklen(struct mmc * mmc,int len)275 int mmc_set_blocklen(struct mmc *mmc, int len)
276 {
277 	struct mmc_cmd cmd;
278 	int err;
279 
280 	if (mmc->ddr_mode)
281 		return 0;
282 
283 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
284 	cmd.resp_type = MMC_RSP_R1;
285 	cmd.cmdarg = len;
286 
287 	err = mmc_send_cmd(mmc, &cmd, NULL);
288 
289 #ifdef CONFIG_MMC_QUIRKS
290 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 		int retries = 4;
292 		/*
293 		 * It has been seen that SET_BLOCKLEN may fail on the first
294 		 * attempt, let's try a few more time
295 		 */
296 		do {
297 			err = mmc_send_cmd(mmc, &cmd, NULL);
298 			if (!err)
299 				break;
300 		} while (retries--);
301 	}
302 #endif
303 
304 	return err;
305 }
306 
307 #ifdef MMC_SUPPORTS_TUNING
308 static const u8 tuning_blk_pattern_4bit[] = {
309 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
310 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
311 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
312 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
313 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
314 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
315 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
316 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 };
318 
319 static const u8 tuning_blk_pattern_8bit[] = {
320 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
321 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
322 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
323 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
324 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
325 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
326 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
327 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
328 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
329 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
330 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
331 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
332 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
333 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
334 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
335 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
336 };
337 
mmc_send_tuning(struct mmc * mmc,u32 opcode,int * cmd_error)338 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 {
340 	struct mmc_cmd cmd;
341 	struct mmc_data data;
342 	const u8 *tuning_block_pattern;
343 	int size, err;
344 
345 	if (mmc->bus_width == 8) {
346 		tuning_block_pattern = tuning_blk_pattern_8bit;
347 		size = sizeof(tuning_blk_pattern_8bit);
348 	} else if (mmc->bus_width == 4) {
349 		tuning_block_pattern = tuning_blk_pattern_4bit;
350 		size = sizeof(tuning_blk_pattern_4bit);
351 	} else {
352 		return -EINVAL;
353 	}
354 
355 	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
356 
357 	cmd.cmdidx = opcode;
358 	cmd.cmdarg = 0;
359 	cmd.resp_type = MMC_RSP_R1;
360 
361 	data.dest = (void *)data_buf;
362 	data.blocks = 1;
363 	data.blocksize = size;
364 	data.flags = MMC_DATA_READ;
365 
366 	err = mmc_send_cmd(mmc, &cmd, &data);
367 	if (err)
368 		return err;
369 
370 	if (memcmp(data_buf, tuning_block_pattern, size))
371 		return -EIO;
372 
373 	return 0;
374 }
375 #endif
376 
mmc_read_blocks(struct mmc * mmc,void * dst,lbaint_t start,lbaint_t blkcnt)377 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
378 			   lbaint_t blkcnt)
379 {
380 	struct mmc_cmd cmd;
381 	struct mmc_data data;
382 
383 	if (blkcnt > 1)
384 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
385 	else
386 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
387 
388 	if (mmc->high_capacity)
389 		cmd.cmdarg = start;
390 	else
391 		cmd.cmdarg = start * mmc->read_bl_len;
392 
393 	cmd.resp_type = MMC_RSP_R1;
394 
395 	data.dest = dst;
396 	data.blocks = blkcnt;
397 	data.blocksize = mmc->read_bl_len;
398 	data.flags = MMC_DATA_READ;
399 
400 	if (mmc_send_cmd(mmc, &cmd, &data))
401 		return 0;
402 
403 	if (blkcnt > 1) {
404 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
405 		cmd.cmdarg = 0;
406 		cmd.resp_type = MMC_RSP_R1b;
407 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
408 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
409 			pr_err("mmc fail to send stop cmd\n");
410 #endif
411 			return 0;
412 		}
413 	}
414 
415 	return blkcnt;
416 }
417 
418 #if CONFIG_IS_ENABLED(BLK)
mmc_bread(struct udevice * dev,lbaint_t start,lbaint_t blkcnt,void * dst)419 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
420 #else
421 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
422 		void *dst)
423 #endif
424 {
425 #if CONFIG_IS_ENABLED(BLK)
426 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
427 #endif
428 	int dev_num = block_dev->devnum;
429 	int err;
430 	lbaint_t cur, blocks_todo = blkcnt;
431 
432 	if (blkcnt == 0)
433 		return 0;
434 
435 	struct mmc *mmc = find_mmc_device(dev_num);
436 	if (!mmc)
437 		return 0;
438 
439 	if (CONFIG_IS_ENABLED(MMC_TINY))
440 		err = mmc_switch_part(mmc, block_dev->hwpart);
441 	else
442 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
443 
444 	if (err < 0)
445 		return 0;
446 
447 	if ((start + blkcnt) > block_dev->lba) {
448 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
449 		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
450 		       start + blkcnt, block_dev->lba);
451 #endif
452 		return 0;
453 	}
454 
455 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
456 		pr_debug("%s: Failed to set blocklen\n", __func__);
457 		return 0;
458 	}
459 
460 	do {
461 		cur = (blocks_todo > mmc->cfg->b_max) ?
462 			mmc->cfg->b_max : blocks_todo;
463 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
464 			pr_debug("%s: Failed to read blocks\n", __func__);
465 			return 0;
466 		}
467 		blocks_todo -= cur;
468 		start += cur;
469 		dst += cur * mmc->read_bl_len;
470 	} while (blocks_todo > 0);
471 
472 	return blkcnt;
473 }
474 
mmc_go_idle(struct mmc * mmc)475 static int mmc_go_idle(struct mmc *mmc)
476 {
477 	struct mmc_cmd cmd;
478 	int err;
479 
480 	udelay(1000);
481 
482 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
483 	cmd.cmdarg = 0;
484 	cmd.resp_type = MMC_RSP_NONE;
485 
486 	err = mmc_send_cmd(mmc, &cmd, NULL);
487 
488 	if (err)
489 		return err;
490 
491 	udelay(2000);
492 
493 	return 0;
494 }
495 
496 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
mmc_switch_voltage(struct mmc * mmc,int signal_voltage)497 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
498 {
499 	struct mmc_cmd cmd;
500 	int err = 0;
501 
502 	/*
503 	 * Send CMD11 only if the request is to switch the card to
504 	 * 1.8V signalling.
505 	 */
506 	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
507 		return mmc_set_signal_voltage(mmc, signal_voltage);
508 
509 	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
510 	cmd.cmdarg = 0;
511 	cmd.resp_type = MMC_RSP_R1;
512 
513 	err = mmc_send_cmd(mmc, &cmd, NULL);
514 	if (err)
515 		return err;
516 
517 	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
518 		return -EIO;
519 
520 	/*
521 	 * The card should drive cmd and dat[0:3] low immediately
522 	 * after the response of cmd11, but wait 100 us to be sure
523 	 */
524 	err = mmc_wait_dat0(mmc, 0, 100);
525 	if (err == -ENOSYS)
526 		udelay(100);
527 	else if (err)
528 		return -ETIMEDOUT;
529 
530 	/*
531 	 * During a signal voltage level switch, the clock must be gated
532 	 * for 5 ms according to the SD spec
533 	 */
534 	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
535 
536 	err = mmc_set_signal_voltage(mmc, signal_voltage);
537 	if (err)
538 		return err;
539 
540 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
541 	mdelay(10);
542 	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
543 
544 	/*
545 	 * Failure to switch is indicated by the card holding
546 	 * dat[0:3] low. Wait for at least 1 ms according to spec
547 	 */
548 	err = mmc_wait_dat0(mmc, 1, 1000);
549 	if (err == -ENOSYS)
550 		udelay(1000);
551 	else if (err)
552 		return -ETIMEDOUT;
553 
554 	return 0;
555 }
556 #endif
557 
sd_send_op_cond(struct mmc * mmc,bool uhs_en)558 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
559 {
560 	int timeout = 1000;
561 	int err;
562 	struct mmc_cmd cmd;
563 
564 	while (1) {
565 		cmd.cmdidx = MMC_CMD_APP_CMD;
566 		cmd.resp_type = MMC_RSP_R1;
567 		cmd.cmdarg = 0;
568 
569 		err = mmc_send_cmd(mmc, &cmd, NULL);
570 
571 		if (err)
572 			return err;
573 
574 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
575 		cmd.resp_type = MMC_RSP_R3;
576 
577 		/*
578 		 * Most cards do not answer if some reserved bits
579 		 * in the ocr are set. However, Some controller
580 		 * can set bit 7 (reserved for low voltages), but
581 		 * how to manage low voltages SD card is not yet
582 		 * specified.
583 		 */
584 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
585 			(mmc->cfg->voltages & 0xff8000);
586 
587 		if (mmc->version == SD_VERSION_2)
588 			cmd.cmdarg |= OCR_HCS;
589 
590 		if (uhs_en)
591 			cmd.cmdarg |= OCR_S18R;
592 
593 		err = mmc_send_cmd(mmc, &cmd, NULL);
594 
595 		if (err)
596 			return err;
597 
598 		if (cmd.response[0] & OCR_BUSY)
599 			break;
600 
601 		if (timeout-- <= 0)
602 			return -EOPNOTSUPP;
603 
604 		udelay(1000);
605 	}
606 
607 	if (mmc->version != SD_VERSION_2)
608 		mmc->version = SD_VERSION_1_0;
609 
610 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
611 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
612 		cmd.resp_type = MMC_RSP_R3;
613 		cmd.cmdarg = 0;
614 
615 		err = mmc_send_cmd(mmc, &cmd, NULL);
616 
617 		if (err)
618 			return err;
619 	}
620 
621 	mmc->ocr = cmd.response[0];
622 
623 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
624 	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
625 	    == 0x41000000) {
626 		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
627 		if (err)
628 			return err;
629 	}
630 #endif
631 
632 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
633 	mmc->rca = 0;
634 
635 	return 0;
636 }
637 
mmc_send_op_cond_iter(struct mmc * mmc,int use_arg)638 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
639 {
640 	struct mmc_cmd cmd;
641 	int err;
642 
643 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
644 	cmd.resp_type = MMC_RSP_R3;
645 	cmd.cmdarg = 0;
646 	if (use_arg && !mmc_host_is_spi(mmc))
647 		cmd.cmdarg = OCR_HCS |
648 			(mmc->cfg->voltages &
649 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
650 			(mmc->ocr & OCR_ACCESS_MODE);
651 
652 	err = mmc_send_cmd(mmc, &cmd, NULL);
653 	if (err)
654 		return err;
655 	mmc->ocr = cmd.response[0];
656 	return 0;
657 }
658 
mmc_send_op_cond(struct mmc * mmc)659 static int mmc_send_op_cond(struct mmc *mmc)
660 {
661 	int err, i;
662 
663 	/* Some cards seem to need this */
664 	mmc_go_idle(mmc);
665 
666  	/* Asking to the card its capabilities */
667 	for (i = 0; i < 2; i++) {
668 		err = mmc_send_op_cond_iter(mmc, i != 0);
669 		if (err)
670 			return err;
671 
672 		/* exit if not busy (flag seems to be inverted) */
673 		if (mmc->ocr & OCR_BUSY)
674 			break;
675 	}
676 	mmc->op_cond_pending = 1;
677 	return 0;
678 }
679 
mmc_complete_op_cond(struct mmc * mmc)680 static int mmc_complete_op_cond(struct mmc *mmc)
681 {
682 	struct mmc_cmd cmd;
683 	int timeout = 1000;
684 	ulong start;
685 	int err;
686 
687 	mmc->op_cond_pending = 0;
688 	if (!(mmc->ocr & OCR_BUSY)) {
689 		/* Some cards seem to need this */
690 		mmc_go_idle(mmc);
691 
692 		start = get_timer(0);
693 		while (1) {
694 			err = mmc_send_op_cond_iter(mmc, 1);
695 			if (err)
696 				return err;
697 			if (mmc->ocr & OCR_BUSY)
698 				break;
699 			if (get_timer(start) > timeout)
700 				return -EOPNOTSUPP;
701 			udelay(100);
702 		}
703 	}
704 
705 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
706 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
707 		cmd.resp_type = MMC_RSP_R3;
708 		cmd.cmdarg = 0;
709 
710 		err = mmc_send_cmd(mmc, &cmd, NULL);
711 
712 		if (err)
713 			return err;
714 
715 		mmc->ocr = cmd.response[0];
716 	}
717 
718 	mmc->version = MMC_VERSION_UNKNOWN;
719 
720 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
721 	mmc->rca = 1;
722 
723 	return 0;
724 }
725 
726 
mmc_send_ext_csd(struct mmc * mmc,u8 * ext_csd)727 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
728 {
729 	struct mmc_cmd cmd;
730 	struct mmc_data data;
731 	int err;
732 
733 	/* Get the Card Status Register */
734 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
735 	cmd.resp_type = MMC_RSP_R1;
736 	cmd.cmdarg = 0;
737 
738 	data.dest = (char *)ext_csd;
739 	data.blocks = 1;
740 	data.blocksize = MMC_MAX_BLOCK_LEN;
741 	data.flags = MMC_DATA_READ;
742 
743 	err = mmc_send_cmd(mmc, &cmd, &data);
744 
745 	return err;
746 }
747 
__mmc_switch(struct mmc * mmc,u8 set,u8 index,u8 value,bool send_status)748 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
749 			bool send_status)
750 {
751 	unsigned int status, start;
752 	struct mmc_cmd cmd;
753 	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
754 	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
755 			      (index == EXT_CSD_PART_CONF);
756 	int retries = 3;
757 	int ret;
758 
759 	if (mmc->gen_cmd6_time)
760 		timeout_ms = mmc->gen_cmd6_time * 10;
761 
762 	if (is_part_switch  && mmc->part_switch_time)
763 		timeout_ms = mmc->part_switch_time * 10;
764 
765 	cmd.cmdidx = MMC_CMD_SWITCH;
766 	cmd.resp_type = MMC_RSP_R1b;
767 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
768 				 (index << 16) |
769 				 (value << 8);
770 
771 	do {
772 		ret = mmc_send_cmd(mmc, &cmd, NULL);
773 	} while (ret && retries-- > 0);
774 
775 	if (ret)
776 		return ret;
777 
778 	start = get_timer(0);
779 
780 	/* poll dat0 for rdy/buys status */
781 	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
782 	if (ret && ret != -ENOSYS)
783 		return ret;
784 
785 	/*
786 	 * In cases when not allowed to poll by using CMD13 or because we aren't
787 	 * capable of polling by using mmc_wait_dat0, then rely on waiting the
788 	 * stated timeout to be sufficient.
789 	 */
790 	if (ret == -ENOSYS && !send_status)
791 		mdelay(timeout_ms);
792 
793 	/* Finally wait until the card is ready or indicates a failure
794 	 * to switch. It doesn't hurt to use CMD13 here even if send_status
795 	 * is false, because by now (after 'timeout_ms' ms) the bus should be
796 	 * reliable.
797 	 */
798 	do {
799 		ret = mmc_send_status(mmc, &status);
800 
801 		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
802 			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
803 				 value);
804 			return -EIO;
805 		}
806 		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
807 			return 0;
808 		udelay(100);
809 	} while (get_timer(start) < timeout_ms);
810 
811 	return -ETIMEDOUT;
812 }
813 
mmc_switch(struct mmc * mmc,u8 set,u8 index,u8 value)814 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
815 {
816 	return __mmc_switch(mmc, set, index, value, true);
817 }
818 
819 #if !CONFIG_IS_ENABLED(MMC_TINY)
mmc_set_card_speed(struct mmc * mmc,enum bus_mode mode,bool hsdowngrade)820 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
821 			      bool hsdowngrade)
822 {
823 	int err;
824 	int speed_bits;
825 
826 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
827 
828 	switch (mode) {
829 	case MMC_HS:
830 	case MMC_HS_52:
831 	case MMC_DDR_52:
832 		speed_bits = EXT_CSD_TIMING_HS;
833 		break;
834 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
835 	case MMC_HS_200:
836 		speed_bits = EXT_CSD_TIMING_HS200;
837 		break;
838 #endif
839 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
840 	case MMC_HS_400:
841 		speed_bits = EXT_CSD_TIMING_HS400;
842 		break;
843 #endif
844 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
845 	case MMC_HS_400_ES:
846 		speed_bits = EXT_CSD_TIMING_HS400;
847 		break;
848 #endif
849 	case MMC_LEGACY:
850 		speed_bits = EXT_CSD_TIMING_LEGACY;
851 		break;
852 	default:
853 		return -EINVAL;
854 	}
855 
856 	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
857 			   speed_bits, !hsdowngrade);
858 	if (err)
859 		return err;
860 
861 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
862     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
863 	/*
864 	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
865 	 * to HS mode, the card clock are still running much faster than
866 	 * the supported HS mode clock, so we can not reliably read out
867 	 * Extended CSD. Reconfigure the controller to run at HS mode.
868 	 */
869 	if (hsdowngrade) {
870 		mmc_select_mode(mmc, MMC_HS);
871 		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
872 	}
873 #endif
874 
875 	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
876 		/* Now check to see that it worked */
877 		err = mmc_send_ext_csd(mmc, test_csd);
878 		if (err)
879 			return err;
880 
881 		/* No high-speed support */
882 		if (!test_csd[EXT_CSD_HS_TIMING])
883 			return -ENOTSUPP;
884 	}
885 
886 	return 0;
887 }
888 
mmc_get_capabilities(struct mmc * mmc)889 static int mmc_get_capabilities(struct mmc *mmc)
890 {
891 	u8 *ext_csd = mmc->ext_csd;
892 	char cardtype;
893 
894 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
895 
896 	if (mmc_host_is_spi(mmc))
897 		return 0;
898 
899 	/* Only version 4 supports high-speed */
900 	if (mmc->version < MMC_VERSION_4)
901 		return 0;
902 
903 	if (!ext_csd) {
904 		pr_err("No ext_csd found!\n"); /* this should enver happen */
905 		return -ENOTSUPP;
906 	}
907 
908 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
909 
910 	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
911 	mmc->cardtype = cardtype;
912 
913 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
914 	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
915 			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
916 		mmc->card_caps |= MMC_MODE_HS200;
917 	}
918 #endif
919 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
920 	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
921 	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
922 			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
923 		mmc->card_caps |= MMC_MODE_HS400;
924 	}
925 #endif
926 	if (cardtype & EXT_CSD_CARD_TYPE_52) {
927 		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
928 			mmc->card_caps |= MMC_MODE_DDR_52MHz;
929 		mmc->card_caps |= MMC_MODE_HS_52MHz;
930 	}
931 	if (cardtype & EXT_CSD_CARD_TYPE_26)
932 		mmc->card_caps |= MMC_MODE_HS;
933 
934 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
935 	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
936 	    (mmc->card_caps & MMC_MODE_HS400)) {
937 		mmc->card_caps |= MMC_MODE_HS400_ES;
938 	}
939 #endif
940 
941 	return 0;
942 }
943 #endif
944 
mmc_set_capacity(struct mmc * mmc,int part_num)945 static int mmc_set_capacity(struct mmc *mmc, int part_num)
946 {
947 	switch (part_num) {
948 	case 0:
949 		mmc->capacity = mmc->capacity_user;
950 		break;
951 	case 1:
952 	case 2:
953 		mmc->capacity = mmc->capacity_boot;
954 		break;
955 	case 3:
956 		mmc->capacity = mmc->capacity_rpmb;
957 		break;
958 	case 4:
959 	case 5:
960 	case 6:
961 	case 7:
962 		mmc->capacity = mmc->capacity_gp[part_num - 4];
963 		break;
964 	default:
965 		return -1;
966 	}
967 
968 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
969 
970 	return 0;
971 }
972 
mmc_switch_part(struct mmc * mmc,unsigned int part_num)973 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
974 {
975 	int ret;
976 	int retry = 3;
977 
978 	do {
979 		ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
980 				 EXT_CSD_PART_CONF,
981 				 (mmc->part_config & ~PART_ACCESS_MASK)
982 				 | (part_num & PART_ACCESS_MASK));
983 	} while (ret && retry--);
984 
985 	/*
986 	 * Set the capacity if the switch succeeded or was intended
987 	 * to return to representing the raw device.
988 	 */
989 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
990 		ret = mmc_set_capacity(mmc, part_num);
991 		mmc_get_blk_desc(mmc)->hwpart = part_num;
992 	}
993 
994 	return ret;
995 }
996 
997 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
mmc_hwpart_config(struct mmc * mmc,const struct mmc_hwpart_conf * conf,enum mmc_hwpart_conf_mode mode)998 int mmc_hwpart_config(struct mmc *mmc,
999 		      const struct mmc_hwpart_conf *conf,
1000 		      enum mmc_hwpart_conf_mode mode)
1001 {
1002 	u8 part_attrs = 0;
1003 	u32 enh_size_mult;
1004 	u32 enh_start_addr;
1005 	u32 gp_size_mult[4];
1006 	u32 max_enh_size_mult;
1007 	u32 tot_enh_size_mult = 0;
1008 	u8 wr_rel_set;
1009 	int i, pidx, err;
1010 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1011 
1012 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1013 		return -EINVAL;
1014 
1015 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1016 		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1017 		return -EMEDIUMTYPE;
1018 	}
1019 
1020 	if (!(mmc->part_support & PART_SUPPORT)) {
1021 		pr_err("Card does not support partitioning\n");
1022 		return -EMEDIUMTYPE;
1023 	}
1024 
1025 	if (!mmc->hc_wp_grp_size) {
1026 		pr_err("Card does not define HC WP group size\n");
1027 		return -EMEDIUMTYPE;
1028 	}
1029 
1030 	/* check partition alignment and total enhanced size */
1031 	if (conf->user.enh_size) {
1032 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1033 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1034 			pr_err("User data enhanced area not HC WP group "
1035 			       "size aligned\n");
1036 			return -EINVAL;
1037 		}
1038 		part_attrs |= EXT_CSD_ENH_USR;
1039 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1040 		if (mmc->high_capacity) {
1041 			enh_start_addr = conf->user.enh_start;
1042 		} else {
1043 			enh_start_addr = (conf->user.enh_start << 9);
1044 		}
1045 	} else {
1046 		enh_size_mult = 0;
1047 		enh_start_addr = 0;
1048 	}
1049 	tot_enh_size_mult += enh_size_mult;
1050 
1051 	for (pidx = 0; pidx < 4; pidx++) {
1052 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1053 			pr_err("GP%i partition not HC WP group size "
1054 			       "aligned\n", pidx+1);
1055 			return -EINVAL;
1056 		}
1057 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1058 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1059 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1060 			tot_enh_size_mult += gp_size_mult[pidx];
1061 		}
1062 	}
1063 
1064 	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1065 		pr_err("Card does not support enhanced attribute\n");
1066 		return -EMEDIUMTYPE;
1067 	}
1068 
1069 	err = mmc_send_ext_csd(mmc, ext_csd);
1070 	if (err)
1071 		return err;
1072 
1073 	max_enh_size_mult =
1074 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1075 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1076 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1077 	if (tot_enh_size_mult > max_enh_size_mult) {
1078 		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1079 		       tot_enh_size_mult, max_enh_size_mult);
1080 		return -EMEDIUMTYPE;
1081 	}
1082 
1083 	/* The default value of EXT_CSD_WR_REL_SET is device
1084 	 * dependent, the values can only be changed if the
1085 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1086 	 * changed only once and before partitioning is completed. */
1087 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1088 	if (conf->user.wr_rel_change) {
1089 		if (conf->user.wr_rel_set)
1090 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1091 		else
1092 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1093 	}
1094 	for (pidx = 0; pidx < 4; pidx++) {
1095 		if (conf->gp_part[pidx].wr_rel_change) {
1096 			if (conf->gp_part[pidx].wr_rel_set)
1097 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1098 			else
1099 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1100 		}
1101 	}
1102 
1103 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1104 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1105 		puts("Card does not support host controlled partition write "
1106 		     "reliability settings\n");
1107 		return -EMEDIUMTYPE;
1108 	}
1109 
1110 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1111 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1112 		pr_err("Card already partitioned\n");
1113 		return -EPERM;
1114 	}
1115 
1116 	if (mode == MMC_HWPART_CONF_CHECK)
1117 		return 0;
1118 
1119 	/* Partitioning requires high-capacity size definitions */
1120 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1121 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1122 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1123 
1124 		if (err)
1125 			return err;
1126 
1127 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1128 
1129 		/* update erase group size to be high-capacity */
1130 		mmc->erase_grp_size =
1131 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1132 
1133 	}
1134 
1135 	/* all OK, write the configuration */
1136 	for (i = 0; i < 4; i++) {
1137 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1138 				 EXT_CSD_ENH_START_ADDR+i,
1139 				 (enh_start_addr >> (i*8)) & 0xFF);
1140 		if (err)
1141 			return err;
1142 	}
1143 	for (i = 0; i < 3; i++) {
1144 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1145 				 EXT_CSD_ENH_SIZE_MULT+i,
1146 				 (enh_size_mult >> (i*8)) & 0xFF);
1147 		if (err)
1148 			return err;
1149 	}
1150 	for (pidx = 0; pidx < 4; pidx++) {
1151 		for (i = 0; i < 3; i++) {
1152 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1153 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1154 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1155 			if (err)
1156 				return err;
1157 		}
1158 	}
1159 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1160 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1161 	if (err)
1162 		return err;
1163 
1164 	if (mode == MMC_HWPART_CONF_SET)
1165 		return 0;
1166 
1167 	/* The WR_REL_SET is a write-once register but shall be
1168 	 * written before setting PART_SETTING_COMPLETED. As it is
1169 	 * write-once we can only write it when completing the
1170 	 * partitioning. */
1171 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1172 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1173 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1174 		if (err)
1175 			return err;
1176 	}
1177 
1178 	/* Setting PART_SETTING_COMPLETED confirms the partition
1179 	 * configuration but it only becomes effective after power
1180 	 * cycle, so we do not adjust the partition related settings
1181 	 * in the mmc struct. */
1182 
1183 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1184 			 EXT_CSD_PARTITION_SETTING,
1185 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1186 	if (err)
1187 		return err;
1188 
1189 	return 0;
1190 }
1191 #endif
1192 
1193 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_getcd(struct mmc * mmc)1194 int mmc_getcd(struct mmc *mmc)
1195 {
1196 	int cd;
1197 
1198 	cd = board_mmc_getcd(mmc);
1199 
1200 	if (cd < 0) {
1201 		if (mmc->cfg->ops->getcd)
1202 			cd = mmc->cfg->ops->getcd(mmc);
1203 		else
1204 			cd = 1;
1205 	}
1206 
1207 	return cd;
1208 }
1209 #endif
1210 
1211 #if !CONFIG_IS_ENABLED(MMC_TINY)
sd_switch(struct mmc * mmc,int mode,int group,u8 value,u8 * resp)1212 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1213 {
1214 	struct mmc_cmd cmd;
1215 	struct mmc_data data;
1216 
1217 	/* Switch the frequency */
1218 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1219 	cmd.resp_type = MMC_RSP_R1;
1220 	cmd.cmdarg = (mode << 31) | 0xffffff;
1221 	cmd.cmdarg &= ~(0xf << (group * 4));
1222 	cmd.cmdarg |= value << (group * 4);
1223 
1224 	data.dest = (char *)resp;
1225 	data.blocksize = 64;
1226 	data.blocks = 1;
1227 	data.flags = MMC_DATA_READ;
1228 
1229 	return mmc_send_cmd(mmc, &cmd, &data);
1230 }
1231 
sd_get_capabilities(struct mmc * mmc)1232 static int sd_get_capabilities(struct mmc *mmc)
1233 {
1234 	int err;
1235 	struct mmc_cmd cmd;
1236 	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1237 	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1238 	struct mmc_data data;
1239 	int timeout;
1240 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1241 	u32 sd3_bus_mode;
1242 #endif
1243 
1244 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1245 
1246 	if (mmc_host_is_spi(mmc))
1247 		return 0;
1248 
1249 	/* Read the SCR to find out if this card supports higher speeds */
1250 	cmd.cmdidx = MMC_CMD_APP_CMD;
1251 	cmd.resp_type = MMC_RSP_R1;
1252 	cmd.cmdarg = mmc->rca << 16;
1253 
1254 	err = mmc_send_cmd(mmc, &cmd, NULL);
1255 
1256 	if (err)
1257 		return err;
1258 
1259 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1260 	cmd.resp_type = MMC_RSP_R1;
1261 	cmd.cmdarg = 0;
1262 
1263 	timeout = 3;
1264 
1265 retry_scr:
1266 	data.dest = (char *)scr;
1267 	data.blocksize = 8;
1268 	data.blocks = 1;
1269 	data.flags = MMC_DATA_READ;
1270 
1271 	err = mmc_send_cmd(mmc, &cmd, &data);
1272 
1273 	if (err) {
1274 		if (timeout--)
1275 			goto retry_scr;
1276 
1277 		return err;
1278 	}
1279 
1280 	mmc->scr[0] = __be32_to_cpu(scr[0]);
1281 	mmc->scr[1] = __be32_to_cpu(scr[1]);
1282 
1283 	switch ((mmc->scr[0] >> 24) & 0xf) {
1284 	case 0:
1285 		mmc->version = SD_VERSION_1_0;
1286 		break;
1287 	case 1:
1288 		mmc->version = SD_VERSION_1_10;
1289 		break;
1290 	case 2:
1291 		mmc->version = SD_VERSION_2;
1292 		if ((mmc->scr[0] >> 15) & 0x1)
1293 			mmc->version = SD_VERSION_3;
1294 		break;
1295 	default:
1296 		mmc->version = SD_VERSION_1_0;
1297 		break;
1298 	}
1299 
1300 	if (mmc->scr[0] & SD_DATA_4BIT)
1301 		mmc->card_caps |= MMC_MODE_4BIT;
1302 
1303 	/* Version 1.0 doesn't support switching */
1304 	if (mmc->version == SD_VERSION_1_0)
1305 		return 0;
1306 
1307 	timeout = 4;
1308 	while (timeout--) {
1309 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1310 				(u8 *)switch_status);
1311 
1312 		if (err)
1313 			return err;
1314 
1315 		/* The high-speed function is busy.  Try again */
1316 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1317 			break;
1318 	}
1319 
1320 	/* If high-speed isn't supported, we return */
1321 	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1322 		mmc->card_caps |= MMC_CAP(SD_HS);
1323 
1324 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1325 	/* Version before 3.0 don't support UHS modes */
1326 	if (mmc->version < SD_VERSION_3)
1327 		return 0;
1328 
1329 	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1330 	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1331 		mmc->card_caps |= MMC_CAP(UHS_SDR104);
1332 	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1333 		mmc->card_caps |= MMC_CAP(UHS_SDR50);
1334 	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1335 		mmc->card_caps |= MMC_CAP(UHS_SDR25);
1336 	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1337 		mmc->card_caps |= MMC_CAP(UHS_SDR12);
1338 	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1339 		mmc->card_caps |= MMC_CAP(UHS_DDR50);
1340 #endif
1341 
1342 	return 0;
1343 }
1344 
sd_set_card_speed(struct mmc * mmc,enum bus_mode mode)1345 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1346 {
1347 	int err;
1348 
1349 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1350 	int speed;
1351 
1352 	/* SD version 1.00 and 1.01 does not support CMD 6 */
1353 	if (mmc->version == SD_VERSION_1_0)
1354 		return 0;
1355 
1356 	switch (mode) {
1357 	case SD_LEGACY:
1358 		speed = UHS_SDR12_BUS_SPEED;
1359 		break;
1360 	case SD_HS:
1361 		speed = HIGH_SPEED_BUS_SPEED;
1362 		break;
1363 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1364 	case UHS_SDR12:
1365 		speed = UHS_SDR12_BUS_SPEED;
1366 		break;
1367 	case UHS_SDR25:
1368 		speed = UHS_SDR25_BUS_SPEED;
1369 		break;
1370 	case UHS_SDR50:
1371 		speed = UHS_SDR50_BUS_SPEED;
1372 		break;
1373 	case UHS_DDR50:
1374 		speed = UHS_DDR50_BUS_SPEED;
1375 		break;
1376 	case UHS_SDR104:
1377 		speed = UHS_SDR104_BUS_SPEED;
1378 		break;
1379 #endif
1380 	default:
1381 		return -EINVAL;
1382 	}
1383 
1384 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1385 	if (err)
1386 		return err;
1387 
1388 	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1389 		return -ENOTSUPP;
1390 
1391 	return 0;
1392 }
1393 
sd_select_bus_width(struct mmc * mmc,int w)1394 static int sd_select_bus_width(struct mmc *mmc, int w)
1395 {
1396 	int err;
1397 	struct mmc_cmd cmd;
1398 
1399 	if ((w != 4) && (w != 1))
1400 		return -EINVAL;
1401 
1402 	cmd.cmdidx = MMC_CMD_APP_CMD;
1403 	cmd.resp_type = MMC_RSP_R1;
1404 	cmd.cmdarg = mmc->rca << 16;
1405 
1406 	err = mmc_send_cmd(mmc, &cmd, NULL);
1407 	if (err)
1408 		return err;
1409 
1410 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1411 	cmd.resp_type = MMC_RSP_R1;
1412 	if (w == 4)
1413 		cmd.cmdarg = 2;
1414 	else if (w == 1)
1415 		cmd.cmdarg = 0;
1416 	err = mmc_send_cmd(mmc, &cmd, NULL);
1417 	if (err)
1418 		return err;
1419 
1420 	return 0;
1421 }
1422 #endif
1423 
1424 #if CONFIG_IS_ENABLED(MMC_WRITE)
sd_read_ssr(struct mmc * mmc)1425 static int sd_read_ssr(struct mmc *mmc)
1426 {
1427 	static const unsigned int sd_au_size[] = {
1428 		0,		SZ_16K / 512,		SZ_32K / 512,
1429 		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
1430 		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
1431 		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
1432 		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
1433 		SZ_64M / 512,
1434 	};
1435 	int err, i;
1436 	struct mmc_cmd cmd;
1437 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1438 	struct mmc_data data;
1439 	int timeout = 3;
1440 	unsigned int au, eo, et, es;
1441 
1442 	cmd.cmdidx = MMC_CMD_APP_CMD;
1443 	cmd.resp_type = MMC_RSP_R1;
1444 	cmd.cmdarg = mmc->rca << 16;
1445 
1446 	err = mmc_send_cmd(mmc, &cmd, NULL);
1447 	if (err)
1448 		return err;
1449 
1450 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1451 	cmd.resp_type = MMC_RSP_R1;
1452 	cmd.cmdarg = 0;
1453 
1454 retry_ssr:
1455 	data.dest = (char *)ssr;
1456 	data.blocksize = 64;
1457 	data.blocks = 1;
1458 	data.flags = MMC_DATA_READ;
1459 
1460 	err = mmc_send_cmd(mmc, &cmd, &data);
1461 	if (err) {
1462 		if (timeout--)
1463 			goto retry_ssr;
1464 
1465 		return err;
1466 	}
1467 
1468 	for (i = 0; i < 16; i++)
1469 		ssr[i] = be32_to_cpu(ssr[i]);
1470 
1471 	au = (ssr[2] >> 12) & 0xF;
1472 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1473 		mmc->ssr.au = sd_au_size[au];
1474 		es = (ssr[3] >> 24) & 0xFF;
1475 		es |= (ssr[2] & 0xFF) << 8;
1476 		et = (ssr[3] >> 18) & 0x3F;
1477 		if (es && et) {
1478 			eo = (ssr[3] >> 16) & 0x3;
1479 			mmc->ssr.erase_timeout = (et * 1000) / es;
1480 			mmc->ssr.erase_offset = eo * 1000;
1481 		}
1482 	} else {
1483 		pr_debug("Invalid Allocation Unit Size.\n");
1484 	}
1485 
1486 	return 0;
1487 }
1488 #endif
1489 /* frequency bases */
1490 /* divided by 10 to be nice to platforms without floating point */
1491 static const int fbase[] = {
1492 	10000,
1493 	100000,
1494 	1000000,
1495 	10000000,
1496 };
1497 
1498 /* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1499  * to platforms without floating point.
1500  */
1501 static const u8 multipliers[] = {
1502 	0,	/* reserved */
1503 	10,
1504 	12,
1505 	13,
1506 	15,
1507 	20,
1508 	25,
1509 	30,
1510 	35,
1511 	40,
1512 	45,
1513 	50,
1514 	55,
1515 	60,
1516 	70,
1517 	80,
1518 };
1519 
bus_width(uint cap)1520 static inline int bus_width(uint cap)
1521 {
1522 	if (cap == MMC_MODE_8BIT)
1523 		return 8;
1524 	if (cap == MMC_MODE_4BIT)
1525 		return 4;
1526 	if (cap == MMC_MODE_1BIT)
1527 		return 1;
1528 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1529 	return 0;
1530 }
1531 
1532 #if !CONFIG_IS_ENABLED(DM_MMC)
1533 #ifdef MMC_SUPPORTS_TUNING
mmc_execute_tuning(struct mmc * mmc,uint opcode)1534 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1535 {
1536 	return -ENOTSUPP;
1537 }
1538 #endif
1539 
mmc_set_ios(struct mmc * mmc)1540 static int mmc_set_ios(struct mmc *mmc)
1541 {
1542 	int ret = 0;
1543 
1544 	if (mmc->cfg->ops->set_ios)
1545 		ret = mmc->cfg->ops->set_ios(mmc);
1546 
1547 	return ret;
1548 }
1549 
mmc_host_power_cycle(struct mmc * mmc)1550 static int mmc_host_power_cycle(struct mmc *mmc)
1551 {
1552 	int ret = 0;
1553 
1554 	if (mmc->cfg->ops->host_power_cycle)
1555 		ret = mmc->cfg->ops->host_power_cycle(mmc);
1556 
1557 	return ret;
1558 }
1559 #endif
1560 
mmc_set_clock(struct mmc * mmc,uint clock,bool disable)1561 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1562 {
1563 	if (!disable) {
1564 		if (clock > mmc->cfg->f_max)
1565 			clock = mmc->cfg->f_max;
1566 
1567 		if (clock < mmc->cfg->f_min)
1568 			clock = mmc->cfg->f_min;
1569 	}
1570 
1571 	mmc->clock = clock;
1572 	mmc->clk_disable = disable;
1573 
1574 	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1575 
1576 	return mmc_set_ios(mmc);
1577 }
1578 
mmc_set_bus_width(struct mmc * mmc,uint width)1579 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1580 {
1581 	mmc->bus_width = width;
1582 
1583 	return mmc_set_ios(mmc);
1584 }
1585 
1586 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1587 /*
1588  * helper function to display the capabilities in a human
1589  * friendly manner. The capabilities include bus width and
1590  * supported modes.
1591  */
mmc_dump_capabilities(const char * text,uint caps)1592 void mmc_dump_capabilities(const char *text, uint caps)
1593 {
1594 	enum bus_mode mode;
1595 
1596 	pr_debug("%s: widths [", text);
1597 	if (caps & MMC_MODE_8BIT)
1598 		pr_debug("8, ");
1599 	if (caps & MMC_MODE_4BIT)
1600 		pr_debug("4, ");
1601 	if (caps & MMC_MODE_1BIT)
1602 		pr_debug("1, ");
1603 	pr_debug("\b\b] modes [");
1604 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1605 		if (MMC_CAP(mode) & caps)
1606 			pr_debug("%s, ", mmc_mode_name(mode));
1607 	pr_debug("\b\b]\n");
1608 }
1609 #endif
1610 
1611 struct mode_width_tuning {
1612 	enum bus_mode mode;
1613 	uint widths;
1614 #ifdef MMC_SUPPORTS_TUNING
1615 	uint tuning;
1616 #endif
1617 };
1618 
1619 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
mmc_voltage_to_mv(enum mmc_voltage voltage)1620 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1621 {
1622 	switch (voltage) {
1623 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1624 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1625 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1626 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1627 	}
1628 	return -EINVAL;
1629 }
1630 
mmc_set_signal_voltage(struct mmc * mmc,uint signal_voltage)1631 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1632 {
1633 	int err;
1634 
1635 	if (mmc->signal_voltage == signal_voltage)
1636 		return 0;
1637 
1638 	mmc->signal_voltage = signal_voltage;
1639 	err = mmc_set_ios(mmc);
1640 	if (err)
1641 		pr_debug("unable to set voltage (err %d)\n", err);
1642 
1643 	return err;
1644 }
1645 #else
mmc_set_signal_voltage(struct mmc * mmc,uint signal_voltage)1646 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1647 {
1648 	return 0;
1649 }
1650 #endif
1651 
1652 #if !CONFIG_IS_ENABLED(MMC_TINY)
1653 static const struct mode_width_tuning sd_modes_by_pref[] = {
1654 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1655 #ifdef MMC_SUPPORTS_TUNING
1656 	{
1657 		.mode = UHS_SDR104,
1658 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1659 		.tuning = MMC_CMD_SEND_TUNING_BLOCK
1660 	},
1661 #endif
1662 	{
1663 		.mode = UHS_SDR50,
1664 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1665 	},
1666 	{
1667 		.mode = UHS_DDR50,
1668 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1669 	},
1670 	{
1671 		.mode = UHS_SDR25,
1672 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1673 	},
1674 #endif
1675 	{
1676 		.mode = SD_HS,
1677 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1678 	},
1679 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1680 	{
1681 		.mode = UHS_SDR12,
1682 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1683 	},
1684 #endif
1685 	{
1686 		.mode = SD_LEGACY,
1687 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1688 	}
1689 };
1690 
1691 #define for_each_sd_mode_by_pref(caps, mwt) \
1692 	for (mwt = sd_modes_by_pref;\
1693 	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1694 	     mwt++) \
1695 		if (caps & MMC_CAP(mwt->mode))
1696 
sd_select_mode_and_width(struct mmc * mmc,uint card_caps)1697 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1698 {
1699 	int err;
1700 	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1701 	const struct mode_width_tuning *mwt;
1702 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1703 	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1704 #else
1705 	bool uhs_en = false;
1706 #endif
1707 	uint caps;
1708 
1709 #ifdef DEBUG
1710 	mmc_dump_capabilities("sd card", card_caps);
1711 	mmc_dump_capabilities("host", mmc->host_caps);
1712 #endif
1713 
1714 	if (mmc_host_is_spi(mmc)) {
1715 		mmc_set_bus_width(mmc, 1);
1716 		mmc_select_mode(mmc, SD_LEGACY);
1717 		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1718 		return 0;
1719 	}
1720 
1721 	/* Restrict card's capabilities by what the host can do */
1722 	caps = card_caps & mmc->host_caps;
1723 
1724 	if (!uhs_en)
1725 		caps &= ~UHS_CAPS;
1726 
1727 	for_each_sd_mode_by_pref(caps, mwt) {
1728 		uint *w;
1729 
1730 		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1731 			if (*w & caps & mwt->widths) {
1732 				pr_debug("trying mode %s width %d (at %d MHz)\n",
1733 					 mmc_mode_name(mwt->mode),
1734 					 bus_width(*w),
1735 					 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1736 
1737 				/* configure the bus width (card + host) */
1738 				err = sd_select_bus_width(mmc, bus_width(*w));
1739 				if (err)
1740 					goto error;
1741 				mmc_set_bus_width(mmc, bus_width(*w));
1742 
1743 				/* configure the bus mode (card) */
1744 				err = sd_set_card_speed(mmc, mwt->mode);
1745 				if (err)
1746 					goto error;
1747 
1748 				/* configure the bus mode (host) */
1749 				mmc_select_mode(mmc, mwt->mode);
1750 				mmc_set_clock(mmc, mmc->tran_speed,
1751 						MMC_CLK_ENABLE);
1752 
1753 #ifdef MMC_SUPPORTS_TUNING
1754 				/* execute tuning if needed */
1755 				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1756 					err = mmc_execute_tuning(mmc,
1757 								 mwt->tuning);
1758 					if (err) {
1759 						pr_debug("tuning failed\n");
1760 						goto error;
1761 					}
1762 				}
1763 #endif
1764 
1765 #if CONFIG_IS_ENABLED(MMC_WRITE)
1766 				err = sd_read_ssr(mmc);
1767 				if (err)
1768 					pr_warn("unable to read ssr\n");
1769 #endif
1770 				if (!err)
1771 					return 0;
1772 
1773 error:
1774 				/* revert to a safer bus speed */
1775 				mmc_select_mode(mmc, SD_LEGACY);
1776 				mmc_set_clock(mmc, mmc->tran_speed,
1777 						MMC_CLK_ENABLE);
1778 			}
1779 		}
1780 	}
1781 
1782 	pr_err("unable to select a mode\n");
1783 	return -ENOTSUPP;
1784 }
1785 
1786 /*
1787  * read the compare the part of ext csd that is constant.
1788  * This can be used to check that the transfer is working
1789  * as expected.
1790  */
mmc_read_and_compare_ext_csd(struct mmc * mmc)1791 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1792 {
1793 	int err;
1794 	const u8 *ext_csd = mmc->ext_csd;
1795 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1796 
1797 	if (mmc->version < MMC_VERSION_4)
1798 		return 0;
1799 
1800 	err = mmc_send_ext_csd(mmc, test_csd);
1801 	if (err)
1802 		return err;
1803 
1804 	/* Only compare read only fields */
1805 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1806 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1807 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1808 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1809 	    ext_csd[EXT_CSD_REV]
1810 		== test_csd[EXT_CSD_REV] &&
1811 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1812 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1813 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1814 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1815 		return 0;
1816 
1817 	return -EBADMSG;
1818 }
1819 
1820 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
mmc_set_lowest_voltage(struct mmc * mmc,enum bus_mode mode,uint32_t allowed_mask)1821 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1822 				  uint32_t allowed_mask)
1823 {
1824 	u32 card_mask = 0;
1825 
1826 	switch (mode) {
1827 	case MMC_HS_400_ES:
1828 	case MMC_HS_400:
1829 	case MMC_HS_200:
1830 		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1831 		    EXT_CSD_CARD_TYPE_HS400_1_8V))
1832 			card_mask |= MMC_SIGNAL_VOLTAGE_180;
1833 		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1834 		    EXT_CSD_CARD_TYPE_HS400_1_2V))
1835 			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1836 		break;
1837 	case MMC_DDR_52:
1838 		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1839 			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1840 				     MMC_SIGNAL_VOLTAGE_180;
1841 		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1842 			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1843 		break;
1844 	default:
1845 		card_mask |= MMC_SIGNAL_VOLTAGE_330;
1846 		break;
1847 	}
1848 
1849 	while (card_mask & allowed_mask) {
1850 		enum mmc_voltage best_match;
1851 
1852 		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1853 		if (!mmc_set_signal_voltage(mmc,  best_match))
1854 			return 0;
1855 
1856 		allowed_mask &= ~best_match;
1857 	}
1858 
1859 	return -ENOTSUPP;
1860 }
1861 #else
mmc_set_lowest_voltage(struct mmc * mmc,enum bus_mode mode,uint32_t allowed_mask)1862 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1863 					 uint32_t allowed_mask)
1864 {
1865 	return 0;
1866 }
1867 #endif
1868 
1869 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1870 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1871 	{
1872 		.mode = MMC_HS_400_ES,
1873 		.widths = MMC_MODE_8BIT,
1874 	},
1875 #endif
1876 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1877 	{
1878 		.mode = MMC_HS_400,
1879 		.widths = MMC_MODE_8BIT,
1880 		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1881 	},
1882 #endif
1883 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1884 	{
1885 		.mode = MMC_HS_200,
1886 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1887 		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1888 	},
1889 #endif
1890 	{
1891 		.mode = MMC_DDR_52,
1892 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1893 	},
1894 	{
1895 		.mode = MMC_HS_52,
1896 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1897 	},
1898 	{
1899 		.mode = MMC_HS,
1900 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1901 	},
1902 	{
1903 		.mode = MMC_LEGACY,
1904 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1905 	}
1906 };
1907 
1908 #define for_each_mmc_mode_by_pref(caps, mwt) \
1909 	for (mwt = mmc_modes_by_pref;\
1910 	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1911 	    mwt++) \
1912 		if (caps & MMC_CAP(mwt->mode))
1913 
1914 static const struct ext_csd_bus_width {
1915 	uint cap;
1916 	bool is_ddr;
1917 	uint ext_csd_bits;
1918 } ext_csd_bus_width[] = {
1919 	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1920 	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1921 	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1922 	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1923 	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1924 };
1925 
1926 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
mmc_select_hs400(struct mmc * mmc)1927 static int mmc_select_hs400(struct mmc *mmc)
1928 {
1929 	int err;
1930 
1931 	/* Set timing to HS200 for tuning */
1932 	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1933 	if (err)
1934 		return err;
1935 
1936 	/* configure the bus mode (host) */
1937 	mmc_select_mode(mmc, MMC_HS_200);
1938 	mmc_set_clock(mmc, mmc->tran_speed, false);
1939 
1940 	/* execute tuning if needed */
1941 	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1942 	if (err) {
1943 		debug("tuning failed\n");
1944 		return err;
1945 	}
1946 
1947 	/* Set back to HS */
1948 	mmc_set_card_speed(mmc, MMC_HS, true);
1949 
1950 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1951 			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1952 	if (err)
1953 		return err;
1954 
1955 	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1956 	if (err)
1957 		return err;
1958 
1959 	mmc_select_mode(mmc, MMC_HS_400);
1960 	err = mmc_set_clock(mmc, mmc->tran_speed, false);
1961 	if (err)
1962 		return err;
1963 
1964 	return 0;
1965 }
1966 #else
mmc_select_hs400(struct mmc * mmc)1967 static int mmc_select_hs400(struct mmc *mmc)
1968 {
1969 	return -ENOTSUPP;
1970 }
1971 #endif
1972 
1973 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1974 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_set_enhanced_strobe(struct mmc * mmc)1975 static int mmc_set_enhanced_strobe(struct mmc *mmc)
1976 {
1977 	return -ENOTSUPP;
1978 }
1979 #endif
mmc_select_hs400es(struct mmc * mmc)1980 static int mmc_select_hs400es(struct mmc *mmc)
1981 {
1982 	int err;
1983 
1984 	err = mmc_set_card_speed(mmc, MMC_HS, true);
1985 	if (err)
1986 		return err;
1987 
1988 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1989 			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
1990 			 EXT_CSD_BUS_WIDTH_STROBE);
1991 	if (err) {
1992 		printf("switch to bus width for hs400 failed\n");
1993 		return err;
1994 	}
1995 	/* TODO: driver strength */
1996 	err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
1997 	if (err)
1998 		return err;
1999 
2000 	mmc_select_mode(mmc, MMC_HS_400_ES);
2001 	err = mmc_set_clock(mmc, mmc->tran_speed, false);
2002 	if (err)
2003 		return err;
2004 
2005 	return mmc_set_enhanced_strobe(mmc);
2006 }
2007 #else
mmc_select_hs400es(struct mmc * mmc)2008 static int mmc_select_hs400es(struct mmc *mmc)
2009 {
2010 	return -ENOTSUPP;
2011 }
2012 #endif
2013 
2014 #define for_each_supported_width(caps, ddr, ecbv) \
2015 	for (ecbv = ext_csd_bus_width;\
2016 	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2017 	    ecbv++) \
2018 		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2019 
mmc_select_mode_and_width(struct mmc * mmc,uint card_caps)2020 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2021 {
2022 	int err;
2023 	const struct mode_width_tuning *mwt;
2024 	const struct ext_csd_bus_width *ecbw;
2025 
2026 #ifdef DEBUG
2027 	mmc_dump_capabilities("mmc", card_caps);
2028 	mmc_dump_capabilities("host", mmc->host_caps);
2029 #endif
2030 
2031 	if (mmc_host_is_spi(mmc)) {
2032 		mmc_set_bus_width(mmc, 1);
2033 		mmc_select_mode(mmc, MMC_LEGACY);
2034 		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2035 		return 0;
2036 	}
2037 
2038 	/* Restrict card's capabilities by what the host can do */
2039 	card_caps &= mmc->host_caps;
2040 
2041 	/* Only version 4 of MMC supports wider bus widths */
2042 	if (mmc->version < MMC_VERSION_4)
2043 		return 0;
2044 
2045 	if (!mmc->ext_csd) {
2046 		pr_debug("No ext_csd found!\n"); /* this should enver happen */
2047 		return -ENOTSUPP;
2048 	}
2049 
2050 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2051     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2052 	/*
2053 	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2054 	 * before doing anything else, since a transition from either of
2055 	 * the HS200/HS400 mode directly to legacy mode is not supported.
2056 	 */
2057 	if (mmc->selected_mode == MMC_HS_200 ||
2058 	    mmc->selected_mode == MMC_HS_400)
2059 		mmc_set_card_speed(mmc, MMC_HS, true);
2060 	else
2061 #endif
2062 		mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2063 
2064 	for_each_mmc_mode_by_pref(card_caps, mwt) {
2065 		for_each_supported_width(card_caps & mwt->widths,
2066 					 mmc_is_mode_ddr(mwt->mode), ecbw) {
2067 			enum mmc_voltage old_voltage;
2068 			pr_debug("trying mode %s width %d (at %d MHz)\n",
2069 				 mmc_mode_name(mwt->mode),
2070 				 bus_width(ecbw->cap),
2071 				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
2072 			old_voltage = mmc->signal_voltage;
2073 			err = mmc_set_lowest_voltage(mmc, mwt->mode,
2074 						     MMC_ALL_SIGNAL_VOLTAGE);
2075 			if (err)
2076 				continue;
2077 
2078 			/* configure the bus width (card + host) */
2079 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2080 				    EXT_CSD_BUS_WIDTH,
2081 				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2082 			if (err)
2083 				goto error;
2084 			mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2085 
2086 			if (mwt->mode == MMC_HS_400) {
2087 				err = mmc_select_hs400(mmc);
2088 				if (err) {
2089 					printf("Select HS400 failed %d\n", err);
2090 					goto error;
2091 				}
2092 			} else if (mwt->mode == MMC_HS_400_ES) {
2093 				err = mmc_select_hs400es(mmc);
2094 				if (err) {
2095 					printf("Select HS400ES failed %d\n",
2096 					       err);
2097 					goto error;
2098 				}
2099 			} else {
2100 				/* configure the bus speed (card) */
2101 				err = mmc_set_card_speed(mmc, mwt->mode, false);
2102 				if (err)
2103 					goto error;
2104 
2105 				/*
2106 				 * configure the bus width AND the ddr mode
2107 				 * (card). The host side will be taken care
2108 				 * of in the next step
2109 				 */
2110 				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2111 					err = mmc_switch(mmc,
2112 							 EXT_CSD_CMD_SET_NORMAL,
2113 							 EXT_CSD_BUS_WIDTH,
2114 							 ecbw->ext_csd_bits);
2115 					if (err)
2116 						goto error;
2117 				}
2118 
2119 				/* configure the bus mode (host) */
2120 				mmc_select_mode(mmc, mwt->mode);
2121 				mmc_set_clock(mmc, mmc->tran_speed,
2122 					      MMC_CLK_ENABLE);
2123 #ifdef MMC_SUPPORTS_TUNING
2124 
2125 				/* execute tuning if needed */
2126 				if (mwt->tuning) {
2127 					err = mmc_execute_tuning(mmc,
2128 								 mwt->tuning);
2129 					if (err) {
2130 						pr_debug("tuning failed\n");
2131 						goto error;
2132 					}
2133 				}
2134 #endif
2135 			}
2136 
2137 			/* do a transfer to check the configuration */
2138 			err = mmc_read_and_compare_ext_csd(mmc);
2139 			if (!err)
2140 				return 0;
2141 error:
2142 			mmc_set_signal_voltage(mmc, old_voltage);
2143 			/* if an error occured, revert to a safer bus mode */
2144 			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2145 				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2146 			mmc_select_mode(mmc, MMC_LEGACY);
2147 			mmc_set_bus_width(mmc, 1);
2148 		}
2149 	}
2150 
2151 	pr_err("unable to select a mode\n");
2152 
2153 	return -ENOTSUPP;
2154 }
2155 #endif
2156 
2157 #if CONFIG_IS_ENABLED(MMC_TINY)
2158 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2159 #endif
2160 
mmc_startup_v4(struct mmc * mmc)2161 static int mmc_startup_v4(struct mmc *mmc)
2162 {
2163 	int err, i;
2164 	u64 capacity;
2165 	bool has_parts = false;
2166 	bool part_completed;
2167 	static const u32 mmc_versions[] = {
2168 		MMC_VERSION_4,
2169 		MMC_VERSION_4_1,
2170 		MMC_VERSION_4_2,
2171 		MMC_VERSION_4_3,
2172 		MMC_VERSION_4_4,
2173 		MMC_VERSION_4_41,
2174 		MMC_VERSION_4_5,
2175 		MMC_VERSION_5_0,
2176 		MMC_VERSION_5_1
2177 	};
2178 
2179 #if CONFIG_IS_ENABLED(MMC_TINY)
2180 	u8 *ext_csd = ext_csd_bkup;
2181 
2182 	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2183 		return 0;
2184 
2185 	if (!mmc->ext_csd)
2186 		memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2187 
2188 	err = mmc_send_ext_csd(mmc, ext_csd);
2189 	if (err)
2190 		goto error;
2191 
2192 	/* store the ext csd for future reference */
2193 	if (!mmc->ext_csd)
2194 		mmc->ext_csd = ext_csd;
2195 #else
2196 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2197 
2198 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2199 		return 0;
2200 
2201 	/* check  ext_csd version and capacity */
2202 	err = mmc_send_ext_csd(mmc, ext_csd);
2203 	if (err)
2204 		goto error;
2205 
2206 	/* store the ext csd for future reference */
2207 	if (!mmc->ext_csd)
2208 		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2209 	if (!mmc->ext_csd)
2210 		return -ENOMEM;
2211 	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2212 #endif
2213 	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2214 		return -EINVAL;
2215 
2216 	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2217 
2218 	if (mmc->version >= MMC_VERSION_4_2) {
2219 		/*
2220 		 * According to the JEDEC Standard, the value of
2221 		 * ext_csd's capacity is valid if the value is more
2222 		 * than 2GB
2223 		 */
2224 		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2225 				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2226 				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2227 				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2228 		capacity *= MMC_MAX_BLOCK_LEN;
2229 		if ((capacity >> 20) > 2 * 1024)
2230 			mmc->capacity_user = capacity;
2231 	}
2232 
2233 	if (mmc->version >= MMC_VERSION_4_5)
2234 		mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2235 
2236 	/* The partition data may be non-zero but it is only
2237 	 * effective if PARTITION_SETTING_COMPLETED is set in
2238 	 * EXT_CSD, so ignore any data if this bit is not set,
2239 	 * except for enabling the high-capacity group size
2240 	 * definition (see below).
2241 	 */
2242 	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2243 			    EXT_CSD_PARTITION_SETTING_COMPLETED);
2244 
2245 	mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2246 	/* Some eMMC set the value too low so set a minimum */
2247 	if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2248 		mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2249 
2250 	/* store the partition info of emmc */
2251 	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2252 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2253 	    ext_csd[EXT_CSD_BOOT_MULT])
2254 		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2255 	if (part_completed &&
2256 	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2257 		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2258 
2259 	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2260 
2261 	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2262 
2263 	for (i = 0; i < 4; i++) {
2264 		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2265 		uint mult = (ext_csd[idx + 2] << 16) +
2266 			(ext_csd[idx + 1] << 8) + ext_csd[idx];
2267 		if (mult)
2268 			has_parts = true;
2269 		if (!part_completed)
2270 			continue;
2271 		mmc->capacity_gp[i] = mult;
2272 		mmc->capacity_gp[i] *=
2273 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2274 		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2275 		mmc->capacity_gp[i] <<= 19;
2276 	}
2277 
2278 #ifndef CONFIG_SPL_BUILD
2279 	if (part_completed) {
2280 		mmc->enh_user_size =
2281 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2282 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2283 			ext_csd[EXT_CSD_ENH_SIZE_MULT];
2284 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2285 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2286 		mmc->enh_user_size <<= 19;
2287 		mmc->enh_user_start =
2288 			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2289 			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2290 			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2291 			ext_csd[EXT_CSD_ENH_START_ADDR];
2292 		if (mmc->high_capacity)
2293 			mmc->enh_user_start <<= 9;
2294 	}
2295 #endif
2296 
2297 	/*
2298 	 * Host needs to enable ERASE_GRP_DEF bit if device is
2299 	 * partitioned. This bit will be lost every time after a reset
2300 	 * or power off. This will affect erase size.
2301 	 */
2302 	if (part_completed)
2303 		has_parts = true;
2304 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2305 	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2306 		has_parts = true;
2307 	if (has_parts) {
2308 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2309 				 EXT_CSD_ERASE_GROUP_DEF, 1);
2310 
2311 		if (err)
2312 			goto error;
2313 
2314 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2315 	}
2316 
2317 	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2318 #if CONFIG_IS_ENABLED(MMC_WRITE)
2319 		/* Read out group size from ext_csd */
2320 		mmc->erase_grp_size =
2321 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2322 #endif
2323 		/*
2324 		 * if high capacity and partition setting completed
2325 		 * SEC_COUNT is valid even if it is smaller than 2 GiB
2326 		 * JEDEC Standard JESD84-B45, 6.2.4
2327 		 */
2328 		if (mmc->high_capacity && part_completed) {
2329 			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2330 				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2331 				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2332 				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2333 			capacity *= MMC_MAX_BLOCK_LEN;
2334 			mmc->capacity_user = capacity;
2335 		}
2336 	}
2337 #if CONFIG_IS_ENABLED(MMC_WRITE)
2338 	else {
2339 		/* Calculate the group size from the csd value. */
2340 		int erase_gsz, erase_gmul;
2341 
2342 		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2343 		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2344 		mmc->erase_grp_size = (erase_gsz + 1)
2345 			* (erase_gmul + 1);
2346 	}
2347 #endif
2348 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2349 	mmc->hc_wp_grp_size = 1024
2350 		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2351 		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2352 #endif
2353 
2354 	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2355 
2356 	return 0;
2357 error:
2358 	if (mmc->ext_csd) {
2359 #if !CONFIG_IS_ENABLED(MMC_TINY)
2360 		free(mmc->ext_csd);
2361 #endif
2362 		mmc->ext_csd = NULL;
2363 	}
2364 	return err;
2365 }
2366 
mmc_startup(struct mmc * mmc)2367 static int mmc_startup(struct mmc *mmc)
2368 {
2369 	int err, i;
2370 	uint mult, freq;
2371 	u64 cmult, csize;
2372 	struct mmc_cmd cmd;
2373 	struct blk_desc *bdesc;
2374 
2375 #ifdef CONFIG_MMC_SPI_CRC_ON
2376 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2377 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2378 		cmd.resp_type = MMC_RSP_R1;
2379 		cmd.cmdarg = 1;
2380 		err = mmc_send_cmd(mmc, &cmd, NULL);
2381 		if (err)
2382 			return err;
2383 	}
2384 #endif
2385 
2386 	/* Put the Card in Identify Mode */
2387 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2388 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2389 	cmd.resp_type = MMC_RSP_R2;
2390 	cmd.cmdarg = 0;
2391 
2392 	err = mmc_send_cmd(mmc, &cmd, NULL);
2393 
2394 #ifdef CONFIG_MMC_QUIRKS
2395 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2396 		int retries = 4;
2397 		/*
2398 		 * It has been seen that SEND_CID may fail on the first
2399 		 * attempt, let's try a few more time
2400 		 */
2401 		do {
2402 			err = mmc_send_cmd(mmc, &cmd, NULL);
2403 			if (!err)
2404 				break;
2405 		} while (retries--);
2406 	}
2407 #endif
2408 
2409 	if (err)
2410 		return err;
2411 
2412 	memcpy(mmc->cid, cmd.response, 16);
2413 
2414 	/*
2415 	 * For MMC cards, set the Relative Address.
2416 	 * For SD cards, get the Relatvie Address.
2417 	 * This also puts the cards into Standby State
2418 	 */
2419 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2420 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2421 		cmd.cmdarg = mmc->rca << 16;
2422 		cmd.resp_type = MMC_RSP_R6;
2423 
2424 		err = mmc_send_cmd(mmc, &cmd, NULL);
2425 
2426 		if (err)
2427 			return err;
2428 
2429 		if (IS_SD(mmc))
2430 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2431 	}
2432 
2433 	/* Get the Card-Specific Data */
2434 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2435 	cmd.resp_type = MMC_RSP_R2;
2436 	cmd.cmdarg = mmc->rca << 16;
2437 
2438 	err = mmc_send_cmd(mmc, &cmd, NULL);
2439 
2440 	if (err)
2441 		return err;
2442 
2443 	mmc->csd[0] = cmd.response[0];
2444 	mmc->csd[1] = cmd.response[1];
2445 	mmc->csd[2] = cmd.response[2];
2446 	mmc->csd[3] = cmd.response[3];
2447 
2448 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2449 		int version = (cmd.response[0] >> 26) & 0xf;
2450 
2451 		switch (version) {
2452 		case 0:
2453 			mmc->version = MMC_VERSION_1_2;
2454 			break;
2455 		case 1:
2456 			mmc->version = MMC_VERSION_1_4;
2457 			break;
2458 		case 2:
2459 			mmc->version = MMC_VERSION_2_2;
2460 			break;
2461 		case 3:
2462 			mmc->version = MMC_VERSION_3;
2463 			break;
2464 		case 4:
2465 			mmc->version = MMC_VERSION_4;
2466 			break;
2467 		default:
2468 			mmc->version = MMC_VERSION_1_2;
2469 			break;
2470 		}
2471 	}
2472 
2473 	/* divide frequency by 10, since the mults are 10x bigger */
2474 	freq = fbase[(cmd.response[0] & 0x7)];
2475 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2476 
2477 	mmc->legacy_speed = freq * mult;
2478 	mmc_select_mode(mmc, MMC_LEGACY);
2479 
2480 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2481 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2482 #if CONFIG_IS_ENABLED(MMC_WRITE)
2483 
2484 	if (IS_SD(mmc))
2485 		mmc->write_bl_len = mmc->read_bl_len;
2486 	else
2487 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2488 #endif
2489 
2490 	if (mmc->high_capacity) {
2491 		csize = (mmc->csd[1] & 0x3f) << 16
2492 			| (mmc->csd[2] & 0xffff0000) >> 16;
2493 		cmult = 8;
2494 	} else {
2495 		csize = (mmc->csd[1] & 0x3ff) << 2
2496 			| (mmc->csd[2] & 0xc0000000) >> 30;
2497 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2498 	}
2499 
2500 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2501 	mmc->capacity_user *= mmc->read_bl_len;
2502 	mmc->capacity_boot = 0;
2503 	mmc->capacity_rpmb = 0;
2504 	for (i = 0; i < 4; i++)
2505 		mmc->capacity_gp[i] = 0;
2506 
2507 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2508 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2509 
2510 #if CONFIG_IS_ENABLED(MMC_WRITE)
2511 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2512 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2513 #endif
2514 
2515 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2516 		cmd.cmdidx = MMC_CMD_SET_DSR;
2517 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2518 		cmd.resp_type = MMC_RSP_NONE;
2519 		if (mmc_send_cmd(mmc, &cmd, NULL))
2520 			pr_warn("MMC: SET_DSR failed\n");
2521 	}
2522 
2523 	/* Select the card, and put it into Transfer Mode */
2524 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2525 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2526 		cmd.resp_type = MMC_RSP_R1;
2527 		cmd.cmdarg = mmc->rca << 16;
2528 		err = mmc_send_cmd(mmc, &cmd, NULL);
2529 
2530 		if (err)
2531 			return err;
2532 	}
2533 
2534 	/*
2535 	 * For SD, its erase group is always one sector
2536 	 */
2537 #if CONFIG_IS_ENABLED(MMC_WRITE)
2538 	mmc->erase_grp_size = 1;
2539 #endif
2540 	mmc->part_config = MMCPART_NOAVAILABLE;
2541 
2542 	err = mmc_startup_v4(mmc);
2543 	if (err)
2544 		return err;
2545 
2546 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2547 	if (err)
2548 		return err;
2549 
2550 #if CONFIG_IS_ENABLED(MMC_TINY)
2551 	mmc_set_clock(mmc, mmc->legacy_speed, false);
2552 	mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2553 	mmc_set_bus_width(mmc, 1);
2554 #else
2555 	if (IS_SD(mmc)) {
2556 		err = sd_get_capabilities(mmc);
2557 		if (err)
2558 			return err;
2559 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2560 	} else {
2561 		err = mmc_get_capabilities(mmc);
2562 		if (err)
2563 			return err;
2564 		mmc_select_mode_and_width(mmc, mmc->card_caps);
2565 	}
2566 #endif
2567 	if (err)
2568 		return err;
2569 
2570 	mmc->best_mode = mmc->selected_mode;
2571 
2572 	/* Fix the block length for DDR mode */
2573 	if (mmc->ddr_mode) {
2574 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2575 #if CONFIG_IS_ENABLED(MMC_WRITE)
2576 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2577 #endif
2578 	}
2579 
2580 	/* fill in device description */
2581 	bdesc = mmc_get_blk_desc(mmc);
2582 	bdesc->lun = 0;
2583 	bdesc->hwpart = 0;
2584 	bdesc->type = 0;
2585 	bdesc->blksz = mmc->read_bl_len;
2586 	bdesc->log2blksz = LOG2(bdesc->blksz);
2587 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2588 #if !defined(CONFIG_SPL_BUILD) || \
2589 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2590 		!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
2591 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2592 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2593 		(mmc->cid[3] >> 16) & 0xffff);
2594 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2595 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2596 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2597 		(mmc->cid[2] >> 24) & 0xff);
2598 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2599 		(mmc->cid[2] >> 16) & 0xf);
2600 #else
2601 	bdesc->vendor[0] = 0;
2602 	bdesc->product[0] = 0;
2603 	bdesc->revision[0] = 0;
2604 #endif
2605 
2606 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2607 	part_init(bdesc);
2608 #endif
2609 
2610 	return 0;
2611 }
2612 
mmc_send_if_cond(struct mmc * mmc)2613 static int mmc_send_if_cond(struct mmc *mmc)
2614 {
2615 	struct mmc_cmd cmd;
2616 	int err;
2617 
2618 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2619 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2620 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2621 	cmd.resp_type = MMC_RSP_R7;
2622 
2623 	err = mmc_send_cmd(mmc, &cmd, NULL);
2624 
2625 	if (err)
2626 		return err;
2627 
2628 	if ((cmd.response[0] & 0xff) != 0xaa)
2629 		return -EOPNOTSUPP;
2630 	else
2631 		mmc->version = SD_VERSION_2;
2632 
2633 	return 0;
2634 }
2635 
2636 #if !CONFIG_IS_ENABLED(DM_MMC)
2637 /* board-specific MMC power initializations. */
board_mmc_power_init(void)2638 __weak void board_mmc_power_init(void)
2639 {
2640 }
2641 #endif
2642 
mmc_power_init(struct mmc * mmc)2643 static int mmc_power_init(struct mmc *mmc)
2644 {
2645 #if CONFIG_IS_ENABLED(DM_MMC)
2646 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2647 	int ret;
2648 
2649 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2650 					  &mmc->vmmc_supply);
2651 	if (ret)
2652 		pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2653 
2654 	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2655 					  &mmc->vqmmc_supply);
2656 	if (ret)
2657 		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2658 #endif
2659 #else /* !CONFIG_DM_MMC */
2660 	/*
2661 	 * Driver model should use a regulator, as above, rather than calling
2662 	 * out to board code.
2663 	 */
2664 	board_mmc_power_init();
2665 #endif
2666 	return 0;
2667 }
2668 
2669 /*
2670  * put the host in the initial state:
2671  * - turn on Vdd (card power supply)
2672  * - configure the bus width and clock to minimal values
2673  */
mmc_set_initial_state(struct mmc * mmc)2674 static void mmc_set_initial_state(struct mmc *mmc)
2675 {
2676 	int err;
2677 
2678 	/* First try to set 3.3V. If it fails set to 1.8V */
2679 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2680 	if (err != 0)
2681 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2682 	if (err != 0)
2683 		pr_warn("mmc: failed to set signal voltage\n");
2684 
2685 	mmc_select_mode(mmc, MMC_LEGACY);
2686 	mmc_set_bus_width(mmc, 1);
2687 	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2688 }
2689 
mmc_power_on(struct mmc * mmc)2690 static int mmc_power_on(struct mmc *mmc)
2691 {
2692 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2693 	if (mmc->vmmc_supply) {
2694 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2695 
2696 		if (ret) {
2697 			puts("Error enabling VMMC supply\n");
2698 			return ret;
2699 		}
2700 	}
2701 #endif
2702 	return 0;
2703 }
2704 
mmc_power_off(struct mmc * mmc)2705 static int mmc_power_off(struct mmc *mmc)
2706 {
2707 	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2708 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2709 	if (mmc->vmmc_supply) {
2710 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2711 
2712 		if (ret) {
2713 			pr_debug("Error disabling VMMC supply\n");
2714 			return ret;
2715 		}
2716 	}
2717 #endif
2718 	return 0;
2719 }
2720 
mmc_power_cycle(struct mmc * mmc)2721 static int mmc_power_cycle(struct mmc *mmc)
2722 {
2723 	int ret;
2724 
2725 	ret = mmc_power_off(mmc);
2726 	if (ret)
2727 		return ret;
2728 
2729 	ret = mmc_host_power_cycle(mmc);
2730 	if (ret)
2731 		return ret;
2732 
2733 	/*
2734 	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2735 	 * to be on the safer side.
2736 	 */
2737 	udelay(2000);
2738 	return mmc_power_on(mmc);
2739 }
2740 
mmc_get_op_cond(struct mmc * mmc)2741 int mmc_get_op_cond(struct mmc *mmc)
2742 {
2743 	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2744 	int err;
2745 
2746 	if (mmc->has_init)
2747 		return 0;
2748 
2749 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2750 	mmc_adapter_card_type_ident();
2751 #endif
2752 	err = mmc_power_init(mmc);
2753 	if (err)
2754 		return err;
2755 
2756 #ifdef CONFIG_MMC_QUIRKS
2757 	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2758 		      MMC_QUIRK_RETRY_SEND_CID;
2759 #endif
2760 
2761 	err = mmc_power_cycle(mmc);
2762 	if (err) {
2763 		/*
2764 		 * if power cycling is not supported, we should not try
2765 		 * to use the UHS modes, because we wouldn't be able to
2766 		 * recover from an error during the UHS initialization.
2767 		 */
2768 		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2769 		uhs_en = false;
2770 		mmc->host_caps &= ~UHS_CAPS;
2771 		err = mmc_power_on(mmc);
2772 	}
2773 	if (err)
2774 		return err;
2775 
2776 #if CONFIG_IS_ENABLED(DM_MMC)
2777 	/* The device has already been probed ready for use */
2778 #else
2779 	/* made sure it's not NULL earlier */
2780 	err = mmc->cfg->ops->init(mmc);
2781 	if (err)
2782 		return err;
2783 #endif
2784 	mmc->ddr_mode = 0;
2785 
2786 retry:
2787 	mmc_set_initial_state(mmc);
2788 
2789 	/* Reset the Card */
2790 	err = mmc_go_idle(mmc);
2791 
2792 	if (err)
2793 		return err;
2794 
2795 	/* The internal partition reset to user partition(0) at every CMD0*/
2796 	mmc_get_blk_desc(mmc)->hwpart = 0;
2797 
2798 	/* Test for SD version 2 */
2799 	err = mmc_send_if_cond(mmc);
2800 
2801 	/* Now try to get the SD card's operating condition */
2802 	err = sd_send_op_cond(mmc, uhs_en);
2803 	if (err && uhs_en) {
2804 		uhs_en = false;
2805 		mmc_power_cycle(mmc);
2806 		goto retry;
2807 	}
2808 
2809 	/* If the command timed out, we check for an MMC card */
2810 	if (err == -ETIMEDOUT) {
2811 		err = mmc_send_op_cond(mmc);
2812 
2813 		if (err) {
2814 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2815 			pr_err("Card did not respond to voltage select!\n");
2816 #endif
2817 			return -EOPNOTSUPP;
2818 		}
2819 	}
2820 
2821 	return err;
2822 }
2823 
mmc_start_init(struct mmc * mmc)2824 int mmc_start_init(struct mmc *mmc)
2825 {
2826 	bool no_card;
2827 	int err = 0;
2828 
2829 	/*
2830 	 * all hosts are capable of 1 bit bus-width and able to use the legacy
2831 	 * timings.
2832 	 */
2833 	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2834 			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2835 
2836 #if !defined(CONFIG_MMC_BROKEN_CD)
2837 	no_card = mmc_getcd(mmc) == 0;
2838 #else
2839 	no_card = 0;
2840 #endif
2841 #if !CONFIG_IS_ENABLED(DM_MMC)
2842 	/* we pretend there's no card when init is NULL */
2843 	no_card = no_card || (mmc->cfg->ops->init == NULL);
2844 #endif
2845 	if (no_card) {
2846 		mmc->has_init = 0;
2847 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2848 		pr_err("MMC: no card present\n");
2849 #endif
2850 		return -ENOMEDIUM;
2851 	}
2852 
2853 	err = mmc_get_op_cond(mmc);
2854 
2855 	if (!err)
2856 		mmc->init_in_progress = 1;
2857 
2858 	return err;
2859 }
2860 
mmc_complete_init(struct mmc * mmc)2861 static int mmc_complete_init(struct mmc *mmc)
2862 {
2863 	int err = 0;
2864 
2865 	mmc->init_in_progress = 0;
2866 	if (mmc->op_cond_pending)
2867 		err = mmc_complete_op_cond(mmc);
2868 
2869 	if (!err)
2870 		err = mmc_startup(mmc);
2871 	if (err)
2872 		mmc->has_init = 0;
2873 	else
2874 		mmc->has_init = 1;
2875 	return err;
2876 }
2877 
mmc_init(struct mmc * mmc)2878 int mmc_init(struct mmc *mmc)
2879 {
2880 	int err = 0;
2881 	__maybe_unused ulong start;
2882 #if CONFIG_IS_ENABLED(DM_MMC)
2883 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2884 
2885 	upriv->mmc = mmc;
2886 #endif
2887 	if (mmc->has_init)
2888 		return 0;
2889 
2890 	start = get_timer(0);
2891 
2892 	if (!mmc->init_in_progress)
2893 		err = mmc_start_init(mmc);
2894 
2895 	if (!err)
2896 		err = mmc_complete_init(mmc);
2897 	if (err)
2898 		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2899 
2900 	return err;
2901 }
2902 
2903 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2904     CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2905     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
mmc_deinit(struct mmc * mmc)2906 int mmc_deinit(struct mmc *mmc)
2907 {
2908 	u32 caps_filtered;
2909 
2910 	if (!mmc->has_init)
2911 		return 0;
2912 
2913 	if (IS_SD(mmc)) {
2914 		caps_filtered = mmc->card_caps &
2915 			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2916 			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2917 			  MMC_CAP(UHS_SDR104));
2918 
2919 		return sd_select_mode_and_width(mmc, caps_filtered);
2920 	} else {
2921 		caps_filtered = mmc->card_caps &
2922 			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2923 
2924 		return mmc_select_mode_and_width(mmc, caps_filtered);
2925 	}
2926 }
2927 #endif
2928 
mmc_set_dsr(struct mmc * mmc,u16 val)2929 int mmc_set_dsr(struct mmc *mmc, u16 val)
2930 {
2931 	mmc->dsr = val;
2932 	return 0;
2933 }
2934 
2935 /* CPU-specific MMC initializations */
cpu_mmc_init(bd_t * bis)2936 __weak int cpu_mmc_init(bd_t *bis)
2937 {
2938 	return -1;
2939 }
2940 
2941 /* board-specific MMC initializations. */
board_mmc_init(bd_t * bis)2942 __weak int board_mmc_init(bd_t *bis)
2943 {
2944 	return -1;
2945 }
2946 
mmc_set_preinit(struct mmc * mmc,int preinit)2947 void mmc_set_preinit(struct mmc *mmc, int preinit)
2948 {
2949 	mmc->preinit = preinit;
2950 }
2951 
2952 #if CONFIG_IS_ENABLED(DM_MMC)
mmc_probe(bd_t * bis)2953 static int mmc_probe(bd_t *bis)
2954 {
2955 	int ret, i;
2956 	struct uclass *uc;
2957 	struct udevice *dev;
2958 
2959 	ret = uclass_get(UCLASS_MMC, &uc);
2960 	if (ret)
2961 		return ret;
2962 
2963 	/*
2964 	 * Try to add them in sequence order. Really with driver model we
2965 	 * should allow holes, but the current MMC list does not allow that.
2966 	 * So if we request 0, 1, 3 we will get 0, 1, 2.
2967 	 */
2968 	for (i = 0; ; i++) {
2969 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2970 		if (ret == -ENODEV)
2971 			break;
2972 	}
2973 	uclass_foreach_dev(dev, uc) {
2974 		ret = device_probe(dev);
2975 		if (ret)
2976 			pr_err("%s - probe failed: %d\n", dev->name, ret);
2977 	}
2978 
2979 	return 0;
2980 }
2981 #else
mmc_probe(bd_t * bis)2982 static int mmc_probe(bd_t *bis)
2983 {
2984 	if (board_mmc_init(bis) < 0)
2985 		cpu_mmc_init(bis);
2986 
2987 	return 0;
2988 }
2989 #endif
2990 
mmc_initialize(bd_t * bis)2991 int mmc_initialize(bd_t *bis)
2992 {
2993 	static int initialized = 0;
2994 	int ret;
2995 	if (initialized)	/* Avoid initializing mmc multiple times */
2996 		return 0;
2997 	initialized = 1;
2998 
2999 #if !CONFIG_IS_ENABLED(BLK)
3000 #if !CONFIG_IS_ENABLED(MMC_TINY)
3001 	mmc_list_init();
3002 #endif
3003 #endif
3004 	ret = mmc_probe(bis);
3005 	if (ret)
3006 		return ret;
3007 
3008 #ifndef CONFIG_SPL_BUILD
3009 	print_mmc_devices(',');
3010 #endif
3011 
3012 	mmc_do_preinit();
3013 	return 0;
3014 }
3015 
3016 #if CONFIG_IS_ENABLED(DM_MMC)
mmc_init_device(int num)3017 int mmc_init_device(int num)
3018 {
3019 	struct udevice *dev;
3020 	struct mmc *m;
3021 	int ret;
3022 
3023 	ret = uclass_get_device(UCLASS_MMC, num, &dev);
3024 	if (ret)
3025 		return ret;
3026 
3027 	m = mmc_get_mmc_dev(dev);
3028 	if (!m)
3029 		return 0;
3030 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3031 	mmc_set_preinit(m, 1);
3032 #endif
3033 	if (m->preinit)
3034 		mmc_start_init(m);
3035 
3036 	return 0;
3037 }
3038 #endif
3039 
3040 #ifdef CONFIG_CMD_BKOPS_ENABLE
mmc_set_bkops_enable(struct mmc * mmc)3041 int mmc_set_bkops_enable(struct mmc *mmc)
3042 {
3043 	int err;
3044 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3045 
3046 	err = mmc_send_ext_csd(mmc, ext_csd);
3047 	if (err) {
3048 		puts("Could not get ext_csd register values\n");
3049 		return err;
3050 	}
3051 
3052 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3053 		puts("Background operations not supported on device\n");
3054 		return -EMEDIUMTYPE;
3055 	}
3056 
3057 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3058 		puts("Background operations already enabled\n");
3059 		return 0;
3060 	}
3061 
3062 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3063 	if (err) {
3064 		puts("Failed to enable manual background operations\n");
3065 		return err;
3066 	}
3067 
3068 	puts("Enabled manual background operations\n");
3069 
3070 	return 0;
3071 }
3072 #endif
3073