xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision caa21a21f1c85abdcf83060db76159fe85e8e540)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
/*
 * SD allocation unit (AU) sizes in 512-byte sectors.
 * NOTE(review): presumably indexed by the AU_SIZE field of the SD
 * Status register (index 0 = "not defined") — confirm against the
 * code that consumes this table (not visible in this chunk).
 */
static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};
32 
#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: a single statically allocated device, no device list. */
static struct mmc mmc_static;
/* With MMC_TINY there is only one device, so dev_num is ignored. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Kick off init early for the static device when preinit is requested. */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* Return the block descriptor embedded in the mmc struct. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
55 
#if !CONFIG_IS_ENABLED(DM_MMC)
/* Board hook for write-protect detection; -1 means "not implemented". */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Report write-protect state: ask the board hook first, then the host
 * driver's getwp op; default to writable (0) when neither answers.
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Board hook for card-detect; -1 means "not implemented". */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif
83 
#ifdef CONFIG_MMC_TRACE
/* Log the command index and argument before it is sent to the card. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/*
 * Log the outcome of a command: the error code on failure, otherwise
 * the response words formatted according to the response type (R2
 * responses additionally get a big-endian byte dump of all 16 bytes).
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				/* dump each response word MSB-first */
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Log the CURRENT_STATE field (bits 12:9) of a card status response. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif
151 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Send a command (with optional data transfer) through the host
 * driver's send_cmd op, wrapped in trace logging (the trace calls are
 * presumably stubs when CONFIG_MMC_TRACE is off — defined elsewhere).
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif
164 
165 int mmc_send_status(struct mmc *mmc, int timeout)
166 {
167 	struct mmc_cmd cmd;
168 	int err, retries = 5;
169 
170 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
171 	cmd.resp_type = MMC_RSP_R1;
172 	if (!mmc_host_is_spi(mmc))
173 		cmd.cmdarg = mmc->rca << 16;
174 
175 	while (1) {
176 		err = mmc_send_cmd(mmc, &cmd, NULL);
177 		if (!err) {
178 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
179 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
180 			     MMC_STATE_PRG)
181 				break;
182 			else if (cmd.response[0] & MMC_STATUS_MASK) {
183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
184 				printf("Status Error: 0x%08X\n",
185 					cmd.response[0]);
186 #endif
187 				return -ECOMM;
188 			}
189 		} else if (--retries < 0)
190 			return err;
191 
192 		if (timeout-- <= 0)
193 			break;
194 
195 		udelay(1000);
196 	}
197 
198 	mmc_trace_state(mmc, &cmd);
199 	if (timeout <= 0) {
200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
201 		printf("Timeout waiting card ready\n");
202 #endif
203 		return -ETIMEDOUT;
204 	}
205 
206 	return 0;
207 }
208 
209 int mmc_set_blocklen(struct mmc *mmc, int len)
210 {
211 	struct mmc_cmd cmd;
212 
213 	if (mmc_card_ddr(mmc))
214 		return 0;
215 
216 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
217 	cmd.resp_type = MMC_RSP_R1;
218 	cmd.cmdarg = len;
219 
220 	return mmc_send_cmd(mmc, &cmd, NULL);
221 }
222 
223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
224 			   lbaint_t blkcnt)
225 {
226 	struct mmc_cmd cmd;
227 	struct mmc_data data;
228 
229 	if (blkcnt > 1)
230 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
231 	else
232 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
233 
234 	if (mmc->high_capacity)
235 		cmd.cmdarg = start;
236 	else
237 		cmd.cmdarg = start * mmc->read_bl_len;
238 
239 	cmd.resp_type = MMC_RSP_R1;
240 
241 	data.dest = dst;
242 	data.blocks = blkcnt;
243 	data.blocksize = mmc->read_bl_len;
244 	data.flags = MMC_DATA_READ;
245 
246 	if (mmc_send_cmd(mmc, &cmd, &data))
247 		return 0;
248 
249 	if (blkcnt > 1) {
250 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
251 		cmd.cmdarg = 0;
252 		cmd.resp_type = MMC_RSP_R1b;
253 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
255 			printf("mmc fail to send stop cmd\n");
256 #endif
257 			return 0;
258 		}
259 	}
260 
261 	return blkcnt;
262 }
263 
/*
 * Block-layer read entry point: read @blkcnt blocks starting at LBA
 * @start from the device's current hw partition into @dst.
 *
 * Returns the number of blocks read, or 0 on any error (block-layer
 * convention; no error code is propagated).
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* reject reads that would run past the end of the device */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* split the transfer into chunks the host can handle (b_max) */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
320 
321 void mmc_set_clock(struct mmc *mmc, uint clock)
322 {
323 	if (clock > mmc->cfg->f_max)
324 		clock = mmc->cfg->f_max;
325 
326 	if (clock < mmc->cfg->f_min)
327 		clock = mmc->cfg->f_min;
328 
329 	mmc->clock = clock;
330 
331 	mmc_set_ios(mmc);
332 }
333 
/* Record the new bus width and push it to the host via mmc_set_ios(). */
static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}
340 
/* Record the new timing mode and push it to the host via mmc_set_ios(). */
static void mmc_set_timing(struct mmc *mmc, uint timing)
{
	mmc->timing = timing;
	mmc_set_ios(mmc);
}
346 
347 static int mmc_go_idle(struct mmc *mmc)
348 {
349 	struct mmc_cmd cmd;
350 	int err;
351 
352 	udelay(1000);
353 
354 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
355 	cmd.cmdarg = 0;
356 	cmd.resp_type = MMC_RSP_NONE;
357 
358 	err = mmc_send_cmd(mmc, &cmd, NULL);
359 
360 	if (err)
361 		return err;
362 
363 	udelay(2000);
364 
365 	return 0;
366 }
367 
/*
 * Power up an SD card with ACMD41 (APP_SEND_OP_COND), polling (up to
 * ~1 s) until the card clears its busy flag. On success, records the
 * OCR, the high-capacity flag and fixes up the SD version; SPI hosts
 * read the OCR with a separate CMD58.
 *
 * Returns 0 on success, -EOPNOTSUPP on timeout, or a command error.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits in the
		 * OCR are set. However, some controllers can set bit 7
		 * (reserved for low voltages), and how to manage
		 * low-voltage SD cards is not yet specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		/* only SD 2.0+ cards may advertise high capacity */
		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
435 
436 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
437 {
438 	struct mmc_cmd cmd;
439 	int err;
440 
441 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
442 	cmd.resp_type = MMC_RSP_R3;
443 	cmd.cmdarg = 0;
444 	if (use_arg && !mmc_host_is_spi(mmc))
445 		cmd.cmdarg = OCR_HCS |
446 			(mmc->cfg->voltages &
447 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
448 			(mmc->ocr & OCR_ACCESS_MODE);
449 
450 	err = mmc_send_cmd(mmc, &cmd, NULL);
451 	if (err)
452 		return err;
453 	mmc->ocr = cmd.response[0];
454 	return 0;
455 }
456 
/*
 * Start the eMMC power-up negotiation with CMD1. The card may still
 * report busy when this returns; op_cond_pending is set so that
 * mmc_complete_op_cond() can finish the handshake later, letting the
 * card power up in the background.
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Ask the card for its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}
477 
/*
 * Finish the CMD1 handshake started by mmc_send_op_cond(): if the
 * card was still busy, keep polling (up to ~1 s) until it clears its
 * busy flag, then record the high-capacity flag from the OCR and
 * assign RCA 1. SPI hosts read the OCR with a separate CMD58.
 *
 * Returns 0 on success, -EOPNOTSUPP on timeout, or a command error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* the real version is determined later from the EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
523 
524 
/*
 * Read the card's 512-byte EXT_CSD register into @ext_csd with CMD8
 * (SEND_EXT_CSD). Callers allocate the buffer cache-aligned
 * (ALLOC_CACHE_ALIGN_BUFFER) and MMC_MAX_BLOCK_LEN bytes long.
 */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	/* Read the EXT_CSD as a single data block */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	return err;
}
545 
/*
 * Wait (up to ~1 s) for the card to leave its busy/programming state
 * after a SWITCH command, either by sampling the host's card-busy
 * indication when available or by polling CMD13 (SEND_STATUS).
 *
 * Returns 0 when the card is idle, -EBADMSG if the status reports a
 * SWITCH_ERROR, -ETIMEDOUT if the card stays busy, or a CMD13 error.
 */
static int mmc_poll_for_busy(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	do {
		if (mmc_can_card_busy(mmc)) {
			/* host can report the busy state directly */
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
				MMC_STATE_PRG;
		}

		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}
581 
/*
 * Issue CMD6 (SWITCH) to write @value into EXT_CSD byte @index,
 * retrying the command up to three times on failure. When
 * @send_status is set, poll until the card has finished programming
 * the new value.
 *
 * NOTE(review): the @set parameter is accepted but never encoded into
 * the command argument — MMC_SWITCH_MODE_WRITE_BYTE is always used.
 * All visible callers pass EXT_CSD_CMD_SET_NORMAL, so this appears
 * intentional; confirm before relying on @set.
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			u8 send_status)
{
	struct mmc_cmd cmd;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	do {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		if (!ret && send_status)
			return mmc_poll_for_busy(mmc);
	} while (--retries > 0 && ret);

	return ret;
}
604 
/* Write @value to EXT_CSD byte @index and wait for the card to finish. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
609 
/*
 * Find the widest bus (8-bit, then 4-bit) that both host and card
 * support, switch the card and host to it, and verify the switch by
 * re-reading the EXT_CSD and comparing read-only fields.
 *
 * Returns the selected MMC_BUS_WIDTH_* value (> 0) on success, 0 when
 * wide bus is not applicable (pre-v4 card or 1-bit-only host),
 * -EBADMSG when verification fails on all widths, or a command error.
 */
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	/* reference EXT_CSD, read at the current (1-bit) bus width */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* start at 8-bit when the host supports it, else at 4-bit */
	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		/* re-read the EXT_CSD over the wide bus to verify it */
		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			&test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}
682 
/*
 * Standard 64-byte tuning block pattern expected from the card during
 * tuning (CMD19/CMD21) on a 4-bit bus.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
693 
/*
 * Standard 128-byte tuning block pattern expected from the card
 * during tuning (CMD21) on an 8-bit bus.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
712 
713 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
714 {
715 	struct mmc_cmd cmd;
716 	struct mmc_data data;
717 	const u8 *tuning_block_pattern;
718 	int size, err = 0;
719 	u8 *data_buf;
720 
721 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
722 		tuning_block_pattern = tuning_blk_pattern_8bit;
723 		size = sizeof(tuning_blk_pattern_8bit);
724 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
725 		tuning_block_pattern = tuning_blk_pattern_4bit;
726 		size = sizeof(tuning_blk_pattern_4bit);
727 	} else {
728 		return -EINVAL;
729 	}
730 
731 	data_buf = calloc(1, size);
732 	if (!data_buf)
733 		return -ENOMEM;
734 
735 	cmd.cmdidx = opcode;
736 	cmd.resp_type = MMC_RSP_R1;
737 	cmd.cmdarg = 0;
738 
739 	data.dest = (char *)data_buf;
740 	data.blocksize = size;
741 	data.blocks = 1;
742 	data.flags = MMC_DATA_READ;
743 
744 	err = mmc_send_cmd(mmc, &cmd, &data);
745 	if (err)
746 		goto out;
747 
748 	if (memcmp(data_buf, tuning_block_pattern, size))
749 		err = -EIO;
750 out:
751 	free(data_buf);
752 	return err;
753 }
754 
/*
 * Run the host controller's tuning procedure with the appropriate
 * opcode: CMD19 for SD cards, CMD21 for eMMC (HS200).
 * Returns -EIO when the host provides no execute_tuning hook.
 *
 * Note the unusual layout below: exactly one of the two "if (...) {"
 * lines is compiled in, and both share the "} else {" that follows
 * the #endif.
 */
static int mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}
779 
/* Run the tuning procedure required after switching to HS200 timing. */
static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}
784 
785 static int mmc_select_hs(struct mmc *mmc)
786 {
787 	int ret;
788 
789 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
790 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
791 
792 	if (!ret)
793 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
794 
795 	return ret;
796 }
797 
#ifndef CONFIG_SPL_BUILD
/*
 * Switch the card to HS200 timing: first negotiate a wide (4- or
 * 8-bit) bus, then switch HS_TIMING without the usual busy-poll
 * (CMD13 is used afterwards instead, since the bus timing has just
 * changed) and verify the switch via the status response.
 *
 * Returns 0 on success (or when no wide bus is available, leaving the
 * mode unchanged), -EBADMSG on a SWITCH_ERROR, or a command error.
 */
static int mmc_select_hs200(struct mmc *mmc)
{
	int ret;
	struct mmc_cmd cmd;

	/*
	 * Set the bus width(4 or 8) with host's support and
	 * switch to HS200 mode if bus width is set successfully.
	 */
	ret = mmc_select_bus_width(mmc);

	if (ret > 0) {
		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING,
				   EXT_CSD_TIMING_HS200, false);

		if (ret)
			return ret;

		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);

		/* verify the switch with CMD13 at the new timing */
		cmd.cmdidx = MMC_CMD_SEND_STATUS;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;

		ret = mmc_send_cmd(mmc, &cmd, NULL);

		if (ret)
			return ret;

		if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
			return -EBADMSG;
	}

	return ret;
}
#endif
836 
/*
 * Intersect the card's EXT_CSD CARD_TYPE capabilities with the host's
 * caps and return the resulting mask of usable EXT_CSD_CARD_TYPE_*
 * modes (HS26/HS52/DDR52/HS200/HS400/HS400ES).
 */
static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, u-boot doesn't support signal voltage
	 * switch, therefor we assume that host support ddr52
	 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and
	 * hs400 are the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
				EXT_CSD_CARD_TYPE_HS400_1_8V;

	/* HS400ES additionally requires the card's strobe support flag */
	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
				EXT_CSD_CARD_TYPE_HS400_1_8V |
				EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}
887 
888 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
889 {
890 	int clock = 0;
891 
892 	if (mmc_card_hs(mmc))
893 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
894 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
895 	else if (mmc_card_hs200(mmc) ||
896 		 mmc_card_hs400(mmc) ||
897 		 mmc_card_hs400es(mmc))
898 		clock = MMC_HS200_MAX_DTR;
899 
900 	mmc_set_clock(mmc, clock);
901 }
902 
/*
 * Negotiate the fastest eMMC mode supported by both host and card:
 * read the EXT_CSD, pick HS200 or plain HS from the common card-type
 * mask, set the matching clock, then finish with bus-width selection
 * (or tuning, when HS200 was chosen).
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

#ifndef CONFIG_SPL_BUILD
	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else
#endif
	if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	/* HS200 requires tuning; other modes just pick the bus width */
	if (mmc_card_hs200(mmc))
		err = mmc_hs200_tuning(mmc);
	else
		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;

	return err;
}
949 
950 static int mmc_set_capacity(struct mmc *mmc, int part_num)
951 {
952 	switch (part_num) {
953 	case 0:
954 		mmc->capacity = mmc->capacity_user;
955 		break;
956 	case 1:
957 	case 2:
958 		mmc->capacity = mmc->capacity_boot;
959 		break;
960 	case 3:
961 		mmc->capacity = mmc->capacity_rpmb;
962 		break;
963 	case 4:
964 	case 5:
965 	case 6:
966 	case 7:
967 		mmc->capacity = mmc->capacity_gp[part_num - 4];
968 		break;
969 	default:
970 		return -1;
971 	}
972 
973 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
974 
975 	return 0;
976 }
977 
/*
 * Select eMMC hardware partition @part_num via the PART_CONF access
 * bits and, on success, update the capacity and block descriptor.
 * A failed switch back to the user area (-ENODEV with part_num 0)
 * still restores the raw-device capacity.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
997 
/*
 * Validate and (depending on @mode) apply an eMMC hardware partition
 * layout described by @conf: enhanced user data area, the four GP
 * partitions and write-reliability settings.
 *
 * @mode selects how far to go: CHECK only validates, SET writes the
 * configuration, COMPLETE additionally writes WR_REL_SET and sets
 * PARTITION_SETTING_COMPLETED (irreversible; takes effect only after
 * a power cycle).
 *
 * Returns 0 on success, -EINVAL for bad arguments/alignment,
 * -EMEDIUMTYPE when the card lacks a required capability, -EPERM when
 * the card is already partitioned, or a command error.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte addressing: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1191 
1192 #if !CONFIG_IS_ENABLED(DM_MMC)
1193 int mmc_getcd(struct mmc *mmc)
1194 {
1195 	int cd;
1196 
1197 	cd = board_mmc_getcd(mmc);
1198 
1199 	if (cd < 0) {
1200 		if (mmc->cfg->ops->getcd)
1201 			cd = mmc->cfg->ops->getcd(mmc);
1202 		else
1203 			cd = 1;
1204 	}
1205 
1206 	return cd;
1207 }
1208 #endif
1209 
/*
 * Issue SD CMD6 (SWITCH_FUNC) and read back the 64-byte switch status
 * block into @resp.
 *
 * @mode:  SD_SWITCH_CHECK to query support, SD_SWITCH_SWITCH to commit
 * @group: function group index (one 4-bit selector nibble per group)
 * @value: function number to select within that group
 *
 * Returns 0 on success or a negative error from mmc_send_cmd().
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	/* mode bit in bit 31; all groups default to 0xf (no change) */
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	/* The 512-bit status comes back as a single data block */
	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}
1229 
1230 
/*
 * Read the SD card's SCR register and, when both card and host support
 * it, switch the card into High Speed mode via CMD6.
 *
 * Fills in mmc->scr, mmc->version and mmc->card_caps (MMC_MODE_4BIT,
 * MMC_MODE_HS).  Returns 0 on success (including "high speed not
 * available") or a negative error code.
 */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	/* SPI mode has no notion of bus width / high-speed switching here */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	/* The 8-byte SCR is transferred as a data block */
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR arrives big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field selects the physical-layer spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit upgrades a 2.00 card to 3.0x */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card supports SD_HIGHSPEED.
	 * This can avoid further problems when the card runs in a different
	 * mode than the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* Function group 1 status nibble == 1 confirms the switch took */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
1340 
/*
 * Read the 64-byte SD Status register (ACMD13) and extract the erase
 * related fields into mmc->ssr: allocation unit (AU) size and the
 * per-AU erase timeout/offset used to size erase operation timeouts.
 *
 * Returns 0 on success or a negative error code; an out-of-range AU
 * size on a pre-3.0 card is reported via debug() and otherwise ignored.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* SD status is an application command: prefix with CMD55 */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* Register arrives big-endian */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE indexes the sd_au_size lookup table */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE (number of AUs) and ERASE_TIMEOUT fields */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* derive a per-AU erase timeout and fixed offset */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1396 
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
/* indexed by the low 3 bits of the CSD TRAN_SPEED byte */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1405 
/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by bits 6:3 of the CSD TRAN_SPEED byte; pairs with fbase[].
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1427 
1428 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Push the current I/O settings to the host driver's set_ios hook, if any. */
static void mmc_set_ios(struct mmc *mmc)
{
	if (mmc->cfg->ops->set_ios)
		mmc->cfg->ops->set_ios(mmc);
}
1434 
/*
 * Ask the host controller whether the card signals busy.
 *
 * NOTE(review): the return type is bool but -ENOSYS is returned when the
 * host lacks a card_busy op — a caller sees that as true ("busy").
 * Presumably callers gate this behind mmc_can_card_busy(); confirm.
 */
static bool mmc_card_busy(struct mmc *mmc)
{
	if (!mmc->cfg->ops->card_busy)
		return -ENOSYS;

	return mmc->cfg->ops->card_busy(mmc);
}
1442 
1443 static bool mmc_can_card_busy(struct mmc *)
1444 {
1445 	return !!mmc->cfg->ops->card_busy;
1446 }
1447 #endif
1448 
/*
 * Bring an identified card from Identification to Transfer state and
 * read out its configuration: CID, CSD (capacity, block lengths, bus
 * speed), EXT_CSD for eMMC >= v4 (revision, partitions, erase groups),
 * then negotiate bus width / high speed and fill in the block device
 * descriptor.
 *
 * Returns 0 on success or a negative error code from the first failing
 * step.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq;
	u64 cmult, csize, capacity;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	bool has_parts = false;
	bool part_completed;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* SD assigns its own RCA and returns it in the response */
		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* Map the CSD spec-version field to an MMC version constant */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* User-area capacity from the CSD; EXT_CSD may override it below */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR when the card implements one and a value was set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check  ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			/*
			 * NOTE(review): the u8 bytes are promoted to int
			 * before shifting, so a SEC_CNT with bit 31 set
			 * would sign-extend into the u64; consider (u64)
			 * casts if very large devices must be supported.
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		/* EXT_CSD revision pins down the exact eMMC spec version */
		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

		/* BOOT/RPMB multipliers are in 128 KiB units (hence << 17) */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		/* General-purpose partition sizes (3-byte multipliers) */
		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		/* Enhanced user data area size and start address */
		if (part_completed) {
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				/* NOTE(review): same promoted-int << 24 as above */
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		/* High-capacity write-protect group size */
		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		if (mmc->card_caps & MMC_MODE_4BIT) {
			/* ACMD6 needs the CMD55 application-command prefix */
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			/* ACMD6 argument 2 selects 4-bit bus width */
			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		if (mmc->card_caps & MMC_MODE_HS)
			mmc->tran_speed = 50000000;
		else
			mmc->tran_speed = 25000000;

		mmc_set_clock(mmc, mmc->tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	/* Identification strings are decoded from the CID register */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}
1839 
1840 static int mmc_send_if_cond(struct mmc *mmc)
1841 {
1842 	struct mmc_cmd cmd;
1843 	int err;
1844 
1845 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
1846 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1847 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1848 	cmd.resp_type = MMC_RSP_R7;
1849 
1850 	err = mmc_send_cmd(mmc, &cmd, NULL);
1851 
1852 	if (err)
1853 		return err;
1854 
1855 	if ((cmd.response[0] & 0xff) != 0xaa)
1856 		return -EOPNOTSUPP;
1857 	else
1858 		mmc->version = SD_VERSION_2;
1859 
1860 	return 0;
1861 }
1862 
1863 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
	/* Default is a no-op; boards override this weak symbol as needed. */
}
1868 #endif
1869 
/*
 * Turn on power for the card.  With driver model and regulator support
 * this enables the device's "vmmc-supply" regulator (a missing supply is
 * not treated as an error); without driver model it falls back to the
 * board_mmc_power_init() hook.  Returns 0 on success or a regulator
 * error code.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* No regulator described for this device: nothing to do */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
1899 
/*
 * First half of card initialization: check card presence, power up,
 * reset with CMD0 and negotiate the operating condition (SD ACMD41 or
 * MMC CMD1).  On success init_in_progress is set and mmc_complete_init()
 * finishes the job later.
 *
 * Returns 0 on success, -ENOMEDIUM when no card is present, or
 * -EOPNOTSUPP / a negative error when the card cannot be brought up.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Nothing to do when the card is already fully initialized */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* Start at the lowest common denominator: 1 bit, slow clock, legacy */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/*
	 * The CMD8 result above is deliberately discarded: a failure just
	 * means the card pre-dates SD 2.0; ACMD41 below decides.
	 */
	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
1972 
1973 static int mmc_complete_init(struct mmc *mmc)
1974 {
1975 	int err = 0;
1976 
1977 	mmc->init_in_progress = 0;
1978 	if (mmc->op_cond_pending)
1979 		err = mmc_complete_op_cond(mmc);
1980 
1981 	if (!err)
1982 		err = mmc_startup(mmc);
1983 	if (err)
1984 		mmc->has_init = 0;
1985 	else
1986 		mmc->has_init = 1;
1987 	return err;
1988 }
1989 
/*
 * Fully initialize a card: run mmc_start_init() (unless one is already
 * in progress) followed by mmc_complete_init().  A no-op when the card
 * is already initialized.  On failure the error and elapsed time are
 * printed.  Returns 0 on success or a negative error code.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	/* Record this mmc in the uclass-private data for later lookup */
	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2014 
/*
 * Store a driver stage register (DSR) value; it is sent to the card via
 * CMD4 during mmc_startup() when the CSD advertises DSR support.
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2020 
/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	/* Default: no SoC-level controllers; SoC code overrides this. */
	return -1;
}
2026 
/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	/* Default: no board controllers; board code overrides this. */
	return -1;
}
2032 
/*
 * Mark a device for early initialization: mmc_do_preinit() will start
 * init on devices with this flag set during mmc_initialize().
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2037 
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: devices are probed on demand, nothing to do. */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Driver model: walk the MMC uclass and probe every device in it. */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	/* Probe failures are reported but do not abort the scan */
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy: let the board register controllers, falling back to the CPU. */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2081 
2082 int mmc_initialize(bd_t *bis)
2083 {
2084 	static int initialized = 0;
2085 	int ret;
2086 	if (initialized)	/* Avoid initializing mmc multiple times */
2087 		return 0;
2088 	initialized = 1;
2089 
2090 #if !CONFIG_IS_ENABLED(BLK)
2091 #if !CONFIG_IS_ENABLED(MMC_TINY)
2092 	mmc_list_init();
2093 #endif
2094 #endif
2095 	ret = mmc_probe(bis);
2096 	if (ret)
2097 		return ret;
2098 
2099 #ifndef CONFIG_SPL_BUILD
2100 	print_mmc_devices(',');
2101 #endif
2102 
2103 	mmc_do_preinit();
2104 	return 0;
2105 }
2106 
2107 #ifdef CONFIG_CMD_BKOPS_ENABLE
2108 int mmc_set_bkops_enable(struct mmc *mmc)
2109 {
2110 	int err;
2111 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2112 
2113 	err = mmc_send_ext_csd(mmc, ext_csd);
2114 	if (err) {
2115 		puts("Could not get ext_csd register values\n");
2116 		return err;
2117 	}
2118 
2119 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2120 		puts("Background operations not supported on device\n");
2121 		return -EMEDIUMTYPE;
2122 	}
2123 
2124 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2125 		puts("Background operations already enabled\n");
2126 		return 0;
2127 	}
2128 
2129 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2130 	if (err) {
2131 		puts("Failed to enable manual background operations\n");
2132 		return err;
2133 	}
2134 
2135 	puts("Enabled manual background operations\n");
2136 
2137 	return 0;
2138 }
2139 #endif
2140