xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision 5545757f7791e4b522a775b3f495feb508f659a1)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
/*
 * SD allocation-unit sizes in units of 512-byte sectors, indexed by a
 * 4-bit size code (presumably the AU_SIZE field of the SD Status
 * register — confirm against the caller). Index 0 means "not defined".
 */
static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};
32 
#if CONFIG_IS_ENABLED(MMC_TINY)
/*
 * MMC_TINY builds support exactly one statically allocated controller,
 * so device lookup ignores the requested device number.
 */
static struct mmc mmc_static;

/* Return the single static device regardless of @dev_num. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Start initialization early for the static device if it is flagged
 * for pre-initialization. */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* NOTE(review): preinit is forced on for this hardware,
	 * presumably so adapter identification can run — confirm */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* The block-device descriptor is embedded directly in struct mmc in
 * MMC_TINY builds. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
55 
56 #if !CONFIG_IS_ENABLED(DM_MMC)
57 __weak int board_mmc_getwp(struct mmc *mmc)
58 {
59 	return -1;
60 }
61 
62 int mmc_getwp(struct mmc *mmc)
63 {
64 	int wp;
65 
66 	wp = board_mmc_getwp(mmc);
67 
68 	if (wp < 0) {
69 		if (mmc->cfg->ops->getwp)
70 			wp = mmc->cfg->ops->getwp(mmc);
71 		else
72 			wp = 0;
73 	}
74 
75 	return wp;
76 }
77 
78 __weak int board_mmc_getcd(struct mmc *mmc)
79 {
80 	return -1;
81 }
82 #endif
83 
84 #ifdef CONFIG_MMC_TRACE
/* Trace helper: log the command index and argument before it is sent. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}
90 
/*
 * Trace helper: log the outcome of a command — either the driver's
 * error code, or the response word(s) formatted according to the
 * expected response type.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			/* 128-bit response: print all four 32-bit words,
			 * then dump the raw bytes of each word */
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				/* walk each word from its last byte down,
				 * i.e. most-significant byte first on a
				 * little-endian host */
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
142 
/* Trace helper: print the CURRENT_STATE field of an R1 status response
 * (masked with MMC_STATUS_CURR_STATE, shifted down by 9 bits). */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
150 #endif
151 
152 #if !CONFIG_IS_ENABLED(DM_MMC)
153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
154 {
155 	int ret;
156 
157 	mmmc_trace_before_send(mmc, cmd);
158 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
159 	mmmc_trace_after_send(mmc, cmd, ret);
160 
161 	return ret;
162 }
163 #endif
164 
165 int mmc_send_status(struct mmc *mmc, int timeout)
166 {
167 	struct mmc_cmd cmd;
168 	int err, retries = 5;
169 
170 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
171 	cmd.resp_type = MMC_RSP_R1;
172 	if (!mmc_host_is_spi(mmc))
173 		cmd.cmdarg = mmc->rca << 16;
174 
175 	while (1) {
176 		err = mmc_send_cmd(mmc, &cmd, NULL);
177 		if (!err) {
178 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
179 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
180 			     MMC_STATE_PRG)
181 				break;
182 			else if (cmd.response[0] & MMC_STATUS_MASK) {
183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
184 				printf("Status Error: 0x%08X\n",
185 					cmd.response[0]);
186 #endif
187 				return -ECOMM;
188 			}
189 		} else if (--retries < 0)
190 			return err;
191 
192 		if (timeout-- <= 0)
193 			break;
194 
195 		udelay(1000);
196 	}
197 
198 	mmc_trace_state(mmc, &cmd);
199 	if (timeout <= 0) {
200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
201 		printf("Timeout waiting card ready\n");
202 #endif
203 		return -ETIMEDOUT;
204 	}
205 
206 	return 0;
207 }
208 
209 int mmc_set_blocklen(struct mmc *mmc, int len)
210 {
211 	struct mmc_cmd cmd;
212 
213 	if (mmc_card_ddr(mmc))
214 		return 0;
215 
216 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
217 	cmd.resp_type = MMC_RSP_R1;
218 	cmd.cmdarg = len;
219 
220 	return mmc_send_cmd(mmc, &cmd, NULL);
221 }
222 
223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
224 			   lbaint_t blkcnt)
225 {
226 	struct mmc_cmd cmd;
227 	struct mmc_data data;
228 
229 	if (blkcnt > 1)
230 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
231 	else
232 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
233 
234 	if (mmc->high_capacity)
235 		cmd.cmdarg = start;
236 	else
237 		cmd.cmdarg = start * mmc->read_bl_len;
238 
239 	cmd.resp_type = MMC_RSP_R1;
240 
241 	data.dest = dst;
242 	data.blocks = blkcnt;
243 	data.blocksize = mmc->read_bl_len;
244 	data.flags = MMC_DATA_READ;
245 
246 	if (mmc_send_cmd(mmc, &cmd, &data))
247 		return 0;
248 
249 	if (blkcnt > 1) {
250 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
251 		cmd.cmdarg = 0;
252 		cmd.resp_type = MMC_RSP_R1b;
253 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
255 			printf("mmc fail to send stop cmd\n");
256 #endif
257 			return 0;
258 		}
259 	}
260 
261 	return blkcnt;
262 }
263 
/*
 * Block-layer read entry point. Selects the requested hardware
 * partition, validates the range against the device size, programs the
 * block length and reads in chunks of at most cfg->b_max blocks.
 *
 * Returns the number of blocks read, or 0 on any failure.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	/* with driver model the descriptor lives in uclass platdata */
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* MMC_TINY has no blk layer, so switch the hw partition directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* reject reads extending past the end of the device */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* split the transfer into chunks the host can handle */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
320 
321 void mmc_set_clock(struct mmc *mmc, uint clock)
322 {
323 	if (clock > mmc->cfg->f_max)
324 		clock = mmc->cfg->f_max;
325 
326 	if (clock < mmc->cfg->f_min)
327 		clock = mmc->cfg->f_min;
328 
329 	mmc->clock = clock;
330 
331 	mmc_set_ios(mmc);
332 }
333 
/* Record the new bus width and push the I/O configuration to the host. */
static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}
340 
/* Record the new timing mode and push the I/O configuration to the host. */
static void mmc_set_timing(struct mmc *mmc, uint timing)
{
	mmc->timing = timing;
	mmc_set_ios(mmc);
}
346 
347 static int mmc_go_idle(struct mmc *mmc)
348 {
349 	struct mmc_cmd cmd;
350 	int err;
351 
352 	udelay(1000);
353 
354 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
355 	cmd.cmdarg = 0;
356 	cmd.resp_type = MMC_RSP_NONE;
357 
358 	err = mmc_send_cmd(mmc, &cmd, NULL);
359 
360 	if (err)
361 		return err;
362 
363 	udelay(2000);
364 
365 	return 0;
366 }
367 
368 static int sd_send_op_cond(struct mmc *mmc)
369 {
370 	int timeout = 1000;
371 	int err;
372 	struct mmc_cmd cmd;
373 
374 	while (1) {
375 		cmd.cmdidx = MMC_CMD_APP_CMD;
376 		cmd.resp_type = MMC_RSP_R1;
377 		cmd.cmdarg = 0;
378 
379 		err = mmc_send_cmd(mmc, &cmd, NULL);
380 
381 		if (err)
382 			return err;
383 
384 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
385 		cmd.resp_type = MMC_RSP_R3;
386 
387 		/*
388 		 * Most cards do not answer if some reserved bits
389 		 * in the ocr are set. However, Some controller
390 		 * can set bit 7 (reserved for low voltages), but
391 		 * how to manage low voltages SD card is not yet
392 		 * specified.
393 		 */
394 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
395 			(mmc->cfg->voltages & 0xff8000);
396 
397 		if (mmc->version == SD_VERSION_2)
398 			cmd.cmdarg |= OCR_HCS;
399 
400 		err = mmc_send_cmd(mmc, &cmd, NULL);
401 
402 		if (err)
403 			return err;
404 
405 		if (cmd.response[0] & OCR_BUSY)
406 			break;
407 
408 		if (timeout-- <= 0)
409 			return -EOPNOTSUPP;
410 
411 		udelay(1000);
412 	}
413 
414 	if (mmc->version != SD_VERSION_2)
415 		mmc->version = SD_VERSION_1_0;
416 
417 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
418 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
419 		cmd.resp_type = MMC_RSP_R3;
420 		cmd.cmdarg = 0;
421 
422 		err = mmc_send_cmd(mmc, &cmd, NULL);
423 
424 		if (err)
425 			return err;
426 	}
427 
428 	mmc->ocr = cmd.response[0];
429 
430 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
431 	mmc->rca = 0;
432 
433 	return 0;
434 }
435 
436 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
437 {
438 	struct mmc_cmd cmd;
439 	int err;
440 
441 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
442 	cmd.resp_type = MMC_RSP_R3;
443 	cmd.cmdarg = 0;
444 	if (use_arg && !mmc_host_is_spi(mmc))
445 		cmd.cmdarg = OCR_HCS |
446 			(mmc->cfg->voltages &
447 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
448 			(mmc->ocr & OCR_ACCESS_MODE);
449 
450 	err = mmc_send_cmd(mmc, &cmd, NULL);
451 	if (err)
452 		return err;
453 	mmc->ocr = cmd.response[0];
454 	return 0;
455 }
456 
457 static int mmc_send_op_cond(struct mmc *mmc)
458 {
459 	int err, i;
460 
461 	/* Some cards seem to need this */
462 	mmc_go_idle(mmc);
463 
464  	/* Asking to the card its capabilities */
465 	for (i = 0; i < 2; i++) {
466 		err = mmc_send_op_cond_iter(mmc, i != 0);
467 		if (err)
468 			return err;
469 
470 		/* exit if not busy (flag seems to be inverted) */
471 		if (mmc->ocr & OCR_BUSY)
472 			break;
473 	}
474 	mmc->op_cond_pending = 1;
475 	return 0;
476 }
477 
478 static int mmc_complete_op_cond(struct mmc *mmc)
479 {
480 	struct mmc_cmd cmd;
481 	int timeout = 1000;
482 	uint start;
483 	int err;
484 
485 	mmc->op_cond_pending = 0;
486 	if (!(mmc->ocr & OCR_BUSY)) {
487 		/* Some cards seem to need this */
488 		mmc_go_idle(mmc);
489 
490 		start = get_timer(0);
491 		while (1) {
492 			err = mmc_send_op_cond_iter(mmc, 1);
493 			if (err)
494 				return err;
495 			if (mmc->ocr & OCR_BUSY)
496 				break;
497 			if (get_timer(start) > timeout)
498 				return -EOPNOTSUPP;
499 			udelay(100);
500 		}
501 	}
502 
503 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
504 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
505 		cmd.resp_type = MMC_RSP_R3;
506 		cmd.cmdarg = 0;
507 
508 		err = mmc_send_cmd(mmc, &cmd, NULL);
509 
510 		if (err)
511 			return err;
512 
513 		mmc->ocr = cmd.response[0];
514 	}
515 
516 	mmc->version = MMC_VERSION_UNKNOWN;
517 
518 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
519 	mmc->rca = 1;
520 
521 	return 0;
522 }
523 
524 
525 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
526 {
527 	struct mmc_cmd cmd;
528 	struct mmc_data data;
529 	int err;
530 
531 	/* Get the Card Status Register */
532 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
533 	cmd.resp_type = MMC_RSP_R1;
534 	cmd.cmdarg = 0;
535 
536 	data.dest = (char *)ext_csd;
537 	data.blocks = 1;
538 	data.blocksize = MMC_MAX_BLOCK_LEN;
539 	data.flags = MMC_DATA_READ;
540 
541 	err = mmc_send_cmd(mmc, &cmd, &data);
542 
543 	return err;
544 }
545 
546 static int mmc_poll_for_busy(struct mmc *mmc)
547 {
548 	struct mmc_cmd cmd;
549 	u8 busy = true;
550 	uint start;
551 	int ret;
552 	int timeout = 1000;
553 
554 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
555 	cmd.resp_type = MMC_RSP_R1;
556 	cmd.cmdarg = mmc->rca << 16;
557 
558 	start = get_timer(0);
559 
560 	do {
561 		if (mmc_can_card_busy(mmc)) {
562 			busy = mmc_card_busy(mmc);
563 		} else {
564 			ret = mmc_send_cmd(mmc, &cmd, NULL);
565 
566 			if (ret)
567 				return ret;
568 
569 			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
570 				return -EBADMSG;
571 			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
572 				MMC_STATE_PRG;
573 		}
574 
575 		if (get_timer(start) > timeout && busy)
576 			return -ETIMEDOUT;
577 	} while (busy);
578 
579 	return 0;
580 }
581 
582 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
583 			u8 send_status)
584 {
585 	struct mmc_cmd cmd;
586 	int retries = 3;
587 	int ret;
588 
589 	cmd.cmdidx = MMC_CMD_SWITCH;
590 	cmd.resp_type = MMC_RSP_R1b;
591 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
592 				 (index << 16) |
593 				 (value << 8);
594 
595 	do {
596 		ret = mmc_send_cmd(mmc, &cmd, NULL);
597 
598 		if (!ret && send_status)
599 			return mmc_poll_for_busy(mmc);
600 	} while (--retries > 0 && ret);
601 
602 	return ret;
603 }
604 
/* Write one EXT_CSD byte and wait for the card to finish programming. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
609 
/*
 * Probe the widest usable data bus (8-bit, then 4-bit) on an eMMC
 * device: switch the card, set the host width, and verify by re-reading
 * the EXT_CSD and comparing read-only fields against the 1-bit-mode
 * copy read first.
 *
 * Returns the selected MMC_BUS_WIDTH_* value (positive) on success,
 * 0 if wide bus is not applicable, -EBADMSG if verification failed for
 * every width, or another negative error from the commands involved.
 */
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	/* wide bus needs MMC v4+ and a host that supports 4- or 8-bit */
	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	/* reference EXT_CSD, read at the current (1-bit) width */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* start at 8-bit only if the host supports it */
	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		/* re-read EXT_CSD at the new width for comparison */
		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			&test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}
682 
/*
 * Reference tuning-block patterns: the data received from the card in
 * response to a tuning command is compared byte-for-byte against these
 * (64 bytes for a 4-bit bus, 128 bytes for an 8-bit bus).
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
712 
713 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
714 {
715 	struct mmc_cmd cmd;
716 	struct mmc_data data;
717 	const u8 *tuning_block_pattern;
718 	int size, err = 0;
719 	u8 *data_buf;
720 
721 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
722 		tuning_block_pattern = tuning_blk_pattern_8bit;
723 		size = sizeof(tuning_blk_pattern_8bit);
724 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
725 		tuning_block_pattern = tuning_blk_pattern_4bit;
726 		size = sizeof(tuning_blk_pattern_4bit);
727 	} else {
728 		return -EINVAL;
729 	}
730 
731 	data_buf = calloc(1, size);
732 	if (!data_buf)
733 		return -ENOMEM;
734 
735 	cmd.cmdidx = opcode;
736 	cmd.resp_type = MMC_RSP_R1;
737 	cmd.cmdarg = 0;
738 
739 	data.dest = (char *)data_buf;
740 	data.blocksize = size;
741 	data.blocks = 1;
742 	data.flags = MMC_DATA_READ;
743 
744 	err = mmc_send_cmd(mmc, &cmd, &data);
745 	if (err)
746 		goto out;
747 
748 	if (memcmp(data_buf, tuning_block_pattern, size))
749 		err = -EIO;
750 out:
751 	free(data_buf);
752 	return err;
753 }
754 
/*
 * Run the host driver's tuning procedure using the appropriate tuning
 * command for the card type (SD vs eMMC use different commands —
 * presumably CMD19 vs CMD21; confirm against mmc.h). Returns -EIO if
 * the host provides no tuning hook.
 */
static int mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

	/* Note: the braces below deliberately span the #ifdef/#else so
	 * that exactly one of the two 'if' headers pairs with the shared
	 * else branch after #endif. */
#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}
779 
/* Tune the sample point for HS200; thin wrapper kept for naming
 * symmetry with the other mode-selection helpers. */
static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}
784 
785 static int mmc_select_hs(struct mmc *mmc)
786 {
787 	int ret;
788 
789 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
790 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
791 
792 	if (!ret)
793 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
794 
795 	return ret;
796 }
797 
798 static int mmc_select_hs_ddr(struct mmc *mmc)
799 {
800 	u32 ext_csd_bits;
801 	int err = 0;
802 
803 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
804 		return 0;
805 
806 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
807 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
808 
809 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
810 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
811 	if (err)
812 		return err;
813 
814 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
815 
816 	return 0;
817 }
818 
819 #ifndef CONFIG_SPL_BUILD
820 static int mmc_select_hs200(struct mmc *mmc)
821 {
822 	int ret;
823 	struct mmc_cmd cmd;
824 
825 	/*
826 	 * Set the bus width(4 or 8) with host's support and
827 	 * switch to HS200 mode if bus width is set successfully.
828 	 */
829 	ret = mmc_select_bus_width(mmc);
830 
831 	if (ret > 0) {
832 		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
833 				   EXT_CSD_HS_TIMING,
834 				   EXT_CSD_TIMING_HS200, false);
835 
836 		if (ret)
837 			return ret;
838 
839 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
840 
841 		cmd.cmdidx = MMC_CMD_SEND_STATUS;
842 		cmd.resp_type = MMC_RSP_R1;
843 		cmd.cmdarg = mmc->rca << 16;
844 
845 		ret = mmc_send_cmd(mmc, &cmd, NULL);
846 
847 		if (ret)
848 			return ret;
849 
850 		if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
851 			return -EBADMSG;
852 	}
853 
854 	return ret;
855 }
856 #endif
857 
858 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
859 {
860 	u8 card_type;
861 	u32 host_caps, avail_type = 0;
862 
863 	card_type = ext_csd[EXT_CSD_CARD_TYPE];
864 	host_caps = mmc->cfg->host_caps;
865 
866 	if ((host_caps & MMC_MODE_HS) &&
867 	    (card_type & EXT_CSD_CARD_TYPE_26))
868 		avail_type |= EXT_CSD_CARD_TYPE_26;
869 
870 	if ((host_caps & MMC_MODE_HS) &&
871 	    (card_type & EXT_CSD_CARD_TYPE_52))
872 		avail_type |= EXT_CSD_CARD_TYPE_52;
873 
874 	/*
875 	 * For the moment, u-boot doesn't support signal voltage
876 	 * switch, therefor we assume that host support ddr52
877 	 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and
878 	 * hs400 are the same).
879 	 */
880 	if ((host_caps & MMC_MODE_DDR_52MHz) &&
881 	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
882 		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
883 
884 	if ((host_caps & MMC_MODE_HS200) &&
885 	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
886 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
887 
888 	/*
889 	 * If host can support HS400, it means that host can also
890 	 * support HS200.
891 	 */
892 	if ((host_caps & MMC_MODE_HS400) &&
893 	    (host_caps & MMC_MODE_8BIT) &&
894 	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
895 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
896 				EXT_CSD_CARD_TYPE_HS400_1_8V;
897 
898 	if ((host_caps & MMC_MODE_HS400ES) &&
899 	    (host_caps & MMC_MODE_8BIT) &&
900 	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
901 	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
902 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
903 				EXT_CSD_CARD_TYPE_HS400_1_8V |
904 				EXT_CSD_CARD_TYPE_HS400ES;
905 
906 	return avail_type;
907 }
908 
909 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
910 {
911 	int clock = 0;
912 
913 	if (mmc_card_hs(mmc))
914 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
915 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
916 	else if (mmc_card_hs200(mmc) ||
917 		 mmc_card_hs400(mmc) ||
918 		 mmc_card_hs400es(mmc))
919 		clock = MMC_HS200_MAX_DTR;
920 
921 	mmc_set_clock(mmc, clock);
922 }
923 
/*
 * Move an eMMC card to its best supported speed mode: read the
 * EXT_CSD, compute the usable mode set, try HS200 (non-SPL builds)
 * then plain HS, raise the clock, and finally run tuning (HS200) or
 * negotiate wide bus / DDR as applicable. No-op for SPI hosts and
 * pre-v4 cards.
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

#ifndef CONFIG_SPL_BUILD
	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else
#endif
	if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	if (mmc_card_hs200(mmc))
		err = mmc_hs200_tuning(mmc);
	else if (!mmc_card_hs400es(mmc)) {
		/* in HS mode, try for a wide bus and then DDR52 */
		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
			err = mmc_select_hs_ddr(mmc);
	}

	return err;
}
973 
974 static int mmc_set_capacity(struct mmc *mmc, int part_num)
975 {
976 	switch (part_num) {
977 	case 0:
978 		mmc->capacity = mmc->capacity_user;
979 		break;
980 	case 1:
981 	case 2:
982 		mmc->capacity = mmc->capacity_boot;
983 		break;
984 	case 3:
985 		mmc->capacity = mmc->capacity_rpmb;
986 		break;
987 	case 4:
988 	case 5:
989 	case 6:
990 	case 7:
991 		mmc->capacity = mmc->capacity_gp[part_num - 4];
992 		break;
993 	default:
994 		return -1;
995 	}
996 
997 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
998 
999 	return 0;
1000 }
1001 
1002 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1003 {
1004 	int ret;
1005 
1006 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1007 			 (mmc->part_config & ~PART_ACCESS_MASK)
1008 			 | (part_num & PART_ACCESS_MASK));
1009 
1010 	/*
1011 	 * Set the capacity if the switch succeeded or was intended
1012 	 * to return to representing the raw device.
1013 	 */
1014 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1015 		ret = mmc_set_capacity(mmc, part_num);
1016 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1017 	}
1018 
1019 	return ret;
1020 }
1021 
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write-reliability) according to @conf.
 *
 * @mode selects how far to go:
 *   MMC_HWPART_CONF_CHECK    - validate only, write nothing
 *   MMC_HWPART_CONF_SET      - write sizes/attributes, don't finalize
 *   MMC_HWPART_CONF_COMPLETE - also write WR_REL_SET and set
 *                              PARTITION_SETTING_COMPLETED (one-way;
 *                              takes effect after a power cycle)
 *
 * Returns 0 on success, -EINVAL on bad arguments/alignment,
 * -EMEDIUMTYPE when the card lacks a required capability, -EPERM if
 * the card is already partitioned, or a command error.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed card: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	/* validate and size the four general-purpose partitions */
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* 24-bit little-endian field spread over three EXT_CSD bytes */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1215 
1216 #if !CONFIG_IS_ENABLED(DM_MMC)
1217 int mmc_getcd(struct mmc *mmc)
1218 {
1219 	int cd;
1220 
1221 	cd = board_mmc_getcd(mmc);
1222 
1223 	if (cd < 0) {
1224 		if (mmc->cfg->ops->getcd)
1225 			cd = mmc->cfg->ops->getcd(mmc);
1226 		else
1227 			cd = 1;
1228 	}
1229 
1230 	return cd;
1231 }
1232 #endif
1233 
1234 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1235 {
1236 	struct mmc_cmd cmd;
1237 	struct mmc_data data;
1238 
1239 	/* Switch the frequency */
1240 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1241 	cmd.resp_type = MMC_RSP_R1;
1242 	cmd.cmdarg = (mode << 31) | 0xffffff;
1243 	cmd.cmdarg &= ~(0xf << (group * 4));
1244 	cmd.cmdarg |= value << (group * 4);
1245 
1246 	data.dest = (char *)resp;
1247 	data.blocksize = 64;
1248 	data.blocks = 1;
1249 	data.flags = MMC_DATA_READ;
1250 
1251 	return mmc_send_cmd(mmc, &cmd, &data);
1252 }
1253 
1254 
/*
 * sd_change_freq() - read the SD card's SCR and negotiate high speed.
 *
 * Reads the SCR to determine the SD spec version and 4-bit support,
 * then uses CMD6 to query and — if both card and host support it —
 * switch the card into high-speed mode.  Capabilities are accumulated
 * in mmc->card_caps; the actual clock change is left to the caller.
 *
 * @mmc: card to configure
 * Return: 0 on success (including "nothing to switch"), error code on
 * command failure.
 */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	/* SPI hosts do not support the switch protocol */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* SCR read is retried up to 3 extra times on failure */
	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* The SCR is sent big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SCR bits 59:56 carry the SD spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card supports SD_HIGHSPEED.
	 * This can avoid further problems when the card runs in a different
	 * mode than the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* Function group 1 result nibble == 0x1 means high-speed selected */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
1364 
/*
 * sd_read_ssr() - read the SD Status register (ACMD13) and extract the
 * allocation-unit size and erase-timeout parameters into mmc->ssr.
 *
 * @mmc: card to query
 * Return: 0 on success (an invalid AU size is only logged, not fatal),
 * error code on command failure.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* retry up to 3 extra times before giving up */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* The 512-bit SD status block arrives big-endian */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	/* AU codes above 9 are only defined for SD 3.0 cards */
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1420 
/*
 * Frequency unit bases for the CSD TRAN_SPEED field.
 * Divided by 10 to be nice to platforms without floating point
 * (the multipliers[] table below is scaled up by 10 to compensate).
 */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1429 
/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by the TRAN_SPEED time-value field (bits 6:3 of the byte);
 * index 0 is reserved by the spec.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1451 
1452 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Pass the current I/O settings to the host driver via its set_ios hook
 * (no-op when the driver does not implement one).
 */
static void mmc_set_ios(struct mmc *mmc)
{
	if (mmc->cfg->ops->set_ios)
		mmc->cfg->ops->set_ios(mmc);
}
1458 
1459 static bool mmc_card_busy(struct mmc *mmc)
1460 {
1461 	if (!mmc->cfg->ops->card_busy)
1462 		return -ENOSYS;
1463 
1464 	return mmc->cfg->ops->card_busy(mmc);
1465 }
1466 
1467 static bool mmc_can_card_busy(struct mmc *)
1468 {
1469 	return !!mmc->cfg->ops->card_busy;
1470 }
1471 #endif
1472 
/*
 * mmc_startup() - bring an identified card to Transfer State and read
 * its metadata.
 *
 * Runs the identification sequence (CID, relative address, CSD),
 * derives version, block lengths and capacity, selects the card,
 * parses EXT_CSD for eMMC >= v4 (partitions, erase groups), negotiates
 * bus width and clock, and finally fills in the block-device
 * descriptor.
 *
 * @mmc: card to start up (operating-condition negotiation already done)
 * Return: 0 on success, error code on any command failure.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq, tran_speed;
	u64 cmult, csize, capacity;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	bool has_parts = false;
	bool part_completed;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* Derive the MMC version from the CSD if not already known */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/*
	 * Compute the user capacity from the CSD: high-capacity cards use
	 * a fixed multiplier of 2^8, classic cards a 3-bit C_SIZE_MULT.
	 */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR if the card reports support and one was set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check  ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		/* Refine the version from the EXT_CSD revision */
		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

		/* boot and RPMB sizes are expressed in 128 KiB multiples */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			/* NOTE: this 'mult' shadows the outer tran-speed
			 * multiplier, which has already been consumed */
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		if (part_completed) {
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		/* ACMD6 switches the SD card to 4-bit bus width */
		if (mmc->card_caps & MMC_MODE_4BIT) {
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = 50000000;
		else
			tran_speed = 25000000;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	/* Decode vendor/product/revision strings from the CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}
1863 
1864 static int mmc_send_if_cond(struct mmc *mmc)
1865 {
1866 	struct mmc_cmd cmd;
1867 	int err;
1868 
1869 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
1870 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1871 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1872 	cmd.resp_type = MMC_RSP_R7;
1873 
1874 	err = mmc_send_cmd(mmc, &cmd, NULL);
1875 
1876 	if (err)
1877 		return err;
1878 
1879 	if ((cmd.response[0] & 0xff) != 0xaa)
1880 		return -EOPNOTSUPP;
1881 	else
1882 		mmc->version = SD_VERSION_2;
1883 
1884 	return 0;
1885 }
1886 
1887 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations; boards override this weak
 * no-op stub when they need to switch supplies before init. */
__weak void board_mmc_power_init(void)
{
}
1892 #endif
1893 
/*
 * mmc_power_init() - enable the card's power supply.
 *
 * With driver model (and DM_REGULATOR outside SPL), looks up the
 * "vmmc-supply" regulator of the device and enables it; a missing
 * regulator is not treated as an error.  Without DM, defers to the
 * board's board_mmc_power_init() hook.
 *
 * @mmc: card whose supply to enable
 * Return: 0 on success or nothing to do, error code from the regulator
 * enable otherwise.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* no regulator described: assume power is always on */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
1923 
/*
 * mmc_start_init() - begin card initialization.
 *
 * Checks card presence, powers up the card, resets the host to 1-bit
 * bus / minimal clock / legacy timing, issues CMD0 and starts the
 * SD/MMC operating-condition negotiation.  Initialization is finished
 * later by mmc_complete_init().
 *
 * @mmc: card to initialize
 * Return: 0 on success, -ENOMEDIUM when no card is present,
 * -EOPNOTSUPP when the card answers neither SD nor MMC negotiation,
 * other error codes on command failure.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* Start from the slowest, narrowest mode every card understands */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2.  The result is deliberately ignored: a
	 * failure merely means the card is not SDv2; sd_send_op_cond()
	 * below decides whether it is an SD card at all.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
1996 
1997 static int mmc_complete_init(struct mmc *mmc)
1998 {
1999 	int err = 0;
2000 
2001 	mmc->init_in_progress = 0;
2002 	if (mmc->op_cond_pending)
2003 		err = mmc_complete_op_cond(mmc);
2004 
2005 	if (!err)
2006 		err = mmc_startup(mmc);
2007 	if (err)
2008 		mmc->has_init = 0;
2009 	else
2010 		mmc->has_init = 1;
2011 	return err;
2012 }
2013 
/*
 * mmc_init() - fully initialize a card if not done already.
 *
 * Starts initialization unless one is already in flight, then completes
 * it.  On failure the error code and elapsed time are printed.
 *
 * @mmc: card to initialize
 * Return: 0 on success (or if already initialized), error code
 * otherwise.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	/* link the uclass-private back-pointer before any use */
	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	/* timestamp only used for the failure report below */
	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2038 
/* Record the Driver Stage Register value; it is programmed into the
 * card later during mmc_startup() (when the card reports DSR support).
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2044 
/* CPU-specific MMC initializations; weak default registers nothing and
 * reports failure so mmc_probe() knows no devices were added. */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2050 
/* board-specific MMC initializations; weak default registers nothing,
 * causing mmc_probe() to fall back to cpu_mmc_init(). */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2056 
2057 void mmc_set_preinit(struct mmc *mmc, int preinit)
2058 {
2059 	mmc->preinit = preinit;
2060 }
2061 
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: devices are probed on demand, nothing to do. */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Driver model: bind sequence numbers, then probe every MMC device. */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	/* probe failures are reported but do not abort the scan */
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy path: let board code register devices, else fall back to CPU. */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2105 
/*
 * mmc_initialize() - one-time, global MMC subsystem setup.
 *
 * Idempotent: subsequent calls return immediately.  Initializes the
 * legacy device list (non-BLK builds), probes all controllers, prints
 * the device list (non-SPL) and pre-initializes any marked devices.
 *
 * @bis: board information, passed through to the probe hooks
 * Return: 0 on success, error code from mmc_probe() on failure.
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}
2130 
2131 #ifdef CONFIG_CMD_BKOPS_ENABLE
2132 int mmc_set_bkops_enable(struct mmc *mmc)
2133 {
2134 	int err;
2135 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2136 
2137 	err = mmc_send_ext_csd(mmc, ext_csd);
2138 	if (err) {
2139 		puts("Could not get ext_csd register values\n");
2140 		return err;
2141 	}
2142 
2143 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2144 		puts("Background operations not supported on device\n");
2145 		return -EMEDIUMTYPE;
2146 	}
2147 
2148 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2149 		puts("Background operations already enabled\n");
2150 		return 0;
2151 	}
2152 
2153 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2154 	if (err) {
2155 		puts("Failed to enable manual background operations\n");
2156 		return err;
2157 	}
2158 
2159 	puts("Enabled manual background operations\n");
2160 
2161 	return 0;
2162 }
2163 #endif
2164