xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision 2c6a058b7ea25398013cb25b4e3bb96fe40da1a5)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
/*
 * SD allocation-unit (AU) size lookup table, values in 512-byte
 * sectors.  Presumably indexed by the 4-bit AU_SIZE field of the SD
 * status register (index 0 means "not defined") — confirm against the
 * SD Physical Layer spec.
 */
static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};
32 
#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: exactly one statically-allocated controller, no device list. */
static struct mmc mmc_static;

/* Always returns the single static device; dev_num is ignored. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Kick off early init for the static device when preinit is requested. */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* This adapter always wants preinit forced on */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* The block descriptor is embedded directly in struct mmc in TINY mode. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
55 
#if !CONFIG_IS_ENABLED(DM_MMC)
/* Board hook: return 1/0 for write-protect state, or -1 for "don't know". */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Get the card's write-protect state.  The board hook takes precedence;
 * when it reports "don't know" fall back to the controller op, and
 * finally assume not write-protected.
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Board hook: return 1/0 for card-detect state, or -1 for "don't know". */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif
83 
#ifdef CONFIG_MMC_TRACE
/* Log the command index and argument before it is sent to the host. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/*
 * Log the host driver's return code, or on success the response words
 * decoded according to the expected response type.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			/* R2 (CID/CSD) carries four 32-bit response words */
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				/* dump each word most-significant byte first */
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Print the card state field extracted from an R1 status response. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	/* CURR_STATE field starts at bit 9 of the status word */
	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif
151 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Issue a command (with optional data transfer) through the host
 * driver's send_cmd op, emitting trace output before and after when
 * CONFIG_MMC_TRACE is enabled.  Returns the driver's result.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif
164 
165 int mmc_send_status(struct mmc *mmc, int timeout)
166 {
167 	struct mmc_cmd cmd;
168 	int err, retries = 5;
169 
170 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
171 	cmd.resp_type = MMC_RSP_R1;
172 	if (!mmc_host_is_spi(mmc))
173 		cmd.cmdarg = mmc->rca << 16;
174 
175 	while (1) {
176 		err = mmc_send_cmd(mmc, &cmd, NULL);
177 		if (!err) {
178 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
179 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
180 			     MMC_STATE_PRG)
181 				break;
182 			else if (cmd.response[0] & MMC_STATUS_MASK) {
183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
184 				printf("Status Error: 0x%08X\n",
185 					cmd.response[0]);
186 #endif
187 				return -ECOMM;
188 			}
189 		} else if (--retries < 0)
190 			return err;
191 
192 		if (timeout-- <= 0)
193 			break;
194 
195 		udelay(1000);
196 	}
197 
198 	mmc_trace_state(mmc, &cmd);
199 	if (timeout <= 0) {
200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
201 		printf("Timeout waiting card ready\n");
202 #endif
203 		return -ETIMEDOUT;
204 	}
205 
206 	return 0;
207 }
208 
209 int mmc_set_blocklen(struct mmc *mmc, int len)
210 {
211 	struct mmc_cmd cmd;
212 
213 	if (mmc_card_ddr(mmc))
214 		return 0;
215 
216 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
217 	cmd.resp_type = MMC_RSP_R1;
218 	cmd.cmdarg = len;
219 
220 	return mmc_send_cmd(mmc, &cmd, NULL);
221 }
222 
223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
224 			   lbaint_t blkcnt)
225 {
226 	struct mmc_cmd cmd;
227 	struct mmc_data data;
228 
229 	if (blkcnt > 1)
230 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
231 	else
232 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
233 
234 	if (mmc->high_capacity)
235 		cmd.cmdarg = start;
236 	else
237 		cmd.cmdarg = start * mmc->read_bl_len;
238 
239 	cmd.resp_type = MMC_RSP_R1;
240 
241 	data.dest = dst;
242 	data.blocks = blkcnt;
243 	data.blocksize = mmc->read_bl_len;
244 	data.flags = MMC_DATA_READ;
245 
246 	if (mmc_send_cmd(mmc, &cmd, &data))
247 		return 0;
248 
249 	if (blkcnt > 1) {
250 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
251 		cmd.cmdarg = 0;
252 		cmd.resp_type = MMC_RSP_R1b;
253 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
255 			printf("mmc fail to send stop cmd\n");
256 #endif
257 			return 0;
258 		}
259 	}
260 
261 	return blkcnt;
262 }
263 
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
	/*
	 * Block-device read entry point.  Reads blkcnt blocks starting
	 * at LBA 'start' into dst, in chunks no larger than the host's
	 * b_max limit.  Returns blkcnt on success, 0 on any failure.
	 */
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* Make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* Reject reads extending past the end of the device */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		/* chunk size is bounded by the controller's b_max */
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			int timeout = 0;
			/*
			 * Read failed: fully re-initialise the card and
			 * retry the same chunk ('timeout' here is a
			 * retry counter, not a time).
			 */
re_init_retry:
			timeout++;
			/*
			 * Try re-init seven times.
			 */
			if (timeout > 7) {
				printf("Re-init retry timeout\n");
				return 0;
			}

			mmc->has_init = 0;
			if (mmc_init(mmc))
				return 0;

			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
				printf("%s: Re-init mmc_read_blocks error\n",
				       __func__);
				goto re_init_retry;
			}
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
339 
340 void mmc_set_clock(struct mmc *mmc, uint clock)
341 {
342 	if (clock > mmc->cfg->f_max)
343 		clock = mmc->cfg->f_max;
344 
345 	if (clock < mmc->cfg->f_min)
346 		clock = mmc->cfg->f_min;
347 
348 	mmc->clock = clock;
349 
350 	mmc_set_ios(mmc);
351 }
352 
353 static void mmc_set_bus_width(struct mmc *mmc, uint width)
354 {
355 	mmc->bus_width = width;
356 
357 	mmc_set_ios(mmc);
358 }
359 
360 static void mmc_set_timing(struct mmc *mmc, uint timing)
361 {
362 	mmc->timing = timing;
363 	mmc_set_ios(mmc);
364 }
365 
366 static int mmc_go_idle(struct mmc *mmc)
367 {
368 	struct mmc_cmd cmd;
369 	int err;
370 
371 	udelay(1000);
372 
373 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
374 	cmd.cmdarg = 0;
375 	cmd.resp_type = MMC_RSP_NONE;
376 
377 	err = mmc_send_cmd(mmc, &cmd, NULL);
378 
379 	if (err)
380 		return err;
381 
382 	udelay(2000);
383 
384 	return 0;
385 }
386 
/*
 * SD power-up sequence: loop on APP_CMD + SEND_OP_COND (ACMD41) until
 * the card reports power-up complete (OCR_BUSY set), then record the
 * OCR, capacity class and card version.  Returns 0 on success,
 * -EOPNOTSUPP if the card never leaves busy, or a transport error.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* CMD55 prefix: next command is application-specific */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		/* request high-capacity support only from a 2.0 card */
		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	/* block-addressed (high-capacity) if the OCR HCS/CCS bit is set */
	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
454 
455 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
456 {
457 	struct mmc_cmd cmd;
458 	int err;
459 
460 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
461 	cmd.resp_type = MMC_RSP_R3;
462 	cmd.cmdarg = 0;
463 	if (use_arg && !mmc_host_is_spi(mmc))
464 		cmd.cmdarg = OCR_HCS |
465 			(mmc->cfg->voltages &
466 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
467 			(mmc->ocr & OCR_ACCESS_MODE);
468 
469 	err = mmc_send_cmd(mmc, &cmd, NULL);
470 	if (err)
471 		return err;
472 	mmc->ocr = cmd.response[0];
473 	return 0;
474 }
475 
476 static int mmc_send_op_cond(struct mmc *mmc)
477 {
478 	int err, i;
479 
480 	/* Some cards seem to need this */
481 	mmc_go_idle(mmc);
482 
483  	/* Asking to the card its capabilities */
484 	for (i = 0; i < 2; i++) {
485 		err = mmc_send_op_cond_iter(mmc, i != 0);
486 		if (err)
487 			return err;
488 
489 		/* exit if not busy (flag seems to be inverted) */
490 		if (mmc->ocr & OCR_BUSY)
491 			break;
492 	}
493 	mmc->op_cond_pending = 1;
494 	return 0;
495 }
496 
/*
 * Finish the eMMC power-up started by mmc_send_op_cond(): keep issuing
 * CMD1 until the card reports power-up complete, then record the OCR,
 * capacity class and default RCA.  Returns 0 on success, -EOPNOTSUPP
 * on timeout, or a transport error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* exact version is determined later from the CSD/EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
542 
543 
544 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
545 {
546 	struct mmc_cmd cmd;
547 	struct mmc_data data;
548 	int err;
549 
550 	/* Get the Card Status Register */
551 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
552 	cmd.resp_type = MMC_RSP_R1;
553 	cmd.cmdarg = 0;
554 
555 	data.dest = (char *)ext_csd;
556 	data.blocks = 1;
557 	data.blocksize = MMC_MAX_BLOCK_LEN;
558 	data.flags = MMC_DATA_READ;
559 
560 	err = mmc_send_cmd(mmc, &cmd, &data);
561 
562 	return err;
563 }
564 
/*
 * Wait for the card to finish a CMD6 switch.  Prefer the host's
 * card-busy detection when available; otherwise poll CMD13 and wait for
 * the programming state to clear.  Returns 0 when idle, -EBADMSG if
 * the switch-error bit is set, -ETIMEDOUT once the 1000 get_timer-unit
 * window (presumably ms — confirm) elapses while still busy, or a
 * CMD13 transport error.
 */
static int mmc_poll_for_busy(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	do {
		if (mmc_can_card_busy(mmc)) {
			/* host can sense the busy signal directly */
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
				MMC_STATE_PRG;
		}

		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}
600 
601 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
602 			u8 send_status)
603 {
604 	struct mmc_cmd cmd;
605 	int retries = 3;
606 	int ret;
607 
608 	cmd.cmdidx = MMC_CMD_SWITCH;
609 	cmd.resp_type = MMC_RSP_R1b;
610 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
611 				 (index << 16) |
612 				 (value << 8);
613 
614 	do {
615 		ret = mmc_send_cmd(mmc, &cmd, NULL);
616 
617 		if (!ret && send_status)
618 			return mmc_poll_for_busy(mmc);
619 	} while (--retries > 0 && ret);
620 
621 	return ret;
622 }
623 
624 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
625 {
626 	return __mmc_switch(mmc, set, index, value, true);
627 }
628 
/*
 * Probe the widest working bus: try 8-bit (if the host supports it)
 * then 4-bit.  After each CMD6 bus-width switch, re-read the EXT_CSD
 * and compare read-only fields against the copy read beforehand; a
 * match proves the bus works at that width.  Returns the working
 * MMC_BUS_WIDTH_* value (positive) on success, 0 when not applicable
 * (pre-4.x card or 1-bit-only host), or a negative error.
 */
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	/* reference EXT_CSD, read at the current (narrow) bus width */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* skip the 8-bit attempt if the host can't do 8-bit */
	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			&test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}
701 
/*
 * Reference tuning-block patterns for 4-bit and 8-bit buses.
 * mmc_send_tuning() compares the data returned by the card against
 * these (presumably the standard SD/eMMC tuning patterns — confirm
 * against the specs).
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
731 
732 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
733 {
734 	struct mmc_cmd cmd;
735 	struct mmc_data data;
736 	const u8 *tuning_block_pattern;
737 	int size, err = 0;
738 	u8 *data_buf;
739 
740 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
741 		tuning_block_pattern = tuning_blk_pattern_8bit;
742 		size = sizeof(tuning_blk_pattern_8bit);
743 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
744 		tuning_block_pattern = tuning_blk_pattern_4bit;
745 		size = sizeof(tuning_blk_pattern_4bit);
746 	} else {
747 		return -EINVAL;
748 	}
749 
750 	data_buf = calloc(1, size);
751 	if (!data_buf)
752 		return -ENOMEM;
753 
754 	cmd.cmdidx = opcode;
755 	cmd.resp_type = MMC_RSP_R1;
756 	cmd.cmdarg = 0;
757 
758 	data.dest = (char *)data_buf;
759 	data.blocksize = size;
760 	data.blocks = 1;
761 	data.flags = MMC_DATA_READ;
762 
763 	err = mmc_send_cmd(mmc, &cmd, &data);
764 	if (err)
765 		goto out;
766 
767 	if (memcmp(data_buf, tuning_block_pattern, size))
768 		err = -EIO;
769 out:
770 	free(data_buf);
771 	return err;
772 }
773 
/*
 * Run the host controller's tuning procedure with the bus-appropriate
 * tuning opcode (SD vs eMMC HS200 variants).  Returns the driver's
 * result, or -EIO when the host has no tuning op.
 *
 * NOTE: the preprocessor below deliberately splits one if/else across
 * #ifndef/#else branches; the braces only balance after preprocessing.
 */
static int mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}
798 
/* HS200 tuning currently needs nothing beyond the generic procedure. */
static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}
803 
804 static int mmc_select_hs(struct mmc *mmc)
805 {
806 	int ret;
807 
808 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
809 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
810 
811 	if (!ret)
812 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
813 
814 	return ret;
815 }
816 
817 static int mmc_select_hs_ddr(struct mmc *mmc)
818 {
819 	u32 ext_csd_bits;
820 	int err = 0;
821 
822 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
823 		return 0;
824 
825 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
826 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
827 
828 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
829 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
830 	if (err)
831 		return err;
832 
833 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
834 
835 	return 0;
836 }
837 
#ifndef CONFIG_SPL_BUILD
/*
 * Switch the card into HS200.  First negotiate the widest working bus
 * (4 or 8 bit), then issue the CMD6 timing switch without busy-polling
 * (the link timing is about to change), update the host timing, and
 * verify via CMD13 that the switch took.  Returns 0 on success, the
 * bus-width probe's non-positive result unchanged when it fails, or a
 * negative error (-EBADMSG on a switch-error status bit).
 */
static int mmc_select_hs200(struct mmc *mmc)
{
	int ret;
	struct mmc_cmd cmd;

	/*
	 * Set the bus width(4 or 8) with host's support and
	 * switch to HS200 mode if bus width is set successfully.
	 */
	ret = mmc_select_bus_width(mmc);

	if (ret > 0) {
		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING,
				   EXT_CSD_TIMING_HS200, false);

		if (ret)
			return ret;

		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);

		/* confirm the card accepted the switch */
		cmd.cmdidx = MMC_CMD_SEND_STATUS;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;

		ret = mmc_send_cmd(mmc, &cmd, NULL);

		if (ret)
			return ret;

		if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
			return -EBADMSG;
	}

	return ret;
}
#endif
876 
/*
 * Switch the card from HS200 into HS400 (the caller has already
 * verified an 8-bit bus).  Sequence: drop to HS timing and clock,
 * confirm with CMD13, switch the bus to DDR 8-bit, then switch timing
 * to HS400 and update the host.  The CMD6 timing switches skip
 * busy-polling because the link timing is changing underneath them.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int ret;

	/* Switch card to HS mode */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (ret)
		return ret;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	ret = mmc_send_status(mmc, 1000);
	if (ret)
		return ret;

	/* Switch card to DDR */
	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8);
	if (ret)
		return ret;

	/* Switch card to HS400 */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
	if (ret)
		return ret;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);

	return ret;
}
915 
/*
 * Intersect the card's EXT_CSD[CARD_TYPE] capability bits with the
 * host's capability flags and return the usable mode mask as
 * EXT_CSD_CARD_TYPE_* bits.
 */
static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, u-boot doesn't support signal voltage
	 * switch, therefor we assume that host support ddr52
	 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and
	 * hs400 are the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
				EXT_CSD_CARD_TYPE_HS400_1_8V;

	/* HS400ES additionally requires the card's strobe-support byte */
	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
				EXT_CSD_CARD_TYPE_HS400_1_8V |
				EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}
966 
967 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
968 {
969 	int clock = 0;
970 
971 	if (mmc_card_hs(mmc))
972 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
973 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
974 	else if (mmc_card_hs200(mmc) ||
975 		 mmc_card_hs400(mmc) ||
976 		 mmc_card_hs400es(mmc))
977 		clock = MMC_HS200_MAX_DTR;
978 
979 	mmc_set_clock(mmc, clock);
980 }
981 
/*
 * Negotiate the fastest bus mode both card and host support: HS200
 * (then HS400 when available on an 8-bit bus), plain high speed, or
 * fail with -EINVAL.  Also selects the widest bus and DDR where
 * applicable, and programs the matching clock.
 *
 * NOTE(review): in the HS200 branch a tuning failure is overwritten by
 * the mmc_select_hs400() result when HS400 is available — looks
 * intentional (HS400 re-clocks the bus) but worth confirming.
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

#ifndef CONFIG_SPL_BUILD
	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else
#endif
	if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	if (mmc_card_hs200(mmc)) {
		/* tune at HS200, then optionally move up to HS400 */
		err = mmc_hs200_tuning(mmc);
		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400(mmc);
			mmc_set_bus_speed(mmc, avail_type);
		}
	} else if (!mmc_card_hs400es(mmc)) {
		/* widen the bus; then DDR if both sides support it */
		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
			err = mmc_select_hs_ddr(mmc);
	}

	return err;
}
1036 
1037 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1038 {
1039 	switch (part_num) {
1040 	case 0:
1041 		mmc->capacity = mmc->capacity_user;
1042 		break;
1043 	case 1:
1044 	case 2:
1045 		mmc->capacity = mmc->capacity_boot;
1046 		break;
1047 	case 3:
1048 		mmc->capacity = mmc->capacity_rpmb;
1049 		break;
1050 	case 4:
1051 	case 5:
1052 	case 6:
1053 	case 7:
1054 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1055 		break;
1056 	default:
1057 		return -1;
1058 	}
1059 
1060 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1061 
1062 	return 0;
1063 }
1064 
1065 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1066 {
1067 	int ret;
1068 
1069 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1070 			 (mmc->part_config & ~PART_ACCESS_MASK)
1071 			 | (part_num & PART_ACCESS_MASK));
1072 
1073 	/*
1074 	 * Set the capacity if the switch succeeded or was intended
1075 	 * to return to representing the raw device.
1076 	 */
1077 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1078 		ret = mmc_set_capacity(mmc, part_num);
1079 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1080 	}
1081 
1082 	return ret;
1083 }
1084 
/*
 * Configure the eMMC hardware partition layout (enhanced user area,
 * general-purpose partitions, write-reliability) via EXT_CSD writes.
 *
 * @conf: requested layout; enh/GP sizes must be HC WP group aligned.
 * @mode: MMC_HWPART_CONF_CHECK only validates; _SET also writes sizes
 *        and attributes; _COMPLETE additionally writes WR_REL_SET and
 *        sets PARTITION_SETTING_COMPLETED (write-once, effective only
 *        after a power cycle).
 * Return: 0 on success; -EINVAL on bad arguments or alignment,
 * -EMEDIUMTYPE when the card lacks a required capability, -EPERM when
 * already partitioned, or the error from an EXT_CSD access.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed card: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	/* validate and accumulate the GP partition sizes */
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1278 
1279 #if !CONFIG_IS_ENABLED(DM_MMC)
1280 int mmc_getcd(struct mmc *mmc)
1281 {
1282 	int cd;
1283 
1284 	cd = board_mmc_getcd(mmc);
1285 
1286 	if (cd < 0) {
1287 		if (mmc->cfg->ops->getcd)
1288 			cd = mmc->cfg->ops->getcd(mmc);
1289 		else
1290 			cd = 1;
1291 	}
1292 
1293 	return cd;
1294 }
1295 #endif
1296 
1297 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1298 {
1299 	struct mmc_cmd cmd;
1300 	struct mmc_data data;
1301 
1302 	/* Switch the frequency */
1303 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1304 	cmd.resp_type = MMC_RSP_R1;
1305 	cmd.cmdarg = (mode << 31) | 0xffffff;
1306 	cmd.cmdarg &= ~(0xf << (group * 4));
1307 	cmd.cmdarg |= value << (group * 4);
1308 
1309 	data.dest = (char *)resp;
1310 	data.blocksize = 64;
1311 	data.blocks = 1;
1312 	data.flags = MMC_DATA_READ;
1313 
1314 	return mmc_send_cmd(mmc, &cmd, &data);
1315 }
1316 
1317 
/*
 * Discover an SD card's capabilities (version, bus width, high speed)
 * from its SCR, and switch it to high-speed mode when both card and
 * host support it. Fills in mmc->scr, mmc->version and mmc->card_caps.
 *
 * Returns 0 on success (including "high speed not available") or a
 * negative error code from the command transport.
 */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	/* SPI hosts skip the whole negotiation */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* the 8-byte SCR read is retried a few times on transport errors */
	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR is big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SCR_STRUCTURE/SD_SPEC fields select the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit promotes the card to 3.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch the card
	 * to HIGHSPEED mode even if the card supports SD_HIGHSPEED.
	 * This avoids further problems when the card and the host run in
	 * different modes.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* group 1 result nibble == 1 confirms the high-speed switch */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
1427 
/*
 * Read the 64-byte SD Status register (ACMD13) and extract the
 * allocation unit (AU) size plus the erase timeout/offset parameters
 * into mmc->ssr.
 *
 * The data transfer is retried on errors. Returns 0 on success (an
 * out-of-range AU_SIZE is only logged via debug(), not fatal) or a
 * negative error code from the command transport.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* SD status is an application command: CMD55 must precede it */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* register arrives big-endian; convert each word to host order */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE field indexes the sd_au_size[] lookup table */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE (in AUs) and ERASE_TIMEOUT fields */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* per-AU timeout and fixed offset, both in ms */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1483 
/*
 * Frequency base values for the CSD TRAN_SPEED field, indexed by its
 * low three bits. Divided by 10 to be nice to platforms without
 * floating point; the matching multipliers[] table is scaled up by 10.
 */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1492 
/* Multiplier values for TRAN_SPEED, indexed by bits [6:3] of the CSD
 * TRAN_SPEED field. Multiplied by 10 to be nice to platforms without
 * floating point (fbase[] above is divided by 10 to compensate).
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1514 
1515 #if !CONFIG_IS_ENABLED(DM_MMC)
1516 static void mmc_set_ios(struct mmc *mmc)
1517 {
1518 	if (mmc->cfg->ops->set_ios)
1519 		mmc->cfg->ops->set_ios(mmc);
1520 }
1521 
1522 static bool mmc_card_busy(struct mmc *mmc)
1523 {
1524 	if (!mmc->cfg->ops->card_busy)
1525 		return -ENOSYS;
1526 
1527 	return mmc->cfg->ops->card_busy(mmc);
1528 }
1529 
1530 static bool mmc_can_card_busy(struct mmc *)
1531 {
1532 	return !!mmc->cfg->ops->card_busy;
1533 }
1534 #endif
1535 
/*
 * Bring an identified card up to the Transfer State and read out its
 * geometry: CID, CSD and (for eMMC >= v4) EXT_CSD, then derive the
 * capacities, erase group size, bus width and clock, and finally fill
 * in the block device descriptor.
 *
 * Returns 0 on success or a negative error code; any failing command
 * aborts the sequence immediately.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq, tran_speed;
	u64 cmult, csize, capacity;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	bool has_parts = false;
	bool part_completed;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* SD cards pick their own RCA; read it from the response */
		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* For MMC, derive the spec version from the CSD SPEC_VERS field */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* C_SIZE / C_SIZE_MULT layout differs for high-capacity cards */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	/* capacity = (C_SIZE + 1) * 2^(C_SIZE_MULT + 2) * block length */
	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	/* clamp transfer block lengths to what this stack handles */
	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR if the card implements one and a value was set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check  ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			/*
			 * NOTE(review): the byte-3 term is promoted to int
			 * before the << 24, so values >= 0x80 (devices over
			 * 2^31 sectors) would sign-extend into the u64 -
			 * consider (u64) casts; confirm against JESD84.
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
		/* remember whether the device supports TRIM */
		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
			mmc->esr.mmc_can_trim = 1;

		/* BOOT/RPMB sizes are given in 128 KiB units (<< 17) */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		/* general purpose partition sizes (3 bytes each, LE) */
		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		/* enhanced user area size and start (valid only when the
		 * partitioning was completed) */
		if (part_completed) {
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			/* start address is in sectors for high-capacity */
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		/* high-capacity write protect group size, in sectors */
		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	/* set mmc->capacity for the currently selected hardware partition */
	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	/* probe bus-speed capabilities (SD CMD6 / eMMC HS_TIMING) */
	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		/* widen the bus with ACMD6 when both sides support it */
		if (mmc->card_caps & MMC_MODE_4BIT) {
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		/* 50 MHz in high-speed mode, 25 MHz otherwise */
		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = 50000000;
		else
			tran_speed = 25000000;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	/* decode manufacturer/serial/name/revision from the CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}
1928 
1929 static int mmc_send_if_cond(struct mmc *mmc)
1930 {
1931 	struct mmc_cmd cmd;
1932 	int err;
1933 
1934 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
1935 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1936 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1937 	cmd.resp_type = MMC_RSP_R7;
1938 
1939 	err = mmc_send_cmd(mmc, &cmd, NULL);
1940 
1941 	if (err)
1942 		return err;
1943 
1944 	if ((cmd.response[0] & 0xff) != 0xaa)
1945 		return -EOPNOTSUPP;
1946 	else
1947 		mmc->version = SD_VERSION_2;
1948 
1949 	return 0;
1950 }
1951 
1952 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations; weak no-op default that
 * boards may override when card power needs explicit sequencing. */
__weak void board_mmc_power_init(void)
{
}
1957 #endif
1958 
/*
 * Enable the card's power supply before initialization.
 *
 * With driver model (and CONFIG_DM_REGULATOR, non-SPL) the
 * "vmmc-supply" regulator referenced by the controller's node is
 * switched on; a missing property is not an error. Without driver
 * model the board_mmc_power_init() hook is called instead.
 *
 * Returns 0 on success or the regulator enable error.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* no regulator described for this host - not fatal */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
1988 
/*
 * Begin card initialization: card detect, power, host init, bus reset
 * (CMD0) and the SD/MMC operating-condition negotiation. The slow part
 * of init is finished later by mmc_complete_init(), so OCR polling can
 * overlap with other boot work.
 *
 * Returns 0 on success (init_in_progress is then set), -ENOMEDIUM when
 * no card is present, -EOPNOTSUPP when the card answers neither SD nor
 * MMC voltage selection, or another negative error code.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* nothing to do when the card is already fully initialized */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* start from the slowest, narrowest bus configuration */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2. The result is deliberately ignored:
	 * pre-2.0 SD and MMC cards simply fail CMD8, and err is
	 * overwritten by the ACMD41 below.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2061 
2062 static int mmc_complete_init(struct mmc *mmc)
2063 {
2064 	int err = 0;
2065 
2066 	mmc->init_in_progress = 0;
2067 	if (mmc->op_cond_pending)
2068 		err = mmc_complete_op_cond(mmc);
2069 
2070 	if (!err)
2071 		err = mmc_startup(mmc);
2072 	if (err)
2073 		mmc->has_init = 0;
2074 	else
2075 		mmc->has_init = 1;
2076 	return err;
2077 }
2078 
/*
 * Fully initialize a card: start (or resume) the init sequence and run
 * it to completion. A no-op when the card is already initialized.
 * On failure the error code and elapsed time are logged.
 *
 * Returns 0 on success or a negative error code.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	/* mmc_start_init() may already have run, e.g. via preinit */
	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2103 
2104 int mmc_set_dsr(struct mmc *mmc, u16 val)
2105 {
2106 	mmc->dsr = val;
2107 	return 0;
2108 }
2109 
/* CPU-specific MMC initializations; weak default signals "not handled"
 * by returning -1 so callers can fall back or skip. */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2115 
/* board-specific MMC initializations; weak default signals "not
 * handled" by returning -1 so mmc_probe() can try cpu_mmc_init(). */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2121 
/* Request (non-zero) or cancel (zero) early initialization of this
 * card; the flag is consulted by mmc_do_preinit(). */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2126 
2127 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL build with driver model: no bus enumeration is performed here. */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
2132 #elif CONFIG_IS_ENABLED(DM_MMC)
/*
 * Driver-model probe of all MMC controllers. Individual probe failures
 * are printed but do not abort the scan; returns non-zero only when the
 * MMC uclass itself cannot be obtained.
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		/* loop body intentionally empty - the by-seq lookup
		 * itself is the desired side effect */
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
2161 #else
2162 static int mmc_probe(bd_t *bis)
2163 {
2164 	if (board_mmc_init(bis) < 0)
2165 		cpu_mmc_init(bis);
2166 
2167 	return 0;
2168 }
2169 #endif
2170 
/*
 * One-time global MMC subsystem setup: initialize the legacy device
 * list (non-BLK, non-TINY configs), probe all controllers, print the
 * device list (non-SPL) and run any requested pre-initialization.
 * Guarded so only the first call does any work.
 *
 * Returns 0 on success or the error from mmc_probe().
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}
2195 
2196 #ifdef CONFIG_CMD_BKOPS_ENABLE
2197 int mmc_set_bkops_enable(struct mmc *mmc)
2198 {
2199 	int err;
2200 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2201 
2202 	err = mmc_send_ext_csd(mmc, ext_csd);
2203 	if (err) {
2204 		puts("Could not get ext_csd register values\n");
2205 		return err;
2206 	}
2207 
2208 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2209 		puts("Background operations not supported on device\n");
2210 		return -EMEDIUMTYPE;
2211 	}
2212 
2213 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2214 		puts("Background operations already enabled\n");
2215 		return 0;
2216 	}
2217 
2218 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2219 	if (err) {
2220 		puts("Failed to enable manual background operations\n");
2221 		return err;
2222 	}
2223 
2224 	puts("Enabled manual background operations\n");
2225 
2226 	return 0;
2227 }
2228 #endif
2229