xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision b86dc4195f38b5485788014794f2befd1fc2cc74)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
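/*
 * Allocation unit (AU) sizes in 512-byte sectors, indexed by the 4-bit
 * AU_SIZE field of the SD Status register (see sd_read_ssr() below).
 * For example, an AU_SIZE of 7 selects SZ_1M / 512 = 2048 sectors.
 */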
25 static const unsigned int sd_au_size[] = {
26 	0,		SZ_16K / 512,		SZ_32K / 512,
27 	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
28 	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
29 	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
30 	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
31 };
32 
33 static char mmc_ext_csd[512];
34 
35 #if CONFIG_IS_ENABLED(MMC_TINY)
36 static struct mmc mmc_static;
37 struct mmc *find_mmc_device(int dev_num)
38 {
39 	return &mmc_static;
40 }
41 
42 void mmc_do_preinit(void)
43 {
44 	struct mmc *m = &mmc_static;
45 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
46 	mmc_set_preinit(m, 1);
47 #endif
48 	if (m->preinit)
49 		mmc_start_init(m);
50 }
51 
52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
53 {
54 	return &mmc->block_dev;
55 }
56 #endif
57 
58 #if !CONFIG_IS_ENABLED(DM_MMC)
59 __weak int board_mmc_getwp(struct mmc *mmc)
60 {
61 	return -1;
62 }
63 
64 int mmc_getwp(struct mmc *mmc)
65 {
66 	int wp;
67 
68 	wp = board_mmc_getwp(mmc);
69 
70 	if (wp < 0) {
71 		if (mmc->cfg->ops->getwp)
72 			wp = mmc->cfg->ops->getwp(mmc);
73 		else
74 			wp = 0;
75 	}
76 
77 	return wp;
78 }
79 
80 __weak int board_mmc_getcd(struct mmc *mmc)
81 {
82 	return -1;
83 }
84 #endif
85 
86 #ifdef CONFIG_MMC_TRACE
87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
88 {
89 	printf("CMD_SEND:%d\n", cmd->cmdidx);
90 	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
91 }
92 
93 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
94 {
95 	int i;
96 	u8 *ptr;
97 
98 	if (ret) {
99 		printf("\t\tRET\t\t\t %d\n", ret);
100 	} else {
101 		switch (cmd->resp_type) {
102 		case MMC_RSP_NONE:
103 			printf("\t\tMMC_RSP_NONE\n");
104 			break;
105 		case MMC_RSP_R1:
106 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
107 				cmd->response[0]);
108 			break;
109 		case MMC_RSP_R1b:
110 			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
111 				cmd->response[0]);
112 			break;
113 		case MMC_RSP_R2:
114 			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
115 				cmd->response[0]);
116 			printf("\t\t          \t\t 0x%08X \n",
117 				cmd->response[1]);
118 			printf("\t\t          \t\t 0x%08X \n",
119 				cmd->response[2]);
120 			printf("\t\t          \t\t 0x%08X \n",
121 				cmd->response[3]);
122 			printf("\n");
123 			printf("\t\t\t\t\tDUMPING DATA\n");
124 			for (i = 0; i < 4; i++) {
125 				int j;
126 				printf("\t\t\t\t\t%03d - ", i*4);
127 				ptr = (u8 *)&cmd->response[i];
128 				ptr += 3;
129 				for (j = 0; j < 4; j++)
130 					printf("%02X ", *ptr--);
131 				printf("\n");
132 			}
133 			break;
134 		case MMC_RSP_R3:
135 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
136 				cmd->response[0]);
137 			break;
138 		default:
139 			printf("\t\tERROR MMC rsp not supported\n");
140 			break;
141 		}
142 	}
143 }
144 
145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
146 {
147 	int status;
148 
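	/*
	 * CURRENT_STATE is held in bits 12:9 of the card status,
	 * e.g. 4 = tran and 7 = prg.
	 */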
149 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
150 	printf("CURR STATE:%d\n", status);
151 }
152 #endif
153 
154 #if !CONFIG_IS_ENABLED(DM_MMC)
155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
156 {
157 	int ret;
158 
159 	mmmc_trace_before_send(mmc, cmd);
160 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
161 	mmmc_trace_after_send(mmc, cmd, ret);
162 
163 	return ret;
164 }
165 #endif
166 
167 int mmc_send_status(struct mmc *mmc, int timeout)
168 {
169 	struct mmc_cmd cmd;
170 	int err, retries = 5;
171 
172 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
173 	cmd.resp_type = MMC_RSP_R1;
174 	if (!mmc_host_is_spi(mmc))
175 		cmd.cmdarg = mmc->rca << 16;
176 
177 	while (1) {
178 		err = mmc_send_cmd(mmc, &cmd, NULL);
179 		if (!err) {
180 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
181 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
182 			     MMC_STATE_PRG)
183 				break;
184 			else if (cmd.response[0] & MMC_STATUS_MASK) {
185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
186 				printf("Status Error: 0x%08X\n",
187 					cmd.response[0]);
188 #endif
189 				return -ECOMM;
190 			}
191 		} else if (--retries < 0)
192 			return err;
193 
194 		if (timeout-- <= 0)
195 			break;
196 
197 		udelay(1000);
198 	}
199 
200 	mmc_trace_state(mmc, &cmd);
201 	if (timeout <= 0) {
202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
203 		printf("Timeout waiting card ready\n");
204 #endif
205 		return -ETIMEDOUT;
206 	}
207 
208 	return 0;
209 }
210 
211 int mmc_set_blocklen(struct mmc *mmc, int len)
212 {
213 	struct mmc_cmd cmd;
214 
215 	if (mmc_card_ddr(mmc))
216 		return 0;
217 
218 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
219 	cmd.resp_type = MMC_RSP_R1;
220 	cmd.cmdarg = len;
221 
222 	return mmc_send_cmd(mmc, &cmd, NULL);
223 }
224 
225 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
226 			   lbaint_t blkcnt)
227 {
228 	struct mmc_cmd cmd;
229 	struct mmc_data data;
230 
231 	if (blkcnt > 1)
232 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
233 	else
234 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
235 
236 	if (mmc->high_capacity)
237 		cmd.cmdarg = start;
238 	else
239 		cmd.cmdarg = start * mmc->read_bl_len;
240 
241 	cmd.resp_type = MMC_RSP_R1;
242 
243 	data.dest = dst;
244 	data.blocks = blkcnt;
245 	data.blocksize = mmc->read_bl_len;
246 	data.flags = MMC_DATA_READ;
247 
248 	if (mmc_send_cmd(mmc, &cmd, &data))
249 		return 0;
250 
251 	if (blkcnt > 1) {
252 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
253 		cmd.cmdarg = 0;
254 		cmd.resp_type = MMC_RSP_R1b;
255 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
256 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
257 			printf("mmc fail to send stop cmd\n");
258 #endif
259 			return 0;
260 		}
261 	}
262 
263 	return blkcnt;
264 }
265 
266 #if CONFIG_IS_ENABLED(BLK)
267 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
268 #else
269 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
270 		void *dst)
271 #endif
272 {
273 #if CONFIG_IS_ENABLED(BLK)
274 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
275 #endif
276 	int dev_num = block_dev->devnum;
277 	int err;
278 	lbaint_t cur, blocks_todo = blkcnt;
279 
280 	if (blkcnt == 0)
281 		return 0;
282 
283 	struct mmc *mmc = find_mmc_device(dev_num);
284 	if (!mmc)
285 		return 0;
286 
287 	if (CONFIG_IS_ENABLED(MMC_TINY))
288 		err = mmc_switch_part(mmc, block_dev->hwpart);
289 	else
290 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
291 
292 	if (err < 0)
293 		return 0;
294 
295 	if ((start + blkcnt) > block_dev->lba) {
296 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
297 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
298 			start + blkcnt, block_dev->lba);
299 #endif
300 		return 0;
301 	}
302 
303 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
304 		debug("%s: Failed to set blocklen\n", __func__);
305 		return 0;
306 	}
307 
308 	do {
309 		cur = (blocks_todo > mmc->cfg->b_max) ?
310 			mmc->cfg->b_max : blocks_todo;
311 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
312 			debug("%s: Failed to read blocks\n", __func__);
313 			int timeout = 0;
314 re_init_retry:
315 			timeout++;
316 			/*
317 			 * Try re-init seven times.
318 			 */
319 			if (timeout > 7) {
320 				printf("Re-init retry timeout\n");
321 				return 0;
322 			}
323 
324 			mmc->has_init = 0;
325 			if (mmc_init(mmc))
326 				return 0;
327 
328 			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
329 				printf("%s: Re-init mmc_read_blocks error\n",
330 				       __func__);
331 				goto re_init_retry;
332 			}
333 		}
334 		blocks_todo -= cur;
335 		start += cur;
336 		dst += cur * mmc->read_bl_len;
337 	} while (blocks_todo > 0);
338 
339 	return blkcnt;
340 }
341 
342 void mmc_set_clock(struct mmc *mmc, uint clock)
343 {
344 	if (clock > mmc->cfg->f_max)
345 		clock = mmc->cfg->f_max;
346 
347 	if (clock < mmc->cfg->f_min)
348 		clock = mmc->cfg->f_min;
349 
350 	mmc->clock = clock;
351 
352 	mmc_set_ios(mmc);
353 }
354 
355 static void mmc_set_bus_width(struct mmc *mmc, uint width)
356 {
357 	mmc->bus_width = width;
358 
359 	mmc_set_ios(mmc);
360 }
361 
362 static void mmc_set_timing(struct mmc *mmc, uint timing)
363 {
364 	mmc->timing = timing;
365 	mmc_set_ios(mmc);
366 }
367 
368 static int mmc_go_idle(struct mmc *mmc)
369 {
370 	struct mmc_cmd cmd;
371 	int err;
372 
373 	udelay(1000);
374 
375 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
376 	cmd.cmdarg = 0;
377 	cmd.resp_type = MMC_RSP_NONE;
378 
379 	err = mmc_send_cmd(mmc, &cmd, NULL);
380 
381 	if (err)
382 		return err;
383 
384 	udelay(2000);
385 
386 	return 0;
387 }
388 
389 static int sd_send_op_cond(struct mmc *mmc)
390 {
391 	int timeout = 1000;
392 	int err;
393 	struct mmc_cmd cmd;
394 
395 	while (1) {
396 		cmd.cmdidx = MMC_CMD_APP_CMD;
397 		cmd.resp_type = MMC_RSP_R1;
398 		cmd.cmdarg = 0;
399 
400 		err = mmc_send_cmd(mmc, &cmd, NULL);
401 
402 		if (err)
403 			return err;
404 
405 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
406 		cmd.resp_type = MMC_RSP_R3;
407 
408 		/*
409 		 * Most cards do not answer if some reserved bits
410 		 * in the OCR are set. However, some controllers
411 		 * can set bit 7 (reserved for low voltages), but
412 		 * how to manage low-voltage SD cards is not yet
413 		 * specified.
414 		 */
415 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
416 			(mmc->cfg->voltages & 0xff8000);
417 
418 		if (mmc->version == SD_VERSION_2)
419 			cmd.cmdarg |= OCR_HCS;
420 
421 		err = mmc_send_cmd(mmc, &cmd, NULL);
422 
423 		if (err)
424 			return err;
425 
426 		if (cmd.response[0] & OCR_BUSY)
427 			break;
428 
429 		if (timeout-- <= 0)
430 			return -EOPNOTSUPP;
431 
432 		udelay(1000);
433 	}
434 
435 	if (mmc->version != SD_VERSION_2)
436 		mmc->version = SD_VERSION_1_0;
437 
438 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
439 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
440 		cmd.resp_type = MMC_RSP_R3;
441 		cmd.cmdarg = 0;
442 
443 		err = mmc_send_cmd(mmc, &cmd, NULL);
444 
445 		if (err)
446 			return err;
447 	}
448 
449 	mmc->ocr = cmd.response[0];
450 
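	/* OCR bit 30 (OCR_HCS/CCS) set means a block-addressed SDHC/SDXC card */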
451 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
452 	mmc->rca = 0;
453 
454 	return 0;
455 }
456 
457 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
458 {
459 	struct mmc_cmd cmd;
460 	int err;
461 
462 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
463 	cmd.resp_type = MMC_RSP_R3;
464 	cmd.cmdarg = 0;
465 	if (use_arg && !mmc_host_is_spi(mmc))
466 		cmd.cmdarg = OCR_HCS |
467 			(mmc->cfg->voltages &
468 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
469 			(mmc->ocr & OCR_ACCESS_MODE);
470 
471 	err = mmc_send_cmd(mmc, &cmd, NULL);
472 	if (err)
473 		return err;
474 	mmc->ocr = cmd.response[0];
475 	return 0;
476 }
477 
478 static int mmc_send_op_cond(struct mmc *mmc)
479 {
480 	int err, i;
481 
482 	/* Some cards seem to need this */
483 	mmc_go_idle(mmc);
484 
485 	/* Ask the card for its capabilities */
486 	for (i = 0; i < 2; i++) {
487 		err = mmc_send_op_cond_iter(mmc, i != 0);
488 		if (err)
489 			return err;
490 
491 		/* exit if not busy (flag seems to be inverted) */
492 		if (mmc->ocr & OCR_BUSY)
493 			break;
494 	}
495 	mmc->op_cond_pending = 1;
496 	return 0;
497 }
498 
499 static int mmc_complete_op_cond(struct mmc *mmc)
500 {
501 	struct mmc_cmd cmd;
502 	int timeout = 1000;
503 	uint start;
504 	int err;
505 
506 	mmc->op_cond_pending = 0;
507 	if (!(mmc->ocr & OCR_BUSY)) {
508 		/* Some cards seem to need this */
509 		mmc_go_idle(mmc);
510 
511 		start = get_timer(0);
512 		while (1) {
513 			err = mmc_send_op_cond_iter(mmc, 1);
514 			if (err)
515 				return err;
516 			if (mmc->ocr & OCR_BUSY)
517 				break;
518 			if (get_timer(start) > timeout)
519 				return -EOPNOTSUPP;
520 			udelay(100);
521 		}
522 	}
523 
524 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
525 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
526 		cmd.resp_type = MMC_RSP_R3;
527 		cmd.cmdarg = 0;
528 
529 		err = mmc_send_cmd(mmc, &cmd, NULL);
530 
531 		if (err)
532 			return err;
533 
534 		mmc->ocr = cmd.response[0];
535 	}
536 
537 	mmc->version = MMC_VERSION_UNKNOWN;
538 
539 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
540 	mmc->rca = 1;
541 
542 	return 0;
543 }
544 
545 
546 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
547 {
548 	static int initialized;
549 	struct mmc_cmd cmd;
550 	struct mmc_data data;
551 	int err;
552 
553 	if (initialized) {
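	/*
	 * The EXT_CSD is fetched from the card only on the first call;
	 * later calls return the copy cached in mmc_ext_csd above, which
	 * is shared by every card handled by this code.
	 */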
554 		memcpy(ext_csd, mmc_ext_csd, 512);
555 		return 0;
556 	}
557 
558 	initialized = 1;
559 
560 	/* Get the Card Status Register */
561 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
562 	cmd.resp_type = MMC_RSP_R1;
563 	cmd.cmdarg = 0;
564 
565 	data.dest = (char *)ext_csd;
566 	data.blocks = 1;
567 	data.blocksize = MMC_MAX_BLOCK_LEN;
568 	data.flags = MMC_DATA_READ;
569 
570 	err = mmc_send_cmd(mmc, &cmd, &data);
571 	memcpy(mmc_ext_csd, ext_csd, 512);
572 
573 	return err;
574 }
575 
576 static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
577 {
578 	struct mmc_cmd cmd;
579 	u8 busy = true;
580 	uint start;
581 	int ret;
582 	int timeout = 1000;
583 
584 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
585 	cmd.resp_type = MMC_RSP_R1;
586 	cmd.cmdarg = mmc->rca << 16;
587 
588 	start = get_timer(0);
589 
590 	if (!send_status && !mmc_can_card_busy(mmc)) {
591 		mdelay(timeout);
592 		return 0;
593 	}
594 
595 	do {
596 		if (!send_status) {
597 			busy = mmc_card_busy(mmc);
598 		} else {
599 			ret = mmc_send_cmd(mmc, &cmd, NULL);
600 
601 			if (ret)
602 				return ret;
603 
604 			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
605 				return -EBADMSG;
606 			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
607 				MMC_STATE_PRG;
608 		}
609 
610 		if (get_timer(start) > timeout && busy)
611 			return -ETIMEDOUT;
612 	} while (busy);
613 
614 	return 0;
615 }
616 
617 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
618 			u8 send_status)
619 {
620 	struct mmc_cmd cmd;
621 	int retries = 3;
622 	int ret;
623 
624 	cmd.cmdidx = MMC_CMD_SWITCH;
625 	cmd.resp_type = MMC_RSP_R1b;
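	/*
	 * Write-byte access mode goes in bits 25:24, the EXT_CSD byte index
	 * in bits 23:16 and the new value in bits 15:8. For example, enabling
	 * high speed (index EXT_CSD_HS_TIMING = 185, value 1) encodes to
	 * 0x03b90100.
	 */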
626 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
627 				 (index << 16) |
628 				 (value << 8);
629 
630 	do {
631 		ret = mmc_send_cmd(mmc, &cmd, NULL);
632 
633 		if (!ret)
634 			return mmc_poll_for_busy(mmc, send_status);
635 	} while (--retries > 0 && ret);
636 
637 	return ret;
638 }
639 
640 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
641 {
642 	return __mmc_switch(mmc, set, index, value, true);
643 }
644 
645 static int mmc_select_bus_width(struct mmc *mmc)
646 {
647 	u32 ext_csd_bits[] = {
648 		EXT_CSD_BUS_WIDTH_8,
649 		EXT_CSD_BUS_WIDTH_4,
650 	};
651 	u32 bus_widths[] = {
652 		MMC_BUS_WIDTH_8BIT,
653 		MMC_BUS_WIDTH_4BIT,
654 	};
655 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
656 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
657 	u32 idx, bus_width = 0;
658 	int err = 0;
659 
660 	if (mmc->version < MMC_VERSION_4 ||
661 	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
662 		return 0;
663 
664 	err = mmc_send_ext_csd(mmc, ext_csd);
665 
666 	if (err)
667 		return err;
668 
669 	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;
670 
671 	/*
672 	 * Unlike SD, MMC cards don't have a configuration register reporting
673 	 * the supported bus width. So either the bus test command is run to
674 	 * identify the supported bus width, or the ext_csd read at the current
675 	 * bus width is compared against the ext_csd read earlier in 1-bit mode.
676 	 */
677 	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
678 		/*
679 		 * If the host is capable of 8-bit transfer, switch
680 		 * the device to 8-bit transfer mode. If the mmc
681 		 * switch command returns an error, fall back to
682 		 * 4-bit transfer mode. On success set the
683 		 * corresponding bus width on the host.
684 		 */
685 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
686 				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
687 		if (err)
688 			continue;
689 
690 		bus_width = bus_widths[idx];
691 		mmc_set_bus_width(mmc, bus_width);
692 
693 		err = mmc_send_ext_csd(mmc, test_csd);
694 
695 		if (err)
696 			continue;
697 
698 		/* Only compare read only fields */
699 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
700 			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
701 		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
702 			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
703 		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
704 			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
705 			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
706 		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
707 			&test_csd[EXT_CSD_SEC_CNT], 4)) {
708 			err = bus_width;
709 			break;
710 		} else {
711 			err = -EBADMSG;
712 		}
713 	}
714 
715 	return err;
716 }
717 
718 static const u8 tuning_blk_pattern_4bit[] = {
719 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
720 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
721 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
722 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
723 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
724 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
725 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
726 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
727 };
728 
729 static const u8 tuning_blk_pattern_8bit[] = {
730 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
731 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
732 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
733 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
734 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
735 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
736 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
737 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
738 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
739 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
740 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
741 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
742 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
743 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
744 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
745 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
746 };
747 
748 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
749 {
750 	struct mmc_cmd cmd;
751 	struct mmc_data data;
752 	const u8 *tuning_block_pattern;
753 	int size, err = 0;
754 	u8 *data_buf;
755 
756 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
757 		tuning_block_pattern = tuning_blk_pattern_8bit;
758 		size = sizeof(tuning_blk_pattern_8bit);
759 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
760 		tuning_block_pattern = tuning_blk_pattern_4bit;
761 		size = sizeof(tuning_blk_pattern_4bit);
762 	} else {
763 		return -EINVAL;
764 	}
765 
766 	data_buf = calloc(1, size);
767 	if (!data_buf)
768 		return -ENOMEM;
769 
770 	cmd.cmdidx = opcode;
771 	cmd.resp_type = MMC_RSP_R1;
772 	cmd.cmdarg = 0;
773 
774 	data.dest = (char *)data_buf;
775 	data.blocksize = size;
776 	data.blocks = 1;
777 	data.flags = MMC_DATA_READ;
778 
779 	err = mmc_send_cmd(mmc, &cmd, &data);
780 	if (err)
781 		goto out;
782 
783 	if (memcmp(data_buf, tuning_block_pattern, size))
784 		err = -EIO;
785 out:
786 	free(data_buf);
787 	return err;
788 }
789 
790 static int mmc_execute_tuning(struct mmc *mmc)
791 {
792 #ifdef CONFIG_DM_MMC
793 	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
794 #endif
795 	u32 opcode;
796 
797 	if (IS_SD(mmc))
798 		opcode = MMC_SEND_TUNING_BLOCK;
799 	else
800 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
801 
802 #ifndef CONFIG_DM_MMC
803 	if (mmc->cfg->ops->execute_tuning) {
804 		return mmc->cfg->ops->execute_tuning(mmc, opcode);
805 #else
806 	if (ops->execute_tuning) {
807 		return ops->execute_tuning(mmc->dev, opcode);
808 #endif
809 	} else {
810 		debug("Tuning feature required for HS200 mode.\n");
811 		return -EIO;
812 	}
813 }
814 
815 static int mmc_hs200_tuning(struct mmc *mmc)
816 {
817 	return mmc_execute_tuning(mmc);
818 }
819 
820 static int mmc_select_hs(struct mmc *mmc)
821 {
822 	int ret;
823 
824 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
825 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
826 
827 	if (!ret)
828 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
829 
830 	return ret;
831 }
832 
833 static int mmc_select_hs_ddr(struct mmc *mmc)
834 {
835 	u32 ext_csd_bits;
836 	int err = 0;
837 
838 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
839 		return 0;
840 
841 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
842 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
843 
844 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
845 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
846 	if (err)
847 		return err;
848 
849 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
850 
851 	return 0;
852 }
853 
854 static int mmc_select_hs200(struct mmc *mmc)
855 {
856 	int ret;
857 
858 	/*
859 	 * Set the bus width (4 or 8 bits) according to host support and
860 	 * switch to HS200 mode if the bus width is set successfully.
861 	 */
862 	ret = mmc_select_bus_width(mmc);
863 
864 	if (ret > 0) {
865 		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
866 				   EXT_CSD_HS_TIMING,
867 				   EXT_CSD_TIMING_HS200, false);
868 
869 		if (ret)
870 			return ret;
871 
872 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
873 	}
874 
875 	return ret;
876 }
877 
878 static int mmc_select_hs400(struct mmc *mmc)
879 {
880 	int ret;
881 
882 	/* Switch card to HS mode */
883 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
884 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
885 	if (ret)
886 		return ret;
887 
888 	/* Set host controller to HS timing */
889 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
890 
891 	/* Reduce frequency to HS frequency */
892 	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);
893 
894 	ret = mmc_send_status(mmc, 1000);
895 	if (ret)
896 		return ret;
897 
898 	/* Switch card to DDR */
899 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
900 			 EXT_CSD_BUS_WIDTH,
901 			 EXT_CSD_DDR_BUS_WIDTH_8);
902 	if (ret)
903 		return ret;
904 
905 	/* Switch card to HS400 */
906 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
907 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
908 	if (ret)
909 		return ret;
910 
911 	/* Set host controller to HS400 timing and frequency */
912 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);
913 
914 	return ret;
915 }
916 
917 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
918 {
919 	u8 card_type;
920 	u32 host_caps, avail_type = 0;
921 
922 	card_type = ext_csd[EXT_CSD_CARD_TYPE];
923 	host_caps = mmc->cfg->host_caps;
924 
925 	if ((host_caps & MMC_MODE_HS) &&
926 	    (card_type & EXT_CSD_CARD_TYPE_26))
927 		avail_type |= EXT_CSD_CARD_TYPE_26;
928 
929 	if ((host_caps & MMC_MODE_HS) &&
930 	    (card_type & EXT_CSD_CARD_TYPE_52))
931 		avail_type |= EXT_CSD_CARD_TYPE_52;
932 
933 	/*
934 	 * For the moment, U-Boot doesn't support signal voltage
935 	 * switching, therefore we assume the host supports DDR52
936 	 * at 1.8 V or 3.3 V I/O (1.2 V I/O is not supported; the
937 	 * same applies to HS200 and HS400).
938 	 */
939 	if ((host_caps & MMC_MODE_DDR_52MHz) &&
940 	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
941 		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
942 
943 	if ((host_caps & MMC_MODE_HS200) &&
944 	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
945 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
946 
947 	/*
948 	 * If host can support HS400, it means that host can also
949 	 * support HS200.
950 	 */
951 	if ((host_caps & MMC_MODE_HS400) &&
952 	    (host_caps & MMC_MODE_8BIT) &&
953 	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
954 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
955 				EXT_CSD_CARD_TYPE_HS400_1_8V;
956 
957 	if ((host_caps & MMC_MODE_HS400ES) &&
958 	    (host_caps & MMC_MODE_8BIT) &&
959 	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
960 	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
961 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
962 				EXT_CSD_CARD_TYPE_HS400_1_8V |
963 				EXT_CSD_CARD_TYPE_HS400ES;
964 
965 	return avail_type;
966 }
967 
968 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
969 {
970 	int clock = 0;
971 
972 	if (mmc_card_hs(mmc))
973 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
974 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
975 	else if (mmc_card_hs200(mmc) ||
976 		 mmc_card_hs400(mmc) ||
977 		 mmc_card_hs400es(mmc))
978 		clock = MMC_HS200_MAX_DTR;
979 
980 	mmc_set_clock(mmc, clock);
981 }
982 
983 static int mmc_change_freq(struct mmc *mmc)
984 {
985 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
986 	u32 avail_type;
987 	int err;
988 
989 	mmc->card_caps = 0;
990 
991 	if (mmc_host_is_spi(mmc))
992 		return 0;
993 
994 	/* Only version 4 supports high-speed */
995 	if (mmc->version < MMC_VERSION_4)
996 		return 0;
997 
998 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
999 
1000 	err = mmc_send_ext_csd(mmc, ext_csd);
1001 
1002 	if (err)
1003 		return err;
1004 
1005 	avail_type = mmc_select_card_type(mmc, ext_csd);
1006 
1007 	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
1008 		err = mmc_select_hs200(mmc);
1009 	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
1010 		err = mmc_select_hs(mmc);
1011 	else
1012 		err = -EINVAL;
1013 
1014 	if (err)
1015 		return err;
1016 
1017 	mmc_set_bus_speed(mmc, avail_type);
1018 
1019 	if (mmc_card_hs200(mmc)) {
1020 		err = mmc_hs200_tuning(mmc);
1021 		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1022 		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
1023 			err = mmc_select_hs400(mmc);
1024 			mmc_set_bus_speed(mmc, avail_type);
1025 		}
1026 	} else if (!mmc_card_hs400es(mmc)) {
1027 		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
1028 		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
1029 			err = mmc_select_hs_ddr(mmc);
1030 	}
1031 
1032 	return err;
1033 }
1034 
1035 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1036 {
1037 	switch (part_num) {
1038 	case 0:
1039 		mmc->capacity = mmc->capacity_user;
1040 		break;
1041 	case 1:
1042 	case 2:
1043 		mmc->capacity = mmc->capacity_boot;
1044 		break;
1045 	case 3:
1046 		mmc->capacity = mmc->capacity_rpmb;
1047 		break;
1048 	case 4:
1049 	case 5:
1050 	case 6:
1051 	case 7:
1052 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1053 		break;
1054 	default:
1055 		return -1;
1056 	}
1057 
1058 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1059 
1060 	return 0;
1061 }
1062 
1063 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1064 {
1065 	int ret;
1066 
1067 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1068 			 (mmc->part_config & ~PART_ACCESS_MASK)
1069 			 | (part_num & PART_ACCESS_MASK));
1070 
1071 	/*
1072 	 * Set the capacity if the switch succeeded or was intended
1073 	 * to return to representing the raw device.
1074 	 */
1075 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1076 		ret = mmc_set_capacity(mmc, part_num);
1077 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1078 	}
1079 
1080 	return ret;
1081 }
1082 
1083 int mmc_hwpart_config(struct mmc *mmc,
1084 		      const struct mmc_hwpart_conf *conf,
1085 		      enum mmc_hwpart_conf_mode mode)
1086 {
1087 	u8 part_attrs = 0;
1088 	u32 enh_size_mult;
1089 	u32 enh_start_addr;
1090 	u32 gp_size_mult[4];
1091 	u32 max_enh_size_mult;
1092 	u32 tot_enh_size_mult = 0;
1093 	u8 wr_rel_set;
1094 	int i, pidx, err;
1095 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1096 
1097 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1098 		return -EINVAL;
1099 
1100 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1101 		printf("eMMC >= 4.4 required for enhanced user data area\n");
1102 		return -EMEDIUMTYPE;
1103 	}
1104 
1105 	if (!(mmc->part_support & PART_SUPPORT)) {
1106 		printf("Card does not support partitioning\n");
1107 		return -EMEDIUMTYPE;
1108 	}
1109 
1110 	if (!mmc->hc_wp_grp_size) {
1111 		printf("Card does not define HC WP group size\n");
1112 		return -EMEDIUMTYPE;
1113 	}
1114 
1115 	/* check partition alignment and total enhanced size */
1116 	if (conf->user.enh_size) {
1117 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1118 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1119 			printf("User data enhanced area not HC WP group "
1120 			       "size aligned\n");
1121 			return -EINVAL;
1122 		}
1123 		part_attrs |= EXT_CSD_ENH_USR;
1124 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1125 		if (mmc->high_capacity) {
1126 			enh_start_addr = conf->user.enh_start;
1127 		} else {
1128 			enh_start_addr = (conf->user.enh_start << 9);
1129 		}
1130 	} else {
1131 		enh_size_mult = 0;
1132 		enh_start_addr = 0;
1133 	}
1134 	tot_enh_size_mult += enh_size_mult;
1135 
1136 	for (pidx = 0; pidx < 4; pidx++) {
1137 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1138 			printf("GP%i partition not HC WP group size "
1139 			       "aligned\n", pidx+1);
1140 			return -EINVAL;
1141 		}
1142 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1143 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1144 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1145 			tot_enh_size_mult += gp_size_mult[pidx];
1146 		}
1147 	}
1148 
1149 	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
1150 		printf("Card does not support enhanced attribute\n");
1151 		return -EMEDIUMTYPE;
1152 	}
1153 
1154 	err = mmc_send_ext_csd(mmc, ext_csd);
1155 	if (err)
1156 		return err;
1157 
1158 	max_enh_size_mult =
1159 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1160 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1161 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1162 	if (tot_enh_size_mult > max_enh_size_mult) {
1163 		printf("Total enhanced size exceeds maximum (%u > %u)\n",
1164 		       tot_enh_size_mult, max_enh_size_mult);
1165 		return -EMEDIUMTYPE;
1166 	}
1167 
1168 	/* The default value of EXT_CSD_WR_REL_SET is device
1169 	 * dependent; the values can only be changed if the
1170 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1171 	 * changed only once and before partitioning is completed. */
1172 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1173 	if (conf->user.wr_rel_change) {
1174 		if (conf->user.wr_rel_set)
1175 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1176 		else
1177 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1178 	}
1179 	for (pidx = 0; pidx < 4; pidx++) {
1180 		if (conf->gp_part[pidx].wr_rel_change) {
1181 			if (conf->gp_part[pidx].wr_rel_set)
1182 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1183 			else
1184 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1185 		}
1186 	}
1187 
1188 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1189 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1190 		puts("Card does not support host controlled partition write "
1191 		     "reliability settings\n");
1192 		return -EMEDIUMTYPE;
1193 	}
1194 
1195 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1196 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1197 		printf("Card already partitioned\n");
1198 		return -EPERM;
1199 	}
1200 
1201 	if (mode == MMC_HWPART_CONF_CHECK)
1202 		return 0;
1203 
1204 	/* Partitioning requires high-capacity size definitions */
1205 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1206 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1207 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1208 
1209 		if (err)
1210 			return err;
1211 
1212 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1213 
1214 		/* update erase group size to be high-capacity */
1215 		mmc->erase_grp_size =
1216 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1217 
1218 	}
1219 
1220 	/* all OK, write the configuration */
1221 	for (i = 0; i < 4; i++) {
1222 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1223 				 EXT_CSD_ENH_START_ADDR+i,
1224 				 (enh_start_addr >> (i*8)) & 0xFF);
1225 		if (err)
1226 			return err;
1227 	}
1228 	for (i = 0; i < 3; i++) {
1229 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1230 				 EXT_CSD_ENH_SIZE_MULT+i,
1231 				 (enh_size_mult >> (i*8)) & 0xFF);
1232 		if (err)
1233 			return err;
1234 	}
1235 	for (pidx = 0; pidx < 4; pidx++) {
1236 		for (i = 0; i < 3; i++) {
1237 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1238 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1239 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1240 			if (err)
1241 				return err;
1242 		}
1243 	}
1244 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1245 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1246 	if (err)
1247 		return err;
1248 
1249 	if (mode == MMC_HWPART_CONF_SET)
1250 		return 0;
1251 
1252 	/* The WR_REL_SET is a write-once register but shall be
1253 	 * written before setting PART_SETTING_COMPLETED. As it is
1254 	 * write-once we can only write it when completing the
1255 	 * partitioning. */
1256 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1257 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1258 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1259 		if (err)
1260 			return err;
1261 	}
1262 
1263 	/* Setting PART_SETTING_COMPLETED confirms the partition
1264 	 * configuration but it only becomes effective after power
1265 	 * cycle, so we do not adjust the partition related settings
1266 	 * in the mmc struct. */
1267 
1268 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1269 			 EXT_CSD_PARTITION_SETTING,
1270 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1271 	if (err)
1272 		return err;
1273 
1274 	return 0;
1275 }
1276 
1277 #if !CONFIG_IS_ENABLED(DM_MMC)
1278 int mmc_getcd(struct mmc *mmc)
1279 {
1280 	int cd;
1281 
1282 	cd = board_mmc_getcd(mmc);
1283 
1284 	if (cd < 0) {
1285 		if (mmc->cfg->ops->getcd)
1286 			cd = mmc->cfg->ops->getcd(mmc);
1287 		else
1288 			cd = 1;
1289 	}
1290 
1291 	return cd;
1292 }
1293 #endif
1294 
1295 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1296 {
1297 	struct mmc_cmd cmd;
1298 	struct mmc_data data;
1299 
1300 	/* Switch the frequency */
1301 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1302 	cmd.resp_type = MMC_RSP_R1;
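	/*
	 * Mode goes in bit 31 and one 4-bit function selector per group in
	 * the low 24 bits; e.g. switching access mode (group argument 0) to
	 * function 1 (high speed) in SD_SWITCH_SWITCH mode gives 0x80fffff1.
	 */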
1303 	cmd.cmdarg = (mode << 31) | 0xffffff;
1304 	cmd.cmdarg &= ~(0xf << (group * 4));
1305 	cmd.cmdarg |= value << (group * 4);
1306 
1307 	data.dest = (char *)resp;
1308 	data.blocksize = 64;
1309 	data.blocks = 1;
1310 	data.flags = MMC_DATA_READ;
1311 
1312 	return mmc_send_cmd(mmc, &cmd, &data);
1313 }
1314 
1315 
1316 static int sd_change_freq(struct mmc *mmc)
1317 {
1318 	int err;
1319 	struct mmc_cmd cmd;
1320 	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
1321 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1322 	struct mmc_data data;
1323 	int timeout;
1324 
1325 	mmc->card_caps = 0;
1326 
1327 	if (mmc_host_is_spi(mmc))
1328 		return 0;
1329 
1330 	/* Read the SCR to find out if this card supports higher speeds */
1331 	cmd.cmdidx = MMC_CMD_APP_CMD;
1332 	cmd.resp_type = MMC_RSP_R1;
1333 	cmd.cmdarg = mmc->rca << 16;
1334 
1335 	err = mmc_send_cmd(mmc, &cmd, NULL);
1336 
1337 	if (err)
1338 		return err;
1339 
1340 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1341 	cmd.resp_type = MMC_RSP_R1;
1342 	cmd.cmdarg = 0;
1343 
1344 	timeout = 3;
1345 
1346 retry_scr:
1347 	data.dest = (char *)scr;
1348 	data.blocksize = 8;
1349 	data.blocks = 1;
1350 	data.flags = MMC_DATA_READ;
1351 
1352 	err = mmc_send_cmd(mmc, &cmd, &data);
1353 
1354 	if (err) {
1355 		if (timeout--)
1356 			goto retry_scr;
1357 
1358 		return err;
1359 	}
1360 
1361 	mmc->scr[0] = __be32_to_cpu(scr[0]);
1362 	mmc->scr[1] = __be32_to_cpu(scr[1]);
1363 
1364 	switch ((mmc->scr[0] >> 24) & 0xf) {
1365 	case 0:
1366 		mmc->version = SD_VERSION_1_0;
1367 		break;
1368 	case 1:
1369 		mmc->version = SD_VERSION_1_10;
1370 		break;
1371 	case 2:
1372 		mmc->version = SD_VERSION_2;
1373 		if ((mmc->scr[0] >> 15) & 0x1)
1374 			mmc->version = SD_VERSION_3;
1375 		break;
1376 	default:
1377 		mmc->version = SD_VERSION_1_0;
1378 		break;
1379 	}
1380 
1381 	if (mmc->scr[0] & SD_DATA_4BIT)
1382 		mmc->card_caps |= MMC_MODE_4BIT;
1383 
1384 	/* Version 1.0 doesn't support switching */
1385 	if (mmc->version == SD_VERSION_1_0)
1386 		return 0;
1387 
1388 	timeout = 4;
1389 	while (timeout--) {
1390 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1391 				(u8 *)switch_status);
1392 
1393 		if (err)
1394 			return err;
1395 
1396 		/* The high-speed function is busy.  Try again */
1397 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1398 			break;
1399 	}
1400 
1401 	/* If high-speed isn't supported, we return */
1402 	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
1403 		return 0;
1404 
1405 	/*
1406 	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
1407 	 * HIGHSPEED mode even if the card supports SD_HIGHSPEED.
1408 	 * This avoids further problems when the card and the host
1409 	 * run in different modes.
1410 	 */
1411 	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
1412 		(mmc->cfg->host_caps & MMC_MODE_HS)))
1413 		return 0;
1414 
1415 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1416 
1417 	if (err)
1418 		return err;
1419 
1420 	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
1421 		mmc->card_caps |= MMC_MODE_HS;
1422 
1423 	return 0;
1424 }
1425 
1426 static int sd_read_ssr(struct mmc *mmc)
1427 {
1428 	int err, i;
1429 	struct mmc_cmd cmd;
1430 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1431 	struct mmc_data data;
1432 	int timeout = 3;
1433 	unsigned int au, eo, et, es;
1434 
1435 	cmd.cmdidx = MMC_CMD_APP_CMD;
1436 	cmd.resp_type = MMC_RSP_R1;
1437 	cmd.cmdarg = mmc->rca << 16;
1438 
1439 	err = mmc_send_cmd(mmc, &cmd, NULL);
1440 	if (err)
1441 		return err;
1442 
1443 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1444 	cmd.resp_type = MMC_RSP_R1;
1445 	cmd.cmdarg = 0;
1446 
1447 retry_ssr:
1448 	data.dest = (char *)ssr;
1449 	data.blocksize = 64;
1450 	data.blocks = 1;
1451 	data.flags = MMC_DATA_READ;
1452 
1453 	err = mmc_send_cmd(mmc, &cmd, &data);
1454 	if (err) {
1455 		if (timeout--)
1456 			goto retry_ssr;
1457 
1458 		return err;
1459 	}
1460 
1461 	for (i = 0; i < 16; i++)
1462 		ssr[i] = be32_to_cpu(ssr[i]);
1463 
1464 	au = (ssr[2] >> 12) & 0xF;
1465 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1466 		mmc->ssr.au = sd_au_size[au];
1467 		es = (ssr[3] >> 24) & 0xFF;
1468 		es |= (ssr[2] & 0xFF) << 8;
1469 		et = (ssr[3] >> 18) & 0x3F;
1470 		if (es && et) {
1471 			eo = (ssr[3] >> 16) & 0x3;
1472 			mmc->ssr.erase_timeout = (et * 1000) / es;
1473 			mmc->ssr.erase_offset = eo * 1000;
1474 		}
1475 	} else {
1476 		debug("Invalid Allocation Unit Size.\n");
1477 	}
1478 
1479 	return 0;
1480 }
1481 
1482 /* frequency bases */
1483 /* divided by 10 to be nice to platforms without floating point */
1484 static const int fbase[] = {
1485 	10000,
1486 	100000,
1487 	1000000,
1488 	10000000,
1489 };
1490 
1491 /* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1492  * to platforms without floating point.
1493  */
1494 static const u8 multipliers[] = {
1495 	0,	/* reserved */
1496 	10,
1497 	12,
1498 	13,
1499 	15,
1500 	20,
1501 	25,
1502 	30,
1503 	35,
1504 	40,
1505 	45,
1506 	50,
1507 	55,
1508 	60,
1509 	70,
1510 	80,
1511 };
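/*
 * Worked example of the TRAN_SPEED decoding in mmc_startup(): a typical
 * TRAN_SPEED byte of 0x32 has frequency unit 2 and multiplier index 6,
 * so freq = fbase[2] = 1000000 and mult = multipliers[6] = 25, giving
 * tran_speed = 25000000, i.e. the usual 25 MHz default speed.
 */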
1512 
1513 #if !CONFIG_IS_ENABLED(DM_MMC)
1514 static void mmc_set_ios(struct mmc *mmc)
1515 {
1516 	if (mmc->cfg->ops->set_ios)
1517 		mmc->cfg->ops->set_ios(mmc);
1518 }
1519 
1520 static bool mmc_card_busy(struct mmc *mmc)
1521 {
1522 	if (!mmc->cfg->ops->card_busy)
1523 		return -ENOSYS;
1524 
1525 	return mmc->cfg->ops->card_busy(mmc);
1526 }
1527 
1528 static bool mmc_can_card_busy(struct mmc *mmc)
1529 {
1530 	return !!mmc->cfg->ops->card_busy;
1531 }
1532 #endif
1533 
1534 static int mmc_startup(struct mmc *mmc)
1535 {
1536 	int err, i;
1537 	uint mult, freq, tran_speed;
1538 	u64 cmult, csize, capacity;
1539 	struct mmc_cmd cmd;
1540 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1541 	bool has_parts = false;
1542 	bool part_completed;
1543 	struct blk_desc *bdesc;
1544 
1545 #ifdef CONFIG_MMC_SPI_CRC_ON
1546 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1547 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1548 		cmd.resp_type = MMC_RSP_R1;
1549 		cmd.cmdarg = 1;
1550 		err = mmc_send_cmd(mmc, &cmd, NULL);
1551 
1552 		if (err)
1553 			return err;
1554 	}
1555 #endif
1556 
1557 	/* Put the Card in Identify Mode */
1558 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1559 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1560 	cmd.resp_type = MMC_RSP_R2;
1561 	cmd.cmdarg = 0;
1562 
1563 	err = mmc_send_cmd(mmc, &cmd, NULL);
1564 
1565 	if (err)
1566 		return err;
1567 
1568 	memcpy(mmc->cid, cmd.response, 16);
1569 
1570 	/*
1571 	 * For MMC cards, set the Relative Address.
1572 	 * For SD cards, get the Relative Address.
1573 	 * This also puts the cards into Standby State.
1574 	 */
1575 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1576 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1577 		cmd.cmdarg = mmc->rca << 16;
1578 		cmd.resp_type = MMC_RSP_R6;
1579 
1580 		err = mmc_send_cmd(mmc, &cmd, NULL);
1581 
1582 		if (err)
1583 			return err;
1584 
1585 		if (IS_SD(mmc))
1586 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1587 	}
1588 
1589 	/* Get the Card-Specific Data */
1590 	cmd.cmdidx = MMC_CMD_SEND_CSD;
1591 	cmd.resp_type = MMC_RSP_R2;
1592 	cmd.cmdarg = mmc->rca << 16;
1593 
1594 	err = mmc_send_cmd(mmc, &cmd, NULL);
1595 
1596 	if (err)
1597 		return err;
1598 
1599 	mmc->csd[0] = cmd.response[0];
1600 	mmc->csd[1] = cmd.response[1];
1601 	mmc->csd[2] = cmd.response[2];
1602 	mmc->csd[3] = cmd.response[3];
1603 
1604 	if (mmc->version == MMC_VERSION_UNKNOWN) {
1605 		int version = (cmd.response[0] >> 26) & 0xf;
1606 
1607 		switch (version) {
1608 		case 0:
1609 			mmc->version = MMC_VERSION_1_2;
1610 			break;
1611 		case 1:
1612 			mmc->version = MMC_VERSION_1_4;
1613 			break;
1614 		case 2:
1615 			mmc->version = MMC_VERSION_2_2;
1616 			break;
1617 		case 3:
1618 			mmc->version = MMC_VERSION_3;
1619 			break;
1620 		case 4:
1621 			mmc->version = MMC_VERSION_4;
1622 			break;
1623 		default:
1624 			mmc->version = MMC_VERSION_1_2;
1625 			break;
1626 		}
1627 	}
1628 
1629 	/* divide frequency by 10, since the mults are 10x bigger */
1630 	freq = fbase[(cmd.response[0] & 0x7)];
1631 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1632 
1633 	tran_speed = freq * mult;
1634 
1635 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1636 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1637 
1638 	if (IS_SD(mmc))
1639 		mmc->write_bl_len = mmc->read_bl_len;
1640 	else
1641 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1642 
1643 	if (mmc->high_capacity) {
1644 		csize = (mmc->csd[1] & 0x3f) << 16
1645 			| (mmc->csd[2] & 0xffff0000) >> 16;
1646 		cmult = 8;
1647 	} else {
1648 		csize = (mmc->csd[1] & 0x3ff) << 2
1649 			| (mmc->csd[2] & 0xc0000000) >> 30;
1650 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
1651 	}
1652 
1653 	mmc->capacity_user = (csize + 1) << (cmult + 2);
1654 	mmc->capacity_user *= mmc->read_bl_len;
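	/*
	 * For example, a standard-capacity card with C_SIZE = 4095,
	 * C_SIZE_MULT = 7 and a 512-byte read_bl_len works out to
	 * (4095 + 1) << (7 + 2) = 2097152 blocks, i.e. 1 GiB.
	 */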
1655 	mmc->capacity_boot = 0;
1656 	mmc->capacity_rpmb = 0;
1657 	for (i = 0; i < 4; i++)
1658 		mmc->capacity_gp[i] = 0;
1659 
1660 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1661 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1662 
1663 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1664 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1665 
1666 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1667 		cmd.cmdidx = MMC_CMD_SET_DSR;
1668 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1669 		cmd.resp_type = MMC_RSP_NONE;
1670 		if (mmc_send_cmd(mmc, &cmd, NULL))
1671 			printf("MMC: SET_DSR failed\n");
1672 	}
1673 
1674 	/* Select the card, and put it into Transfer Mode */
1675 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1676 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
1677 		cmd.resp_type = MMC_RSP_R1;
1678 		cmd.cmdarg = mmc->rca << 16;
1679 		err = mmc_send_cmd(mmc, &cmd, NULL);
1680 
1681 		if (err)
1682 			return err;
1683 	}
1684 
1685 	/*
1686 	 * For SD, its erase group is always one sector
1687 	 */
1688 	mmc->erase_grp_size = 1;
1689 	mmc->part_config = MMCPART_NOAVAILABLE;
1690 	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
1691 		/* check ext_csd version and capacity */
1692 		err = mmc_send_ext_csd(mmc, ext_csd);
1693 		if (err)
1694 			return err;
1695 		if (ext_csd[EXT_CSD_REV] >= 2) {
1696 			/*
1697 			 * According to the JEDEC Standard, the capacity
1698 			 * reported in ext_csd is only valid if it is
1699 			 * larger than 2 GB
1700 			 */
1701 			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1702 					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1703 					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1704 					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1705 			capacity *= MMC_MAX_BLOCK_LEN;
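			/*
			 * e.g. a SEC_CNT of 0x01000000 (16777216 sectors)
			 * corresponds to 8 GiB, well above the 2 GiB
			 * threshold checked below.
			 */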
1706 			if ((capacity >> 20) > 2 * 1024)
1707 				mmc->capacity_user = capacity;
1708 		}
1709 
1710 		switch (ext_csd[EXT_CSD_REV]) {
1711 		case 1:
1712 			mmc->version = MMC_VERSION_4_1;
1713 			break;
1714 		case 2:
1715 			mmc->version = MMC_VERSION_4_2;
1716 			break;
1717 		case 3:
1718 			mmc->version = MMC_VERSION_4_3;
1719 			break;
1720 		case 5:
1721 			mmc->version = MMC_VERSION_4_41;
1722 			break;
1723 		case 6:
1724 			mmc->version = MMC_VERSION_4_5;
1725 			break;
1726 		case 7:
1727 			mmc->version = MMC_VERSION_5_0;
1728 			break;
1729 		case 8:
1730 			mmc->version = MMC_VERSION_5_1;
1731 			break;
1732 		}
1733 
1734 		/* The partition data may be non-zero but it is only
1735 		 * effective if PARTITION_SETTING_COMPLETED is set in
1736 		 * EXT_CSD, so ignore any data if this bit is not set,
1737 		 * except for enabling the high-capacity group size
1738 		 * definition (see below). */
1739 		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1740 				    EXT_CSD_PARTITION_SETTING_COMPLETED);
1741 
1742 		/* store the partition info of emmc */
1743 		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1744 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1745 		    ext_csd[EXT_CSD_BOOT_MULT])
1746 			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1747 		if (part_completed &&
1748 		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1749 			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1750 		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
1751 			mmc->esr.mmc_can_trim = 1;
1752 
1753 		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1754 
1755 		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1756 
1757 		for (i = 0; i < 4; i++) {
1758 			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1759 			uint mult = (ext_csd[idx + 2] << 16) +
1760 				(ext_csd[idx + 1] << 8) + ext_csd[idx];
1761 			if (mult)
1762 				has_parts = true;
1763 			if (!part_completed)
1764 				continue;
1765 			mmc->capacity_gp[i] = mult;
1766 			mmc->capacity_gp[i] *=
1767 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1768 			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1769 			mmc->capacity_gp[i] <<= 19;
1770 		}
1771 
1772 		if (part_completed) {
1773 			mmc->enh_user_size =
1774 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
1775 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
1776 				ext_csd[EXT_CSD_ENH_SIZE_MULT];
1777 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1778 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1779 			mmc->enh_user_size <<= 19;
1780 			mmc->enh_user_start =
1781 				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
1782 				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
1783 				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
1784 				ext_csd[EXT_CSD_ENH_START_ADDR];
1785 			if (mmc->high_capacity)
1786 				mmc->enh_user_start <<= 9;
1787 		}
1788 
1789 		/*
1790 		 * Host needs to enable ERASE_GRP_DEF bit if device is
1791 		 * partitioned. This bit will be lost after every reset
1792 		 * or power off, which affects the erase size.
1793 		 */
1794 		if (part_completed)
1795 			has_parts = true;
1796 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1797 		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1798 			has_parts = true;
1799 		if (has_parts) {
1800 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1801 				EXT_CSD_ERASE_GROUP_DEF, 1);
1802 
1803 			if (err)
1804 				return err;
1805 			else
1806 				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1807 		}
1808 
1809 		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1810 			/* Read out group size from ext_csd */
1811 			mmc->erase_grp_size =
1812 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
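			/*
			 * HC_ERASE_GRP_SIZE is in units of 512 KiB, i.e.
			 * 1024 sectors, so e.g. a value of 4 gives 2 MiB
			 * erase groups.
			 */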
1813 			/*
1814 			 * if high capacity and partition setting completed
1815 			 * SEC_COUNT is valid even if it is smaller than 2 GiB
1816 			 * JEDEC Standard JESD84-B45, 6.2.4
1817 			 */
1818 			if (mmc->high_capacity && part_completed) {
1819 				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1820 					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1821 					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1822 					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1823 				capacity *= MMC_MAX_BLOCK_LEN;
1824 				mmc->capacity_user = capacity;
1825 			}
1826 		} else {
1827 			/* Calculate the group size from the csd value. */
1828 			int erase_gsz, erase_gmul;
1829 			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1830 			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1831 			mmc->erase_grp_size = (erase_gsz + 1)
1832 				* (erase_gmul + 1);
1833 		}
1834 
1835 		mmc->hc_wp_grp_size = 1024
1836 			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1837 			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1838 
1839 		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1840 	}
1841 
1842 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1843 	if (err)
1844 		return err;
1845 
1846 	if (IS_SD(mmc))
1847 		err = sd_change_freq(mmc);
1848 	else
1849 		err = mmc_change_freq(mmc);
1850 
1851 	if (err)
1852 		return err;
1853 
1854 	/* Restrict card's capabilities by what the host can do */
1855 	mmc->card_caps &= mmc->cfg->host_caps;
1856 
1857 	if (IS_SD(mmc)) {
1858 		if (mmc->card_caps & MMC_MODE_4BIT) {
1859 			cmd.cmdidx = MMC_CMD_APP_CMD;
1860 			cmd.resp_type = MMC_RSP_R1;
1861 			cmd.cmdarg = mmc->rca << 16;
1862 
1863 			err = mmc_send_cmd(mmc, &cmd, NULL);
1864 			if (err)
1865 				return err;
1866 
1867 			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1868 			cmd.resp_type = MMC_RSP_R1;
1869 			cmd.cmdarg = 2;
1870 			err = mmc_send_cmd(mmc, &cmd, NULL);
1871 			if (err)
1872 				return err;
1873 
1874 			mmc_set_bus_width(mmc, 4);
1875 		}
1876 
1877 		err = sd_read_ssr(mmc);
1878 		if (err)
1879 			return err;
1880 
1881 		if (mmc->card_caps & MMC_MODE_HS)
1882 			tran_speed = 50000000;
1883 		else
1884 			tran_speed = 25000000;
1885 
1886 		mmc_set_clock(mmc, tran_speed);
1887 	}
1888 
1889 	/* Fix the block length for DDR mode */
1890 	if (mmc_card_ddr(mmc)) {
1891 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1892 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1893 	}
1894 
1895 	/* fill in device description */
1896 	bdesc = mmc_get_blk_desc(mmc);
1897 	bdesc->lun = 0;
1898 	bdesc->hwpart = 0;
1899 	bdesc->type = 0;
1900 	bdesc->blksz = mmc->read_bl_len;
1901 	bdesc->log2blksz = LOG2(bdesc->blksz);
1902 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1903 #if !defined(CONFIG_SPL_BUILD) || \
1904 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1905 		!defined(CONFIG_USE_TINY_PRINTF))
1906 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1907 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1908 		(mmc->cid[3] >> 16) & 0xffff);
1909 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1910 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1911 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1912 		(mmc->cid[2] >> 24) & 0xff);
1913 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1914 		(mmc->cid[2] >> 16) & 0xf);
1915 #else
1916 	bdesc->vendor[0] = 0;
1917 	bdesc->product[0] = 0;
1918 	bdesc->revision[0] = 0;
1919 #endif
1920 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1921 	part_init(bdesc);
1922 #endif
1923 
1924 	return 0;
1925 }
1926 
1927 static int mmc_send_if_cond(struct mmc *mmc)
1928 {
1929 	struct mmc_cmd cmd;
1930 	int err;
1931 
1932 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
1933 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1934 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1935 	cmd.resp_type = MMC_RSP_R7;
1936 
1937 	err = mmc_send_cmd(mmc, &cmd, NULL);
1938 
1939 	if (err)
1940 		return err;
1941 
1942 	if ((cmd.response[0] & 0xff) != 0xaa)
1943 		return -EOPNOTSUPP;
1944 	else
1945 		mmc->version = SD_VERSION_2;
1946 
1947 	return 0;
1948 }
1949 
1950 #if !CONFIG_IS_ENABLED(DM_MMC)
1951 /* board-specific MMC power initializations. */
1952 __weak void board_mmc_power_init(void)
1953 {
1954 }
1955 #endif
1956 
1957 static int mmc_power_init(struct mmc *mmc)
1958 {
1959 #if CONFIG_IS_ENABLED(DM_MMC)
1960 #if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
1961 	struct udevice *vmmc_supply;
1962 	int ret;
1963 
1964 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1965 					  &vmmc_supply);
1966 	if (ret) {
1967 		debug("%s: No vmmc supply\n", mmc->dev->name);
1968 		return 0;
1969 	}
1970 
1971 	ret = regulator_set_enable(vmmc_supply, true);
1972 	if (ret) {
1973 		puts("Error enabling VMMC supply\n");
1974 		return ret;
1975 	}
1976 #endif
1977 #else /* !CONFIG_DM_MMC */
1978 	/*
1979 	 * Driver model should use a regulator, as above, rather than calling
1980 	 * out to board code.
1981 	 */
1982 	board_mmc_power_init();
1983 #endif
1984 	return 0;
1985 }
1986 
1987 int mmc_start_init(struct mmc *mmc)
1988 {
1989 	bool no_card;
1990 	int err;
1991 
1992 	/* we pretend there's no card when init is NULL */
1993 	no_card = mmc_getcd(mmc) == 0;
1994 #if !CONFIG_IS_ENABLED(DM_MMC)
1995 	no_card = no_card || (mmc->cfg->ops->init == NULL);
1996 #endif
1997 	if (no_card) {
1998 		mmc->has_init = 0;
1999 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2000 		printf("MMC: no card present\n");
2001 #endif
2002 		return -ENOMEDIUM;
2003 	}
2004 
2005 	if (mmc->has_init)
2006 		return 0;
2007 
2008 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2009 	mmc_adapter_card_type_ident();
2010 #endif
2011 	err = mmc_power_init(mmc);
2012 	if (err)
2013 		return err;
2014 
2015 #if CONFIG_IS_ENABLED(DM_MMC)
2016 	/* The device has already been probed ready for use */
2017 #else
2018 	/* made sure it's not NULL earlier */
2019 	err = mmc->cfg->ops->init(mmc);
2020 	if (err)
2021 		return err;
2022 #endif
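	/*
	 * Start out in identification-mode settings: a 1-bit bus, the lowest
	 * clock the host allows (mmc_set_clock() clamps the 1 Hz request up
	 * to f_min) and legacy timing.
	 */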
2023 	mmc_set_bus_width(mmc, 1);
2024 	mmc_set_clock(mmc, 1);
2025 	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
2026 
2027 	/* Reset the Card */
2028 	err = mmc_go_idle(mmc);
2029 
2030 	if (err)
2031 		return err;
2032 
2033 	/* The internal partition resets to the user partition (0) at every CMD0 */
2034 	mmc_get_blk_desc(mmc)->hwpart = 0;
2035 
2036 	/* Test for SD version 2 */
2037 	err = mmc_send_if_cond(mmc);
2038 
2039 	/* Now try to get the SD card's operating condition */
2040 	err = sd_send_op_cond(mmc);
2041 
2042 	/* If the command timed out, we check for an MMC card */
2043 	if (err == -ETIMEDOUT) {
2044 		err = mmc_send_op_cond(mmc);
2045 
2046 		if (err) {
2047 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2048 			printf("Card did not respond to voltage select!\n");
2049 #endif
2050 			return -EOPNOTSUPP;
2051 		}
2052 	}
2053 
2054 	if (!err)
2055 		mmc->init_in_progress = 1;
2056 
2057 	return err;
2058 }
2059 
2060 static int mmc_complete_init(struct mmc *mmc)
2061 {
2062 	int err = 0;
2063 
2064 	mmc->init_in_progress = 0;
2065 	if (mmc->op_cond_pending)
2066 		err = mmc_complete_op_cond(mmc);
2067 
2068 	if (!err)
2069 		err = mmc_startup(mmc);
2070 	if (err)
2071 		mmc->has_init = 0;
2072 	else
2073 		mmc->has_init = 1;
2074 	return err;
2075 }
2076 
2077 int mmc_init(struct mmc *mmc)
2078 {
2079 	int err = 0;
2080 	__maybe_unused unsigned start;
2081 #if CONFIG_IS_ENABLED(DM_MMC)
2082 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2083 
2084 	upriv->mmc = mmc;
2085 #endif
2086 	if (mmc->has_init)
2087 		return 0;
2088 
2089 	start = get_timer(0);
2090 
2091 	if (!mmc->init_in_progress)
2092 		err = mmc_start_init(mmc);
2093 
2094 	if (!err)
2095 		err = mmc_complete_init(mmc);
2096 	if (err)
2097 		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2098 
2099 	return err;
2100 }
2101 
2102 int mmc_set_dsr(struct mmc *mmc, u16 val)
2103 {
2104 	mmc->dsr = val;
2105 	return 0;
2106 }
2107 
2108 /* CPU-specific MMC initializations */
2109 __weak int cpu_mmc_init(bd_t *bis)
2110 {
2111 	return -1;
2112 }
2113 
2114 /* board-specific MMC initializations. */
2115 __weak int board_mmc_init(bd_t *bis)
2116 {
2117 	return -1;
2118 }
2119 
2120 void mmc_set_preinit(struct mmc *mmc, int preinit)
2121 {
2122 	mmc->preinit = preinit;
2123 }
2124 
2125 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2126 static int mmc_probe(bd_t *bis)
2127 {
2128 	return 0;
2129 }
2130 #elif CONFIG_IS_ENABLED(DM_MMC)
2131 static int mmc_probe(bd_t *bis)
2132 {
2133 	int ret, i;
2134 	struct uclass *uc;
2135 	struct udevice *dev;
2136 
2137 	ret = uclass_get(UCLASS_MMC, &uc);
2138 	if (ret)
2139 		return ret;
2140 
2141 	/*
2142 	 * Try to add them in sequence order. Really with driver model we
2143 	 * should allow holes, but the current MMC list does not allow that.
2144 	 * So if we request 0, 1, 3 we will get 0, 1, 2.
2145 	 */
2146 	for (i = 0; ; i++) {
2147 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2148 		if (ret == -ENODEV)
2149 			break;
2150 	}
2151 	uclass_foreach_dev(dev, uc) {
2152 		ret = device_probe(dev);
2153 		if (ret)
2154 			printf("%s - probe failed: %d\n", dev->name, ret);
2155 	}
2156 
2157 	return 0;
2158 }
2159 #else
2160 static int mmc_probe(bd_t *bis)
2161 {
2162 	if (board_mmc_init(bis) < 0)
2163 		cpu_mmc_init(bis);
2164 
2165 	return 0;
2166 }
2167 #endif
2168 
2169 int mmc_initialize(bd_t *bis)
2170 {
2171 	static int initialized = 0;
2172 	int ret;
2173 	if (initialized)	/* Avoid initializing mmc multiple times */
2174 		return 0;
2175 	initialized = 1;
2176 
2177 #if !CONFIG_IS_ENABLED(BLK)
2178 #if !CONFIG_IS_ENABLED(MMC_TINY)
2179 	mmc_list_init();
2180 #endif
2181 #endif
2182 	ret = mmc_probe(bis);
2183 	if (ret)
2184 		return ret;
2185 
2186 #ifndef CONFIG_SPL_BUILD
2187 	print_mmc_devices(',');
2188 #endif
2189 
2190 	mmc_do_preinit();
2191 	return 0;
2192 }
2193 
2194 #ifdef CONFIG_CMD_BKOPS_ENABLE
2195 int mmc_set_bkops_enable(struct mmc *mmc)
2196 {
2197 	int err;
2198 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2199 
2200 	err = mmc_send_ext_csd(mmc, ext_csd);
2201 	if (err) {
2202 		puts("Could not get ext_csd register values\n");
2203 		return err;
2204 	}
2205 
2206 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2207 		puts("Background operations not supported on device\n");
2208 		return -EMEDIUMTYPE;
2209 	}
2210 
2211 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2212 		puts("Background operations already enabled\n");
2213 		return 0;
2214 	}
2215 
2216 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2217 	if (err) {
2218 		puts("Failed to enable manual background operations\n");
2219 		return err;
2220 	}
2221 
2222 	puts("Enabled manual background operations\n");
2223 
2224 	return 0;
2225 }
2226 #endif
2227