xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision 514e00a960f8a815e0c86931b498063c6fc4ef76)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
25 static const unsigned int sd_au_size[] = {
26 	0,		SZ_16K / 512,		SZ_32K / 512,
27 	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
28 	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
29 	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
30 	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
31 };
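
/*
 * Informational example: an AU_SIZE field of 0x9 in the SD status register
 * indexes sd_au_size[9] = SZ_4M / 512 = 8192 sectors, i.e. a 4 MiB
 * allocation unit (see sd_read_ssr() below).
 */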
32 
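/*
 * Cached copy of the most recently read EXT_CSD; with CONFIG_MMC_USE_PRE_CONFIG
 * it lets mmc_send_ext_csd() skip re-reading the register on later calls.
 */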
33 static char mmc_ext_csd[512];
34 
35 #if CONFIG_IS_ENABLED(MMC_TINY)
36 static struct mmc mmc_static;
37 struct mmc *find_mmc_device(int dev_num)
38 {
39 	return &mmc_static;
40 }
41 
42 void mmc_do_preinit(void)
43 {
44 	struct mmc *m = &mmc_static;
45 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
46 	mmc_set_preinit(m, 1);
47 #endif
48 	if (m->preinit)
49 		mmc_start_init(m);
50 }
51 
52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
53 {
54 	return &mmc->block_dev;
55 }
56 #endif
57 
58 #if !CONFIG_IS_ENABLED(DM_MMC)
59 __weak int board_mmc_getwp(struct mmc *mmc)
60 {
61 	return -1;
62 }
63 
64 int mmc_getwp(struct mmc *mmc)
65 {
66 	int wp;
67 
68 	wp = board_mmc_getwp(mmc);
69 
70 	if (wp < 0) {
71 		if (mmc->cfg->ops->getwp)
72 			wp = mmc->cfg->ops->getwp(mmc);
73 		else
74 			wp = 0;
75 	}
76 
77 	return wp;
78 }
79 
80 __weak int board_mmc_getcd(struct mmc *mmc)
81 {
82 	return -1;
83 }
84 #endif
85 
86 #ifdef CONFIG_MMC_TRACE
87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
88 {
89 	printf("CMD_SEND:%d\n", cmd->cmdidx);
90 	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
91 }
92 
93 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
94 {
95 	int i;
96 	u8 *ptr;
97 
98 	if (ret) {
99 		printf("\t\tRET\t\t\t %d\n", ret);
100 	} else {
101 		switch (cmd->resp_type) {
102 		case MMC_RSP_NONE:
103 			printf("\t\tMMC_RSP_NONE\n");
104 			break;
105 		case MMC_RSP_R1:
106 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
107 				cmd->response[0]);
108 			break;
109 		case MMC_RSP_R1b:
110 			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
111 				cmd->response[0]);
112 			break;
113 		case MMC_RSP_R2:
114 			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
115 				cmd->response[0]);
116 			printf("\t\t          \t\t 0x%08X \n",
117 				cmd->response[1]);
118 			printf("\t\t          \t\t 0x%08X \n",
119 				cmd->response[2]);
120 			printf("\t\t          \t\t 0x%08X \n",
121 				cmd->response[3]);
122 			printf("\n");
123 			printf("\t\t\t\t\tDUMPING DATA\n");
124 			for (i = 0; i < 4; i++) {
125 				int j;
126 				printf("\t\t\t\t\t%03d - ", i*4);
127 				ptr = (u8 *)&cmd->response[i];
128 				ptr += 3;
129 				for (j = 0; j < 4; j++)
130 					printf("%02X ", *ptr--);
131 				printf("\n");
132 			}
133 			break;
134 		case MMC_RSP_R3:
135 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
136 				cmd->response[0]);
137 			break;
138 		default:
139 			printf("\t\tERROR MMC rsp not supported\n");
140 			break;
141 		}
142 	}
143 }
144 
145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
146 {
147 	int status;
148 
149 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
150 	printf("CURR STATE:%d\n", status);
151 }
152 #endif
153 
154 #if !CONFIG_IS_ENABLED(DM_MMC)
155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
156 {
157 	int ret;
158 
159 	mmmc_trace_before_send(mmc, cmd);
160 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
161 	mmmc_trace_after_send(mmc, cmd, ret);
162 
163 	return ret;
164 }
165 #endif
166 
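/*
 * Poll CMD13 (SEND_STATUS) until the card reports ready-for-data and has left
 * the programming state, or until roughly 'timeout' milliseconds have elapsed
 * (one CMD13 plus a 1 ms delay per loop iteration).
 */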
167 int mmc_send_status(struct mmc *mmc, int timeout)
168 {
169 	struct mmc_cmd cmd;
170 	int err, retries = 5;
171 
172 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
173 	cmd.resp_type = MMC_RSP_R1;
174 	if (!mmc_host_is_spi(mmc))
175 		cmd.cmdarg = mmc->rca << 16;
176 
177 	while (1) {
178 		err = mmc_send_cmd(mmc, &cmd, NULL);
179 		if (!err) {
180 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
181 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
182 			     MMC_STATE_PRG)
183 				break;
184 			else if (cmd.response[0] & MMC_STATUS_MASK) {
185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
186 				printf("Status Error: 0x%08X\n",
187 					cmd.response[0]);
188 #endif
189 				return -ECOMM;
190 			}
191 		} else if (--retries < 0)
192 			return err;
193 
194 		if (timeout-- <= 0)
195 			break;
196 
197 		udelay(1000);
198 	}
199 
200 	mmc_trace_state(mmc, &cmd);
201 	if (timeout <= 0) {
202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
203 		printf("Timeout waiting card ready\n");
204 #endif
205 		return -ETIMEDOUT;
206 	}
207 
208 	return 0;
209 }
210 
211 int mmc_set_blocklen(struct mmc *mmc, int len)
212 {
213 	struct mmc_cmd cmd;
214 
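	/*
	 * CMD16 (SET_BLOCKLEN) is not permitted while the card operates in
	 * DDR mode, so it is simply skipped there.
	 */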
215 	if (mmc_card_ddr(mmc))
216 		return 0;
217 
218 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
219 	cmd.resp_type = MMC_RSP_R1;
220 	cmd.cmdarg = len;
221 
222 	return mmc_send_cmd(mmc, &cmd, NULL);
223 }
224 
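/*
 * Read 'blkcnt' blocks starting at 'start' using CMD17/CMD18; the argument is
 * a block address for high-capacity cards and a byte address otherwise.
 * Multi-block reads are terminated with CMD12. Returns the number of blocks
 * read, or 0 on error.
 */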
225 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
226 			   lbaint_t blkcnt)
227 {
228 	struct mmc_cmd cmd;
229 	struct mmc_data data;
230 
231 	if (blkcnt > 1)
232 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
233 	else
234 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
235 
236 	if (mmc->high_capacity)
237 		cmd.cmdarg = start;
238 	else
239 		cmd.cmdarg = start * mmc->read_bl_len;
240 
241 	cmd.resp_type = MMC_RSP_R1;
242 
243 	data.dest = dst;
244 	data.blocks = blkcnt;
245 	data.blocksize = mmc->read_bl_len;
246 	data.flags = MMC_DATA_READ;
247 
248 	if (mmc_send_cmd(mmc, &cmd, &data))
249 		return 0;
250 
251 	if (blkcnt > 1) {
252 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
253 		cmd.cmdarg = 0;
254 		cmd.resp_type = MMC_RSP_R1b;
255 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
256 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
257 			printf("mmc fail to send stop cmd\n");
258 #endif
259 			return 0;
260 		}
261 	}
262 
263 	return blkcnt;
264 }
265 
266 #ifdef CONFIG_SPL_BLK_READ_PREPARE
267 static int mmc_read_blocks_prepare(struct mmc *mmc, void *dst, lbaint_t start,
268 				   lbaint_t blkcnt)
269 {
270 	struct mmc_cmd cmd;
271 	struct mmc_data data;
272 
273 	if (blkcnt > 1)
274 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
275 	else
276 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
277 
278 	if (mmc->high_capacity)
279 		cmd.cmdarg = start;
280 	else
281 		cmd.cmdarg = start * mmc->read_bl_len;
282 
283 	cmd.resp_type = MMC_RSP_R1;
284 
285 	data.dest = dst;
286 	data.blocks = blkcnt;
287 	data.blocksize = mmc->read_bl_len;
288 	data.flags = MMC_DATA_READ;
289 
290 	if (mmc_send_cmd_prepare(mmc, &cmd, &data))
291 		return 0;
292 
293 	return blkcnt;
294 }
295 #endif
296 
297 #ifdef CONFIG_SPL_BLK_READ_PREPARE
298 #if CONFIG_IS_ENABLED(BLK)
299 ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
300 #else
301 ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
302 			void *dst)
303 #endif
304 {
305 #if CONFIG_IS_ENABLED(BLK)
306 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
307 #endif
308 	int dev_num = block_dev->devnum;
309 	int timeout = 0;
310 	int err;
311 
312 	if (blkcnt == 0)
313 		return 0;
314 
315 	struct mmc *mmc = find_mmc_device(dev_num);
316 
317 	if (!mmc)
318 		return 0;
319 
320 	if (CONFIG_IS_ENABLED(MMC_TINY))
321 		err = mmc_switch_part(mmc, block_dev->hwpart);
322 	else
323 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
324 
325 	if (err < 0)
326 		return 0;
327 
328 	if ((start + blkcnt) > block_dev->lba) {
329 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
330 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
331 		       start + blkcnt, block_dev->lba);
332 #endif
333 		return 0;
334 	}
335 
336 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
337 		debug("%s: Failed to set blocklen\n", __func__);
338 		return 0;
339 	}
340 
341 	if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
342 		debug("%s: Failed to read blocks\n", __func__);
343 re_init_retry:
344 		timeout++;
345 		/*
346 		 * Try re-init seven times.
347 		 */
348 		if (timeout > 7) {
349 			printf("Re-init retry timeout\n");
350 			return 0;
351 		}
352 
353 		mmc->has_init = 0;
354 		if (mmc_init(mmc))
355 			return 0;
356 
357 		if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
358 			printf("%s: Re-init mmc_read_blocks_prepare error\n",
359 			       __func__);
360 			goto re_init_retry;
361 		}
362 	}
363 
364 	return blkcnt;
365 }
366 #endif
367 
368 #if CONFIG_IS_ENABLED(BLK)
369 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
370 #else
371 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
372 		void *dst)
373 #endif
374 {
375 #if CONFIG_IS_ENABLED(BLK)
376 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
377 #endif
378 	int dev_num = block_dev->devnum;
379 	int err;
380 	lbaint_t cur, blocks_todo = blkcnt;
381 
382 #ifdef CONFIG_SPL_BLK_READ_PREPARE
383 	if (block_dev->op_flag == BLK_PRE_RW)
384 #if CONFIG_IS_ENABLED(BLK)
385 		return mmc_bread_prepare(dev, start, blkcnt, dst);
386 #else
387 		return mmc_bread_prepare(block_dev, start, blkcnt, dst);
388 #endif
389 #endif
390 	if (blkcnt == 0)
391 		return 0;
392 
393 	struct mmc *mmc = find_mmc_device(dev_num);
394 	if (!mmc)
395 		return 0;
396 
397 	if (CONFIG_IS_ENABLED(MMC_TINY))
398 		err = mmc_switch_part(mmc, block_dev->hwpart);
399 	else
400 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
401 
402 	if (err < 0)
403 		return 0;
404 
405 	if ((start + blkcnt) > block_dev->lba) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
408 			start + blkcnt, block_dev->lba);
409 #endif
410 		return 0;
411 	}
412 
413 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
414 		debug("%s: Failed to set blocklen\n", __func__);
415 		return 0;
416 	}
417 
418 	do {
419 		cur = (blocks_todo > mmc->cfg->b_max) ?
420 			mmc->cfg->b_max : blocks_todo;
421 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
422 			debug("%s: Failed to read blocks\n", __func__);
423 			int timeout = 0;
424 re_init_retry:
425 			timeout++;
426 			/*
427 			 * Try re-init seven times.
428 			 */
429 			if (timeout > 7) {
430 				printf("Re-init retry timeout\n");
431 				return 0;
432 			}
433 
434 			mmc->has_init = 0;
435 			if (mmc_init(mmc))
436 				return 0;
437 
438 			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
439 				printf("%s: Re-init mmc_read_blocks error\n",
440 				       __func__);
441 				goto re_init_retry;
442 			}
443 		}
444 		blocks_todo -= cur;
445 		start += cur;
446 		dst += cur * mmc->read_bl_len;
447 	} while (blocks_todo > 0);
448 
449 	return blkcnt;
450 }
451 
452 void mmc_set_clock(struct mmc *mmc, uint clock)
453 {
454 	if (clock > mmc->cfg->f_max)
455 		clock = mmc->cfg->f_max;
456 
457 	if (clock < mmc->cfg->f_min)
458 		clock = mmc->cfg->f_min;
459 
460 	mmc->clock = clock;
461 
462 	mmc_set_ios(mmc);
463 }
464 
465 static void mmc_set_bus_width(struct mmc *mmc, uint width)
466 {
467 	mmc->bus_width = width;
468 
469 	mmc_set_ios(mmc);
470 }
471 
472 static void mmc_set_timing(struct mmc *mmc, uint timing)
473 {
474 	mmc->timing = timing;
475 	mmc_set_ios(mmc);
476 }
477 
478 static int mmc_go_idle(struct mmc *mmc)
479 {
480 	struct mmc_cmd cmd;
481 	int err;
482 
483 	udelay(1000);
484 
485 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
486 	cmd.cmdarg = 0;
487 	cmd.resp_type = MMC_RSP_NONE;
488 
489 	err = mmc_send_cmd(mmc, &cmd, NULL);
490 
491 	if (err)
492 		return err;
493 
494 	udelay(2000);
495 
496 	return 0;
497 }
498 
499 #ifndef CONFIG_MMC_USE_PRE_CONFIG
500 static int sd_send_op_cond(struct mmc *mmc)
501 {
502 	int timeout = 1000;
503 	int err;
504 	struct mmc_cmd cmd;
505 
506 	while (1) {
507 		cmd.cmdidx = MMC_CMD_APP_CMD;
508 		cmd.resp_type = MMC_RSP_R1;
509 		cmd.cmdarg = 0;
510 
511 		err = mmc_send_cmd(mmc, &cmd, NULL);
512 
513 		if (err)
514 			return err;
515 
516 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
517 		cmd.resp_type = MMC_RSP_R3;
518 
519 		/*
520 		 * Most cards do not answer if some reserved bits
521 		 * in the OCR are set. However, some controllers
522 		 * can set bit 7 (reserved for low voltages), but
523 		 * how to manage low-voltage SD cards is not yet
524 		 * specified.
525 		 */
526 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
527 			(mmc->cfg->voltages & 0xff8000);
528 
529 		if (mmc->version == SD_VERSION_2)
530 			cmd.cmdarg |= OCR_HCS;
531 
532 		err = mmc_send_cmd(mmc, &cmd, NULL);
533 
534 		if (err)
535 			return err;
536 
537 		if (cmd.response[0] & OCR_BUSY)
538 			break;
539 
540 		if (timeout-- <= 0)
541 			return -EOPNOTSUPP;
542 
543 		udelay(1000);
544 	}
545 
546 	if (mmc->version != SD_VERSION_2)
547 		mmc->version = SD_VERSION_1_0;
548 
549 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
550 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
551 		cmd.resp_type = MMC_RSP_R3;
552 		cmd.cmdarg = 0;
553 
554 		err = mmc_send_cmd(mmc, &cmd, NULL);
555 
556 		if (err)
557 			return err;
558 	}
559 
560 	mmc->ocr = cmd.response[0];
561 
562 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
563 	mmc->rca = 0;
564 
565 	return 0;
566 }
567 #endif
568 
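/*
 * Issue one CMD1 (SEND_OP_COND) iteration. When 'use_arg' is set (and the host
 * is not in SPI mode), the argument carries OCR_HCS, the intersection of the
 * host's supported voltages with the card's previously reported OCR window,
 * and the card's access mode bits.
 */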
569 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
570 {
571 	struct mmc_cmd cmd;
572 	int err;
573 
574 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
575 	cmd.resp_type = MMC_RSP_R3;
576 	cmd.cmdarg = 0;
577 	if (use_arg && !mmc_host_is_spi(mmc))
578 		cmd.cmdarg = OCR_HCS |
579 			(mmc->cfg->voltages &
580 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
581 			(mmc->ocr & OCR_ACCESS_MODE);
582 
583 	err = mmc_send_cmd(mmc, &cmd, NULL);
584 	if (err)
585 		return err;
586 	mmc->ocr = cmd.response[0];
587 	return 0;
588 }
589 
590 #ifndef CONFIG_MMC_USE_PRE_CONFIG
591 static int mmc_send_op_cond(struct mmc *mmc)
592 {
593 	int err, i;
594 
595 	/* Some cards seem to need this */
596 	mmc_go_idle(mmc);
597 
598 	/* Ask the card for its capabilities */
599 	for (i = 0; i < 2; i++) {
600 		err = mmc_send_op_cond_iter(mmc, i != 0);
601 		if (err)
602 			return err;
603 
604 		/* exit if not busy (flag seems to be inverted) */
605 		if (mmc->ocr & OCR_BUSY)
606 			break;
607 	}
608 	mmc->op_cond_pending = 1;
609 	return 0;
610 }
611 #endif
612 static int mmc_complete_op_cond(struct mmc *mmc)
613 {
614 	struct mmc_cmd cmd;
615 	int timeout = 1000;
616 	uint start;
617 	int err;
618 
619 	mmc->op_cond_pending = 0;
620 	if (!(mmc->ocr & OCR_BUSY)) {
621 		/* Some cards seem to need this */
622 		mmc_go_idle(mmc);
623 
624 		start = get_timer(0);
625 		while (1) {
626 			err = mmc_send_op_cond_iter(mmc, 1);
627 			if (err)
628 				return err;
629 			if (mmc->ocr & OCR_BUSY)
630 				break;
631 			if (get_timer(start) > timeout)
632 				return -EOPNOTSUPP;
633 			udelay(100);
634 		}
635 	}
636 
637 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
638 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
639 		cmd.resp_type = MMC_RSP_R3;
640 		cmd.cmdarg = 0;
641 
642 		err = mmc_send_cmd(mmc, &cmd, NULL);
643 
644 		if (err)
645 			return err;
646 
647 		mmc->ocr = cmd.response[0];
648 	}
649 
650 	mmc->version = MMC_VERSION_UNKNOWN;
651 
652 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
653 	mmc->rca = 1;
654 
655 	return 0;
656 }
657 
658 
659 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
660 {
661 	struct mmc_cmd cmd;
662 	struct mmc_data data;
663 	int err;
664 
665 #ifdef CONFIG_MMC_USE_PRE_CONFIG
666 	static int initialized;
667 	if (initialized) {
668 		memcpy(ext_csd, mmc_ext_csd, 512);
669 		return 0;
670 	}
671 
672 	initialized = 1;
673 #endif
674 	/* Get the Card Status Register */
675 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
676 	cmd.resp_type = MMC_RSP_R1;
677 	cmd.cmdarg = 0;
678 
679 	data.dest = (char *)ext_csd;
680 	data.blocks = 1;
681 	data.blocksize = MMC_MAX_BLOCK_LEN;
682 	data.flags = MMC_DATA_READ;
683 
684 	err = mmc_send_cmd(mmc, &cmd, &data);
685 	memcpy(mmc_ext_csd, ext_csd, 512);
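	/*
	 * With CONFIG_MMC_USE_PRE_CONFIG in the SPL, also stash the EXT_CSD at
	 * the address given by the optional "mmc-ecsd" device-tree property and
	 * tag it with a 0x55aa55aa magic, presumably so a later stage can pick
	 * it up without re-reading the card.
	 */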
686 #if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD)
687 	char *mmc_ecsd_base = NULL;
688 	ulong mmc_ecsd;
689 
690 	mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0);
691 	mmc_ecsd_base = (char *)mmc_ecsd;
692 	if (mmc_ecsd_base) {
693 		memcpy(mmc_ecsd_base, ext_csd, 512);
694 		*(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa;
695 	}
696 #endif
697 	return err;
698 }
699 
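/*
 * Wait for the card to finish programming after a SWITCH command: poll CMD13
 * when send_status is set, otherwise use the host's card_busy callback; if
 * neither is available, simply wait the full 1000 ms timeout.
 */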
700 static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
701 {
702 	struct mmc_cmd cmd;
703 	u8 busy = true;
704 	uint start;
705 	int ret;
706 	int timeout = 1000;
707 
708 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
709 	cmd.resp_type = MMC_RSP_R1;
710 	cmd.cmdarg = mmc->rca << 16;
711 
712 	start = get_timer(0);
713 
714 	if (!send_status && !mmc_can_card_busy(mmc)) {
715 		mdelay(timeout);
716 		return 0;
717 	}
718 
719 	do {
720 		if (!send_status) {
721 			busy = mmc_card_busy(mmc);
722 		} else {
723 			ret = mmc_send_cmd(mmc, &cmd, NULL);
724 
725 			if (ret)
726 				return ret;
727 
728 			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
729 				return -EBADMSG;
730 			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
731 				MMC_STATE_PRG;
732 		}
733 
734 		if (get_timer(start) > timeout && busy)
735 			return -ETIMEDOUT;
736 	} while (busy);
737 
738 	return 0;
739 }
740 
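/*
 * Issue CMD6 (SWITCH) to write a single EXT_CSD byte, attempting the command
 * up to three times, then wait for the card to become ready again.
 */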
741 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
742 			u8 send_status)
743 {
744 	struct mmc_cmd cmd;
745 	int retries = 3;
746 	int ret;
747 
748 	cmd.cmdidx = MMC_CMD_SWITCH;
749 	cmd.resp_type = MMC_RSP_R1b;
750 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
751 				 (index << 16) |
752 				 (value << 8);
753 
754 	do {
755 		ret = mmc_send_cmd(mmc, &cmd, NULL);
756 
757 		if (!ret)
758 			return mmc_poll_for_busy(mmc, send_status);
759 	} while (--retries > 0 && ret);
760 
761 	return ret;
762 }
763 
764 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
765 {
766 	return __mmc_switch(mmc, set, index, value, true);
767 }
768 
769 static int mmc_select_bus_width(struct mmc *mmc)
770 {
771 	u32 ext_csd_bits[] = {
772 		EXT_CSD_BUS_WIDTH_8,
773 		EXT_CSD_BUS_WIDTH_4,
774 	};
775 	u32 bus_widths[] = {
776 		MMC_BUS_WIDTH_8BIT,
777 		MMC_BUS_WIDTH_4BIT,
778 	};
779 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
780 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
781 	u32 idx, bus_width = 0;
782 	int err = 0;
783 
784 	if (mmc->version < MMC_VERSION_4 ||
785 	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
786 		return 0;
787 
788 	err = mmc_send_ext_csd(mmc, ext_csd);
789 
790 	if (err)
791 		return err;
792 
793 	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;
794 
795 	/*
796 	 * Unlike SD, MMC cards don't have a configuration register reporting
797 	 * the supported bus width. So either run the bus test command to
798 	 * identify the supported bus width, or compare the EXT_CSD read at the
799 	 * current bus width against the EXT_CSD read earlier in 1-bit mode.
800 	 */
801 	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
802 		/*
803 		 * If the host is capable of 8-bit transfer, switch the
804 		 * device to 8-bit transfer mode first. If the mmc switch
805 		 * command returns an error, fall back to 4-bit transfer
806 		 * mode. On success, set the corresponding bus width on
807 		 * the host.
808 		 */
809 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
810 				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
811 		if (err)
812 			continue;
813 
814 		bus_width = bus_widths[idx];
815 		mmc_set_bus_width(mmc, bus_width);
816 
817 		err = mmc_send_ext_csd(mmc, test_csd);
818 
819 		if (err)
820 			continue;
821 
822 		/* Only compare read only fields */
823 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
824 			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
825 		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
826 			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
827 		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
828 			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
829 			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
830 		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
831 			&test_csd[EXT_CSD_SEC_CNT], 4)) {
832 			err = bus_width;
833 			break;
834 		} else {
835 			err = -EBADMSG;
836 		}
837 	}
838 
839 	return err;
840 }
841 
842 #ifndef CONFIG_MMC_SIMPLE
843 static const u8 tuning_blk_pattern_4bit[] = {
844 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
845 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
846 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
847 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
848 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
849 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
850 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
851 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
852 };
853 
854 static const u8 tuning_blk_pattern_8bit[] = {
855 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
856 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
857 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
858 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
859 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
860 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
861 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
862 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
863 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
864 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
865 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
866 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
867 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
868 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
869 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
870 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
871 };
872 
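/*
 * Request one tuning block with the given opcode (MMC_SEND_TUNING_BLOCK for
 * SD, MMC_SEND_TUNING_BLOCK_HS200 for eMMC) and compare it against the
 * expected 4-bit or 8-bit pattern; returns 0 when the received data matches.
 */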
873 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
874 {
875 	struct mmc_cmd cmd;
876 	struct mmc_data data;
877 	const u8 *tuning_block_pattern;
878 	int size, err = 0;
879 	u8 *data_buf;
880 
881 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
882 		tuning_block_pattern = tuning_blk_pattern_8bit;
883 		size = sizeof(tuning_blk_pattern_8bit);
884 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
885 		tuning_block_pattern = tuning_blk_pattern_4bit;
886 		size = sizeof(tuning_blk_pattern_4bit);
887 	} else {
888 		return -EINVAL;
889 	}
890 
891 	data_buf = calloc(1, size);
892 	if (!data_buf)
893 		return -ENOMEM;
894 
895 	cmd.cmdidx = opcode;
896 	cmd.resp_type = MMC_RSP_R1;
897 	cmd.cmdarg = 0;
898 
899 	data.dest = (char *)data_buf;
900 	data.blocksize = size;
901 	data.blocks = 1;
902 	data.flags = MMC_DATA_READ;
903 
904 	err = mmc_send_cmd(mmc, &cmd, &data);
905 	if (err)
906 		goto out;
907 
908 	if (memcmp(data_buf, tuning_block_pattern, size))
909 		err = -EIO;
910 out:
911 	free(data_buf);
912 	return err;
913 }
914 
915 static int mmc_execute_tuning(struct mmc *mmc)
916 {
917 #ifdef CONFIG_DM_MMC
918 	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
919 #endif
920 	u32 opcode;
921 
922 	if (IS_SD(mmc))
923 		opcode = MMC_SEND_TUNING_BLOCK;
924 	else
925 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
926 
927 #ifndef CONFIG_DM_MMC
928 	if (mmc->cfg->ops->execute_tuning) {
929 		return mmc->cfg->ops->execute_tuning(mmc, opcode);
930 #else
931 	if (ops->execute_tuning) {
932 		return ops->execute_tuning(mmc->dev, opcode);
933 #endif
934 	} else {
935 		debug("Tuning feature required for HS200 mode.\n");
936 		return -EIO;
937 	}
938 }
939 
940 static int mmc_hs200_tuning(struct mmc *mmc)
941 {
942 	return mmc_execute_tuning(mmc);
943 }
944 
945 #else
946 int mmc_send_tuning(struct mmc *mmc, u32 opcode) { return 0; }
947 int mmc_execute_tuning(struct mmc *mmc) { return 0; }
948 static int mmc_hs200_tuning(struct mmc *mmc) { return 0; }
949 #endif
950 
951 static int mmc_select_hs(struct mmc *mmc)
952 {
953 	int ret;
954 
955 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
956 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
957 
958 	if (!ret)
959 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
960 
961 	return ret;
962 }
963 
964 static int mmc_select_hs_ddr(struct mmc *mmc)
965 {
966 	u32 ext_csd_bits;
967 	int err = 0;
968 
969 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
970 		return 0;
971 
972 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
973 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
974 
975 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
976 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
977 	if (err)
978 		return err;
979 
980 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
981 
982 	return 0;
983 }
984 
985 #ifndef CONFIG_MMC_SIMPLE
986 static int mmc_select_hs200(struct mmc *mmc)
987 {
988 	int ret;
989 
990 	/*
991 	 * Set the bus width (4 or 8 bits) according to the host's support
992 	 * and switch to HS200 mode if the bus width was set successfully.
993 	 */
994 	ret = mmc_select_bus_width(mmc);
995 
996 	if (ret > 0) {
997 		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
998 				   EXT_CSD_HS_TIMING,
999 				   EXT_CSD_TIMING_HS200, false);
1000 
1001 		if (ret)
1002 			return ret;
1003 
1004 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
1005 	}
1006 
1007 	return ret;
1008 }
1009 
1010 static int mmc_select_hs400(struct mmc *mmc)
1011 {
1012 	int ret;
1013 
1014 	/* Reduce frequency to HS frequency */
1015 	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);
1016 
1017 	/* Switch card to HS mode */
1018 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1019 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
1020 	if (ret)
1021 		return ret;
1022 
1023 	/* Set host controller to HS timing */
1024 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
1025 
1026 	ret = mmc_send_status(mmc, 1000);
1027 	if (ret)
1028 		return ret;
1029 
1030 	/* Switch card to DDR */
1031 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1032 			 EXT_CSD_BUS_WIDTH,
1033 			 EXT_CSD_DDR_BUS_WIDTH_8);
1034 	if (ret)
1035 		return ret;
1036 
1037 	/* Switch card to HS400 */
1038 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1039 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
1040 	if (ret)
1041 		return ret;
1042 
1043 	/* Set host controller to HS400 timing and frequency */
1044 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);
1045 
1046 	return ret;
1047 }
1048 #else
1049 static int mmc_select_hs200(struct mmc *mmc) { return 0; }
1050 static int mmc_select_hs400(struct mmc *mmc) { return 0; }
1051 #endif
1052 
1053 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
1054 {
1055 	u8 card_type;
1056 	u32 host_caps, avail_type = 0;
1057 
1058 	card_type = ext_csd[EXT_CSD_CARD_TYPE];
1059 	host_caps = mmc->cfg->host_caps;
1060 
1061 	if ((host_caps & MMC_MODE_HS) &&
1062 	    (card_type & EXT_CSD_CARD_TYPE_26))
1063 		avail_type |= EXT_CSD_CARD_TYPE_26;
1064 
1065 	if ((host_caps & MMC_MODE_HS) &&
1066 	    (card_type & EXT_CSD_CARD_TYPE_52))
1067 		avail_type |= EXT_CSD_CARD_TYPE_52;
1068 
1069 	/*
1070 	 * For the moment, U-Boot doesn't support signal voltage
1071 	 * switching, therefore we assume that the host supports DDR52
1072 	 * at 1.8 V or 3.3 V I/O (1.2 V I/O is not supported; the same
1073 	 * applies to HS200 and HS400).
1074 	 */
1075 	if ((host_caps & MMC_MODE_DDR_52MHz) &&
1076 	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
1077 		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
1078 
1079 	if ((host_caps & MMC_MODE_HS200) &&
1080 	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
1081 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
1082 
1083 	/*
1084 	 * If host can support HS400, it means that host can also
1085 	 * support HS200.
1086 	 */
1087 	if ((host_caps & MMC_MODE_HS400) &&
1088 	    (host_caps & MMC_MODE_8BIT) &&
1089 	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
1090 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
1091 				EXT_CSD_CARD_TYPE_HS400_1_8V;
1092 
1093 	if ((host_caps & MMC_MODE_HS400ES) &&
1094 	    (host_caps & MMC_MODE_8BIT) &&
1095 	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
1096 	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
1097 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
1098 				EXT_CSD_CARD_TYPE_HS400_1_8V |
1099 				EXT_CSD_CARD_TYPE_HS400ES;
1100 
1101 	return avail_type;
1102 }
1103 
1104 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
1105 {
1106 	int clock = 0;
1107 
1108 	if (mmc_card_hs(mmc))
1109 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
1110 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
1111 	else if (mmc_card_hs200(mmc) ||
1112 		 mmc_card_hs400(mmc) ||
1113 		 mmc_card_hs400es(mmc))
1114 		clock = MMC_HS200_MAX_DTR;
1115 
1116 	mmc_set_clock(mmc, clock);
1117 }
1118 
1119 static int mmc_change_freq(struct mmc *mmc)
1120 {
1121 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1122 	u32 avail_type;
1123 	int err;
1124 
1125 	mmc->card_caps = 0;
1126 
1127 	if (mmc_host_is_spi(mmc))
1128 		return 0;
1129 
1130 	/* Only version 4 supports high-speed */
1131 	if (mmc->version < MMC_VERSION_4)
1132 		return 0;
1133 
1134 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
1135 
1136 	err = mmc_send_ext_csd(mmc, ext_csd);
1137 
1138 	if (err)
1139 		return err;
1140 
1141 	avail_type = mmc_select_card_type(mmc, ext_csd);
1142 
1143 	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
1144 		err = mmc_select_hs200(mmc);
1145 	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
1146 		err = mmc_select_hs(mmc);
1147 	else
1148 		err = -EINVAL;
1149 
1150 	if (err)
1151 		return err;
1152 
1153 	mmc_set_bus_speed(mmc, avail_type);
1154 
1155 	if (mmc_card_hs200(mmc)) {
1156 		err = mmc_hs200_tuning(mmc);
1157 		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1158 		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
1159 			err = mmc_select_hs400(mmc);
1160 			mmc_set_bus_speed(mmc, avail_type);
1161 		}
1162 	} else if (!mmc_card_hs400es(mmc)) {
1163 		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
1164 		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
1165 			err = mmc_select_hs_ddr(mmc);
1166 	}
1167 
1168 	return err;
1169 }
1170 
1171 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1172 {
1173 	switch (part_num) {
1174 	case 0:
1175 		mmc->capacity = mmc->capacity_user;
1176 		break;
1177 	case 1:
1178 	case 2:
1179 		mmc->capacity = mmc->capacity_boot;
1180 		break;
1181 	case 3:
1182 		mmc->capacity = mmc->capacity_rpmb;
1183 		break;
1184 	case 4:
1185 	case 5:
1186 	case 6:
1187 	case 7:
1188 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1189 		break;
1190 	default:
1191 		return -1;
1192 	}
1193 
1194 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1195 
1196 	return 0;
1197 }
1198 
1199 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1200 {
1201 	int ret;
1202 
1203 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1204 			 (mmc->part_config & ~PART_ACCESS_MASK)
1205 			 | (part_num & PART_ACCESS_MASK));
1206 
1207 	/*
1208 	 * Set the capacity if the switch succeeded or was intended
1209 	 * to return to representing the raw device.
1210 	 */
1211 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1212 		ret = mmc_set_capacity(mmc, part_num);
1213 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1214 	}
1215 
1216 	return ret;
1217 }
1218 
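/*
 * Apply an eMMC hardware partition configuration. A hypothetical caller would
 * typically invoke this in stages: first with MMC_HWPART_CONF_CHECK to
 * validate the layout, then MMC_HWPART_CONF_SET to write the size and
 * attribute registers, and finally MMC_HWPART_CONF_COMPLETE to set
 * PARTITION_SETTING_COMPLETED (which only takes effect after a power cycle).
 */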
1219 int mmc_hwpart_config(struct mmc *mmc,
1220 		      const struct mmc_hwpart_conf *conf,
1221 		      enum mmc_hwpart_conf_mode mode)
1222 {
1223 	u8 part_attrs = 0;
1224 	u32 enh_size_mult;
1225 	u32 enh_start_addr;
1226 	u32 gp_size_mult[4];
1227 	u32 max_enh_size_mult;
1228 	u32 tot_enh_size_mult = 0;
1229 	u8 wr_rel_set;
1230 	int i, pidx, err;
1231 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1232 
1233 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1234 		return -EINVAL;
1235 
1236 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1237 		printf("eMMC >= 4.4 required for enhanced user data area\n");
1238 		return -EMEDIUMTYPE;
1239 	}
1240 
1241 	if (!(mmc->part_support & PART_SUPPORT)) {
1242 		printf("Card does not support partitioning\n");
1243 		return -EMEDIUMTYPE;
1244 	}
1245 
1246 	if (!mmc->hc_wp_grp_size) {
1247 		printf("Card does not define HC WP group size\n");
1248 		return -EMEDIUMTYPE;
1249 	}
1250 
1251 	/* check partition alignment and total enhanced size */
1252 	if (conf->user.enh_size) {
1253 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1254 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1255 			printf("User data enhanced area not HC WP group "
1256 			       "size aligned\n");
1257 			return -EINVAL;
1258 		}
1259 		part_attrs |= EXT_CSD_ENH_USR;
1260 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1261 		if (mmc->high_capacity) {
1262 			enh_start_addr = conf->user.enh_start;
1263 		} else {
1264 			enh_start_addr = (conf->user.enh_start << 9);
1265 		}
1266 	} else {
1267 		enh_size_mult = 0;
1268 		enh_start_addr = 0;
1269 	}
1270 	tot_enh_size_mult += enh_size_mult;
1271 
1272 	for (pidx = 0; pidx < 4; pidx++) {
1273 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1274 			printf("GP%i partition not HC WP group size "
1275 			       "aligned\n", pidx+1);
1276 			return -EINVAL;
1277 		}
1278 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1279 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1280 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1281 			tot_enh_size_mult += gp_size_mult[pidx];
1282 		}
1283 	}
1284 
1285 	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1286 		printf("Card does not support enhanced attribute\n");
1287 		return -EMEDIUMTYPE;
1288 	}
1289 
1290 	err = mmc_send_ext_csd(mmc, ext_csd);
1291 	if (err)
1292 		return err;
1293 
1294 	max_enh_size_mult =
1295 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1296 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1297 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1298 	if (tot_enh_size_mult > max_enh_size_mult) {
1299 		printf("Total enhanced size exceeds maximum (%u > %u)\n",
1300 		       tot_enh_size_mult, max_enh_size_mult);
1301 		return -EMEDIUMTYPE;
1302 	}
1303 
1304 	/* The default value of EXT_CSD_WR_REL_SET is device
1305 	 * dependent; the values can only be changed if the
1306 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1307 	 * changed only once and before partitioning is completed. */
1308 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1309 	if (conf->user.wr_rel_change) {
1310 		if (conf->user.wr_rel_set)
1311 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1312 		else
1313 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1314 	}
1315 	for (pidx = 0; pidx < 4; pidx++) {
1316 		if (conf->gp_part[pidx].wr_rel_change) {
1317 			if (conf->gp_part[pidx].wr_rel_set)
1318 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1319 			else
1320 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1321 		}
1322 	}
1323 
1324 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1325 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1326 		puts("Card does not support host controlled partition write "
1327 		     "reliability settings\n");
1328 		return -EMEDIUMTYPE;
1329 	}
1330 
1331 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1332 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1333 		printf("Card already partitioned\n");
1334 		return -EPERM;
1335 	}
1336 
1337 	if (mode == MMC_HWPART_CONF_CHECK)
1338 		return 0;
1339 
1340 	/* Partitioning requires high-capacity size definitions */
1341 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1342 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1343 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1344 
1345 		if (err)
1346 			return err;
1347 
1348 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1349 
1350 		/* update erase group size to be high-capacity */
1351 		mmc->erase_grp_size =
1352 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1353 
1354 	}
1355 
1356 	/* all OK, write the configuration */
1357 	for (i = 0; i < 4; i++) {
1358 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1359 				 EXT_CSD_ENH_START_ADDR+i,
1360 				 (enh_start_addr >> (i*8)) & 0xFF);
1361 		if (err)
1362 			return err;
1363 	}
1364 	for (i = 0; i < 3; i++) {
1365 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1366 				 EXT_CSD_ENH_SIZE_MULT+i,
1367 				 (enh_size_mult >> (i*8)) & 0xFF);
1368 		if (err)
1369 			return err;
1370 	}
1371 	for (pidx = 0; pidx < 4; pidx++) {
1372 		for (i = 0; i < 3; i++) {
1373 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1374 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1375 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1376 			if (err)
1377 				return err;
1378 		}
1379 	}
1380 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1381 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1382 	if (err)
1383 		return err;
1384 
1385 	if (mode == MMC_HWPART_CONF_SET)
1386 		return 0;
1387 
1388 	/* The WR_REL_SET is a write-once register but shall be
1389 	 * written before setting PART_SETTING_COMPLETED. As it is
1390 	 * write-once we can only write it when completing the
1391 	 * partitioning. */
1392 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1393 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1394 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1395 		if (err)
1396 			return err;
1397 	}
1398 
1399 	/* Setting PART_SETTING_COMPLETED confirms the partition
1400 	 * configuration but it only becomes effective after power
1401 	 * cycle, so we do not adjust the partition related settings
1402 	 * in the mmc struct. */
1403 
1404 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1405 			 EXT_CSD_PARTITION_SETTING,
1406 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1407 	if (err)
1408 		return err;
1409 
1410 	return 0;
1411 }
1412 
1413 #if !CONFIG_IS_ENABLED(DM_MMC)
1414 int mmc_getcd(struct mmc *mmc)
1415 {
1416 	int cd;
1417 
1418 	cd = board_mmc_getcd(mmc);
1419 
1420 	if (cd < 0) {
1421 		if (mmc->cfg->ops->getcd)
1422 			cd = mmc->cfg->ops->getcd(mmc);
1423 		else
1424 			cd = 1;
1425 	}
1426 
1427 	return cd;
1428 }
1429 #endif
1430 
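/*
 * Issue CMD6 (SWITCH_FUNC) in either check or switch mode, setting the
 * requested value for the given function group, and read back the 64-byte
 * switch status block into 'resp'.
 */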
1431 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1432 {
1433 	struct mmc_cmd cmd;
1434 	struct mmc_data data;
1435 
1436 	/* Switch the frequency */
1437 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1438 	cmd.resp_type = MMC_RSP_R1;
1439 	cmd.cmdarg = (mode << 31) | 0xffffff;
1440 	cmd.cmdarg &= ~(0xf << (group * 4));
1441 	cmd.cmdarg |= value << (group * 4);
1442 
1443 	data.dest = (char *)resp;
1444 	data.blocksize = 64;
1445 	data.blocks = 1;
1446 	data.flags = MMC_DATA_READ;
1447 
1448 	return mmc_send_cmd(mmc, &cmd, &data);
1449 }
1450 
1451 
1452 static int sd_change_freq(struct mmc *mmc)
1453 {
1454 	int err;
1455 	struct mmc_cmd cmd;
1456 	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
1457 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1458 	struct mmc_data data;
1459 	int timeout;
1460 
1461 	mmc->card_caps = 0;
1462 
1463 	if (mmc_host_is_spi(mmc))
1464 		return 0;
1465 
1466 	/* Read the SCR to find out if this card supports higher speeds */
1467 	cmd.cmdidx = MMC_CMD_APP_CMD;
1468 	cmd.resp_type = MMC_RSP_R1;
1469 	cmd.cmdarg = mmc->rca << 16;
1470 
1471 	err = mmc_send_cmd(mmc, &cmd, NULL);
1472 
1473 	if (err)
1474 		return err;
1475 
1476 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1477 	cmd.resp_type = MMC_RSP_R1;
1478 	cmd.cmdarg = 0;
1479 
1480 	timeout = 3;
1481 
1482 retry_scr:
1483 	data.dest = (char *)scr;
1484 	data.blocksize = 8;
1485 	data.blocks = 1;
1486 	data.flags = MMC_DATA_READ;
1487 
1488 	err = mmc_send_cmd(mmc, &cmd, &data);
1489 
1490 	if (err) {
1491 		if (timeout--)
1492 			goto retry_scr;
1493 
1494 		return err;
1495 	}
1496 
1497 	mmc->scr[0] = __be32_to_cpu(scr[0]);
1498 	mmc->scr[1] = __be32_to_cpu(scr[1]);
1499 
1500 	switch ((mmc->scr[0] >> 24) & 0xf) {
1501 	case 0:
1502 		mmc->version = SD_VERSION_1_0;
1503 		break;
1504 	case 1:
1505 		mmc->version = SD_VERSION_1_10;
1506 		break;
1507 	case 2:
1508 		mmc->version = SD_VERSION_2;
1509 		if ((mmc->scr[0] >> 15) & 0x1)
1510 			mmc->version = SD_VERSION_3;
1511 		break;
1512 	default:
1513 		mmc->version = SD_VERSION_1_0;
1514 		break;
1515 	}
1516 
1517 	if (mmc->scr[0] & SD_DATA_4BIT)
1518 		mmc->card_caps |= MMC_MODE_4BIT;
1519 
1520 	/* Version 1.0 doesn't support switching */
1521 	if (mmc->version == SD_VERSION_1_0)
1522 		return 0;
1523 
1524 	timeout = 4;
1525 	while (timeout--) {
1526 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1527 				(u8 *)switch_status);
1528 
1529 		if (err)
1530 			return err;
1531 
1532 		/* The high-speed function is busy.  Try again */
1533 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1534 			break;
1535 	}
1536 
1537 	/* If high-speed isn't supported, we return */
1538 	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
1539 		return 0;
1540 
1541 	/*
1542 	 * If the host doesn't support SD_HIGHSPEED, do not switch the card
1543 	 * to HIGHSPEED mode even if the card supports SD_HIGHSPEED.
1544 	 * This avoids further problems when the card runs in a different
1545 	 * mode than the host.
1546 	 */
1547 	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
1548 		(mmc->cfg->host_caps & MMC_MODE_HS)))
1549 		return 0;
1550 
1551 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1552 
1553 	if (err)
1554 		return err;
1555 
1556 	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
1557 		mmc->card_caps |= MMC_MODE_HS;
1558 
1559 	return 0;
1560 }
1561 
1562 static int sd_read_ssr(struct mmc *mmc)
1563 {
1564 	int err, i;
1565 	struct mmc_cmd cmd;
1566 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1567 	struct mmc_data data;
1568 	int timeout = 3;
1569 	unsigned int au, eo, et, es;
1570 
1571 	cmd.cmdidx = MMC_CMD_APP_CMD;
1572 	cmd.resp_type = MMC_RSP_R1;
1573 	cmd.cmdarg = mmc->rca << 16;
1574 
1575 	err = mmc_send_cmd(mmc, &cmd, NULL);
1576 	if (err)
1577 		return err;
1578 
1579 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1580 	cmd.resp_type = MMC_RSP_R1;
1581 	cmd.cmdarg = 0;
1582 
1583 retry_ssr:
1584 	data.dest = (char *)ssr;
1585 	data.blocksize = 64;
1586 	data.blocks = 1;
1587 	data.flags = MMC_DATA_READ;
1588 
1589 	err = mmc_send_cmd(mmc, &cmd, &data);
1590 	if (err) {
1591 		if (timeout--)
1592 			goto retry_ssr;
1593 
1594 		return err;
1595 	}
1596 
1597 	for (i = 0; i < 16; i++)
1598 		ssr[i] = be32_to_cpu(ssr[i]);
1599 
1600 	au = (ssr[2] >> 12) & 0xF;
1601 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1602 		mmc->ssr.au = sd_au_size[au];
1603 		es = (ssr[3] >> 24) & 0xFF;
1604 		es |= (ssr[2] & 0xFF) << 8;
1605 		et = (ssr[3] >> 18) & 0x3F;
1606 		if (es && et) {
1607 			eo = (ssr[3] >> 16) & 0x3;
1608 			mmc->ssr.erase_timeout = (et * 1000) / es;
1609 			mmc->ssr.erase_offset = eo * 1000;
1610 		}
1611 	} else {
1612 		debug("Invalid Allocation Unit Size.\n");
1613 	}
1614 
1615 	return 0;
1616 }
1617 
1618 /* frequency bases */
1619 /* divided by 10 to be nice to platforms without floating point */
1620 static const int fbase[] = {
1621 	10000,
1622 	100000,
1623 	1000000,
1624 	10000000,
1625 };
1626 
1627 /* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1628  * to platforms without floating point.
1629  */
1630 static const u8 multipliers[] = {
1631 	0,	/* reserved */
1632 	10,
1633 	12,
1634 	13,
1635 	15,
1636 	20,
1637 	25,
1638 	30,
1639 	35,
1640 	40,
1641 	45,
1642 	50,
1643 	55,
1644 	60,
1645 	70,
1646 	80,
1647 };
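
/*
 * Informational example: a CSD TRAN_SPEED byte of 0x32 encodes frequency unit
 * 0x2 (10 Mbit/s, stored here as 1000000 after the divide-by-10) and
 * multiplier index 0x6 (2.5, stored as 25), so mmc_startup() computes
 * 1000000 * 25 = 25000000 Hz, the default SD transfer speed.
 */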
1648 
1649 #if !CONFIG_IS_ENABLED(DM_MMC)
1650 static void mmc_set_ios(struct mmc *mmc)
1651 {
1652 	if (mmc->cfg->ops->set_ios)
1653 		mmc->cfg->ops->set_ios(mmc);
1654 }
1655 
1656 static bool mmc_card_busy(struct mmc *mmc)
1657 {
1658 	if (!mmc->cfg->ops->card_busy)
1659 		return -ENOSYS;
1660 
1661 	return mmc->cfg->ops->card_busy(mmc);
1662 }
1663 
1664 static bool mmc_can_card_busy(struct mmc *mmc)
1665 {
1666 	return !!mmc->cfg->ops->card_busy;
1667 }
1668 #endif
1669 
1670 static int mmc_startup(struct mmc *mmc)
1671 {
1672 	int err, i;
1673 	uint mult, freq, tran_speed;
1674 	u64 cmult, csize, capacity;
1675 	struct mmc_cmd cmd;
1676 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1677 	bool has_parts = false;
1678 	bool part_completed;
1679 	struct blk_desc *bdesc;
1680 
1681 #ifdef CONFIG_MMC_SPI_CRC_ON
1682 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1683 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1684 		cmd.resp_type = MMC_RSP_R1;
1685 		cmd.cmdarg = 1;
1686 		err = mmc_send_cmd(mmc, &cmd, NULL);
1687 
1688 		if (err)
1689 			return err;
1690 	}
1691 #endif
1692 #ifndef CONFIG_MMC_USE_PRE_CONFIG
1693 	/* Put the Card in Identify Mode */
1694 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1695 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1696 	cmd.resp_type = MMC_RSP_R2;
1697 	cmd.cmdarg = 0;
1698 
1699 	err = mmc_send_cmd(mmc, &cmd, NULL);
1700 
1701 	if (err)
1702 		return err;
1703 
1704 	memcpy(mmc->cid, cmd.response, 16);
1705 
1706 	/*
1707 	 * For MMC cards, set the Relative Address.
1708 	 * For SD cards, get the Relative Address.
1709 	 * This also puts the cards into Standby State.
1710 	 */
1711 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1712 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1713 		cmd.cmdarg = mmc->rca << 16;
1714 		cmd.resp_type = MMC_RSP_R6;
1715 
1716 		err = mmc_send_cmd(mmc, &cmd, NULL);
1717 
1718 		if (err)
1719 			return err;
1720 
1721 		if (IS_SD(mmc))
1722 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1723 	}
1724 #endif
1725 	/* Get the Card-Specific Data */
1726 	cmd.cmdidx = MMC_CMD_SEND_CSD;
1727 	cmd.resp_type = MMC_RSP_R2;
1728 	cmd.cmdarg = mmc->rca << 16;
1729 
1730 	err = mmc_send_cmd(mmc, &cmd, NULL);
1731 
1732 	if (err)
1733 		return err;
1734 
1735 	mmc->csd[0] = cmd.response[0];
1736 	mmc->csd[1] = cmd.response[1];
1737 	mmc->csd[2] = cmd.response[2];
1738 	mmc->csd[3] = cmd.response[3];
1739 
1740 	if (mmc->version == MMC_VERSION_UNKNOWN) {
1741 		int version = (cmd.response[0] >> 26) & 0xf;
1742 
1743 		switch (version) {
1744 		case 0:
1745 			mmc->version = MMC_VERSION_1_2;
1746 			break;
1747 		case 1:
1748 			mmc->version = MMC_VERSION_1_4;
1749 			break;
1750 		case 2:
1751 			mmc->version = MMC_VERSION_2_2;
1752 			break;
1753 		case 3:
1754 			mmc->version = MMC_VERSION_3;
1755 			break;
1756 		case 4:
1757 			mmc->version = MMC_VERSION_4;
1758 			break;
1759 		default:
1760 			mmc->version = MMC_VERSION_1_2;
1761 			break;
1762 		}
1763 	}
1764 
1765 	/* divide frequency by 10, since the mults are 10x bigger */
1766 	freq = fbase[(cmd.response[0] & 0x7)];
1767 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1768 
1769 	tran_speed = freq * mult;
1770 
1771 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1772 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1773 
1774 	if (IS_SD(mmc))
1775 		mmc->write_bl_len = mmc->read_bl_len;
1776 	else
1777 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1778 
1779 	if (mmc->high_capacity) {
1780 		csize = (mmc->csd[1] & 0x3f) << 16
1781 			| (mmc->csd[2] & 0xffff0000) >> 16;
1782 		cmult = 8;
1783 	} else {
1784 		csize = (mmc->csd[1] & 0x3ff) << 2
1785 			| (mmc->csd[2] & 0xc0000000) >> 30;
1786 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
1787 	}
1788 
1789 	mmc->capacity_user = (csize + 1) << (cmult + 2);
1790 	mmc->capacity_user *= mmc->read_bl_len;
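	/*
	 * Informational example: with the high-capacity encoding, a CSD
	 * C_SIZE of 0xFFF gives (0xFFF + 1) << 10 = 4194304 blocks of
	 * 512 bytes = 2 GiB; larger devices report their real size via
	 * the EXT_CSD SEC_CNT field handled below.
	 */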
1791 	mmc->capacity_boot = 0;
1792 	mmc->capacity_rpmb = 0;
1793 	for (i = 0; i < 4; i++)
1794 		mmc->capacity_gp[i] = 0;
1795 
1796 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1797 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1798 
1799 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1800 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1801 
1802 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1803 		cmd.cmdidx = MMC_CMD_SET_DSR;
1804 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1805 		cmd.resp_type = MMC_RSP_NONE;
1806 		if (mmc_send_cmd(mmc, &cmd, NULL))
1807 			printf("MMC: SET_DSR failed\n");
1808 	}
1809 
1810 	/* Select the card, and put it into Transfer Mode */
1811 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1812 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
1813 		cmd.resp_type = MMC_RSP_R1;
1814 		cmd.cmdarg = mmc->rca << 16;
1815 		err = mmc_send_cmd(mmc, &cmd, NULL);
1816 
1817 		if (err)
1818 			return err;
1819 	}
1820 
1821 	/*
1822 	 * For SD, its erase group is always one sector
1823 	 */
1824 	mmc->erase_grp_size = 1;
1825 	mmc->part_config = MMCPART_NOAVAILABLE;
1826 	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
1827 		/* check ext_csd version and capacity */
1828 		err = mmc_send_ext_csd(mmc, ext_csd);
1829 		if (err)
1830 			return err;
1831 		if (ext_csd[EXT_CSD_REV] >= 2) {
1832 			/*
1833 			 * According to the JEDEC Standard, the capacity
1834 			 * reported in ext_csd is only valid if it is
1835 			 * greater than 2 GB
1836 			 */
1837 			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1838 					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1839 					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1840 					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1841 			capacity *= MMC_MAX_BLOCK_LEN;
1842 			if ((capacity >> 20) > 2 * 1024)
1843 				mmc->capacity_user = capacity;
1844 		}
1845 
1846 		switch (ext_csd[EXT_CSD_REV]) {
1847 		case 1:
1848 			mmc->version = MMC_VERSION_4_1;
1849 			break;
1850 		case 2:
1851 			mmc->version = MMC_VERSION_4_2;
1852 			break;
1853 		case 3:
1854 			mmc->version = MMC_VERSION_4_3;
1855 			break;
1856 		case 5:
1857 			mmc->version = MMC_VERSION_4_41;
1858 			break;
1859 		case 6:
1860 			mmc->version = MMC_VERSION_4_5;
1861 			break;
1862 		case 7:
1863 			mmc->version = MMC_VERSION_5_0;
1864 			break;
1865 		case 8:
1866 			mmc->version = MMC_VERSION_5_1;
1867 			break;
1868 		}
1869 
1870 		/* The partition data may be non-zero but it is only
1871 		 * effective if PARTITION_SETTING_COMPLETED is set in
1872 		 * EXT_CSD, so ignore any data if this bit is not set,
1873 		 * except for enabling the high-capacity group size
1874 		 * definition (see below). */
1875 		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1876 				    EXT_CSD_PARTITION_SETTING_COMPLETED);
1877 
1878 		/* store the partition info of emmc */
1879 		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1880 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1881 		    ext_csd[EXT_CSD_BOOT_MULT])
1882 			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1883 		if (part_completed &&
1884 		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1885 			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1886 		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
1887 			mmc->esr.mmc_can_trim = 1;
1888 
1889 		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1890 
1891 		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1892 
1893 		for (i = 0; i < 4; i++) {
1894 			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1895 			uint mult = (ext_csd[idx + 2] << 16) +
1896 				(ext_csd[idx + 1] << 8) + ext_csd[idx];
1897 			if (mult)
1898 				has_parts = true;
1899 			if (!part_completed)
1900 				continue;
1901 			mmc->capacity_gp[i] = mult;
1902 			mmc->capacity_gp[i] *=
1903 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1904 			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1905 			mmc->capacity_gp[i] <<= 19;
1906 		}
1907 
1908 		if (part_completed) {
1909 			mmc->enh_user_size =
1910 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
1911 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
1912 				ext_csd[EXT_CSD_ENH_SIZE_MULT];
1913 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1914 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1915 			mmc->enh_user_size <<= 19;
1916 			mmc->enh_user_start =
1917 				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
1918 				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
1919 				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
1920 				ext_csd[EXT_CSD_ENH_START_ADDR];
1921 			if (mmc->high_capacity)
1922 				mmc->enh_user_start <<= 9;
1923 		}
1924 
1925 		/*
1926 		 * The host needs to enable the ERASE_GRP_DEF bit if the device
1927 		 * is partitioned. This bit is lost after every reset or power
1928 		 * off, and it affects the erase size.
1929 		 */
1930 		if (part_completed)
1931 			has_parts = true;
1932 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1933 		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1934 			has_parts = true;
1935 		if (has_parts) {
1936 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1937 				EXT_CSD_ERASE_GROUP_DEF, 1);
1938 
1939 			if (err)
1940 				return err;
1941 			else
1942 				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1943 		}
1944 
1945 		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1946 			/* Read out group size from ext_csd */
1947 			mmc->erase_grp_size =
1948 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1949 			/*
1950 			 * if high capacity and partition setting completed
1951 			 * SEC_COUNT is valid even if it is smaller than 2 GiB
1952 			 * JEDEC Standard JESD84-B45, 6.2.4
1953 			 */
1954 			if (mmc->high_capacity && part_completed) {
1955 				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1956 					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1957 					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1958 					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1959 				capacity *= MMC_MAX_BLOCK_LEN;
1960 				mmc->capacity_user = capacity;
1961 			}
1962 		} else {
1963 			/* Calculate the group size from the csd value. */
1964 			int erase_gsz, erase_gmul;
1965 			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1966 			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1967 			mmc->erase_grp_size = (erase_gsz + 1)
1968 				* (erase_gmul + 1);
1969 		}
1970 
1971 		mmc->hc_wp_grp_size = 1024
1972 			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1973 			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1974 
1975 		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1976 	}
1977 
1978 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1979 	if (err)
1980 		return err;
1981 
1982 	if (IS_SD(mmc))
1983 		err = sd_change_freq(mmc);
1984 	else
1985 		err = mmc_change_freq(mmc);
1986 
1987 	if (err)
1988 		return err;
1989 
1990 	/* Restrict card's capabilities by what the host can do */
1991 	mmc->card_caps &= mmc->cfg->host_caps;
1992 
1993 	if (IS_SD(mmc)) {
1994 		if (mmc->card_caps & MMC_MODE_4BIT) {
1995 			cmd.cmdidx = MMC_CMD_APP_CMD;
1996 			cmd.resp_type = MMC_RSP_R1;
1997 			cmd.cmdarg = mmc->rca << 16;
1998 
1999 			err = mmc_send_cmd(mmc, &cmd, NULL);
2000 			if (err)
2001 				return err;
2002 
2003 			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
2004 			cmd.resp_type = MMC_RSP_R1;
2005 			cmd.cmdarg = 2;
2006 			err = mmc_send_cmd(mmc, &cmd, NULL);
2007 			if (err)
2008 				return err;
2009 
2010 			mmc_set_bus_width(mmc, 4);
2011 		}
2012 
2013 		err = sd_read_ssr(mmc);
2014 		if (err)
2015 			return err;
2016 
2017 		if (mmc->card_caps & MMC_MODE_HS)
2018 			tran_speed = MMC_HIGH_52_MAX_DTR;
2019 		else
2020 			tran_speed = MMC_HIGH_26_MAX_DTR;
2021 
2022 		mmc_set_clock(mmc, tran_speed);
2023 	}
2024 
2025 	/* Fix the block length for DDR mode */
2026 	if (mmc_card_ddr(mmc)) {
2027 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2028 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2029 	}
2030 
2031 	/* fill in device description */
2032 	bdesc = mmc_get_blk_desc(mmc);
2033 	bdesc->lun = 0;
2034 	bdesc->hwpart = 0;
2035 	bdesc->type = 0;
2036 	bdesc->blksz = mmc->read_bl_len;
2037 	bdesc->log2blksz = LOG2(bdesc->blksz);
2038 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2039 #if !defined(CONFIG_SPL_BUILD) || \
2040 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2041 		!defined(CONFIG_USE_TINY_PRINTF))
2042 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2043 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2044 		(mmc->cid[3] >> 16) & 0xffff);
2045 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2046 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2047 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2048 		(mmc->cid[2] >> 24) & 0xff);
2049 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2050 		(mmc->cid[2] >> 16) & 0xf);
2051 #else
2052 	bdesc->vendor[0] = 0;
2053 	bdesc->product[0] = 0;
2054 	bdesc->revision[0] = 0;
2055 #endif
2056 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2057 	part_init(bdesc);
2058 #endif
2059 
2060 	return 0;
2061 }
2062 
2063 #ifndef CONFIG_MMC_USE_PRE_CONFIG
2064 static int mmc_send_if_cond(struct mmc *mmc)
2065 {
2066 	struct mmc_cmd cmd;
2067 	int err;
2068 
2069 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2070 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2071 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2072 	cmd.resp_type = MMC_RSP_R7;
2073 
2074 	err = mmc_send_cmd(mmc, &cmd, NULL);
2075 
2076 	if (err)
2077 		return err;
2078 
2079 	if ((cmd.response[0] & 0xff) != 0xaa)
2080 		return -EOPNOTSUPP;
2081 	else
2082 		mmc->version = SD_VERSION_2;
2083 
2084 	return 0;
2085 }
2086 #endif
2087 
2088 #if !CONFIG_IS_ENABLED(DM_MMC)
2089 /* board-specific MMC power initializations. */
2090 __weak void board_mmc_power_init(void)
2091 {
2092 }
2093 #endif
2094 
2095 #ifndef CONFIG_MMC_USE_PRE_CONFIG
2096 static int mmc_power_init(struct mmc *mmc)
2097 {
2098 #if CONFIG_IS_ENABLED(DM_MMC)
2099 #if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
2100 	struct udevice *vmmc_supply;
2101 	int ret;
2102 
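	/*
	 * Look up the card's supply through the standard "vmmc-supply"
	 * device tree phandle and make sure it is enabled before we start
	 * talking to the card; a missing property is not treated as an
	 * error.
	 */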
2103 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2104 					  &vmmc_supply);
2105 	if (ret) {
2106 		debug("%s: No vmmc supply\n", mmc->dev->name);
2107 		return 0;
2108 	}
2109 
2110 	ret = regulator_set_enable(vmmc_supply, true);
2111 	if (ret) {
2112 		puts("Error enabling VMMC supply\n");
2113 		return ret;
2114 	}
2115 #endif
2116 #else /* !CONFIG_DM_MMC */
2117 	/*
2118 	 * Driver model should use a regulator, as above, rather than calling
2119 	 * out to board code.
2120 	 */
2121 	board_mmc_power_init();
2122 #endif
2123 	return 0;
2124 }
2125 #endif
2126 #ifdef CONFIG_MMC_USE_PRE_CONFIG
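/*
 * Issue CMD7 (SELECT/DESELECT_CARD) for RCA 'n'.  With n == 0 no card is
 * addressed, which deselects the currently selected card and returns it to
 * the stand-by state.  Skipped in SPI mode, where CMD7 does not exist.
 */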
2127 static int mmc_select_card(struct mmc *mmc, int n)
2128 {
2129 	struct mmc_cmd cmd;
2130 	int err = 0;
2131 
2132 	memset(&cmd, 0, sizeof(struct mmc_cmd));
2133 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2134 		mmc->rca = n;
2135 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2136 		cmd.resp_type = MMC_RSP_R1;
2137 		cmd.cmdarg = mmc->rca << 16;
2138 		err = mmc_send_cmd(mmc, &cmd, NULL);
2139 	}
2140 
2141 	return err;
2142 }
2143 
2144 int mmc_start_init(struct mmc *mmc)
2145 {
2146 	/*
2147 	 * We reuse the MMC configuration set up by the bootrom,
2148 	 * so there is no need to reset the eMMC device here.
2149 	 */
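	/*
	 * Re-sync the host controller with the state the bootrom left the
	 * card in: presumably an 8-bit eMMC bus, plus a low clock and legacy
	 * timing as a safe starting point.
	 */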
2150 	mmc_set_bus_width(mmc, 8);
2151 	mmc_set_clock(mmc, 1);
2152 	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
2153 	/* Send CMD7 with RCA 0 to put the card back into stand-by state */
2154 	mmc_select_card(mmc, 0);
2155 	mmc->version = MMC_VERSION_UNKNOWN;
2156 	mmc->high_capacity = 1;
2157 	/*
2158 	 * The Rockchip bootrom assigns RCA 2 to the eMMC, so keep that
2159 	 * value here; otherwise fall back to the usual default of 1.
2160 	 */
2161 #ifdef CONFIG_ARCH_ROCKCHIP
2162 	mmc->rca = 2;
2163 #else
2164 	mmc->rca = 1;
2165 #endif
2166 	return 0;
2167 }
2168 #else
2169 int mmc_start_init(struct mmc *mmc)
2170 {
2171 	bool no_card;
2172 	int err;
2173 
2174 	/* treat the card as absent if card-detect says so or (non-DM) init is NULL */
2175 	no_card = mmc_getcd(mmc) == 0;
2176 #if !CONFIG_IS_ENABLED(DM_MMC)
2177 	no_card = no_card || (mmc->cfg->ops->init == NULL);
2178 #endif
2179 	if (no_card) {
2180 		mmc->has_init = 0;
2181 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2182 		printf("MMC: no card present\n");
2183 #endif
2184 		return -ENOMEDIUM;
2185 	}
2186 
2187 	if (mmc->has_init)
2188 		return 0;
2189 
2190 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2191 	mmc_adapter_card_type_ident();
2192 #endif
2193 	err = mmc_power_init(mmc);
2194 	if (err)
2195 		return err;
2196 
2197 #if CONFIG_IS_ENABLED(DM_MMC)
2198 	/* The device has already been probed ready for use */
2199 #else
2200 	/* made sure it's not NULL earlier */
2201 	err = mmc->cfg->ops->init(mmc);
2202 	if (err)
2203 		return err;
2204 #endif
2205 	mmc_set_bus_width(mmc, 1);
2206 	mmc_set_clock(mmc, 1);
2207 	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
2208 
2209 	/* Reset the Card */
2210 	err = mmc_go_idle(mmc);
2211 
2212 	if (err)
2213 		return err;
2214 
2215 	/* The internal partition selection is reset to the user partition (0) on every CMD0 */
2216 	mmc_get_blk_desc(mmc)->hwpart = 0;
2217 
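	/*
	 * Standard identification sequence after CMD0: CMD8 probes for an
	 * SD v2.00+ card, ACMD41 asks for the SD operating conditions, and
	 * if that times out we retry with CMD1, which only (e)MMC devices
	 * answer.
	 */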
2218 	/* Test for SD version 2 */
2219 	err = mmc_send_if_cond(mmc);
2220 
2221 	/* Now try to get the SD card's operating condition */
2222 	err = sd_send_op_cond(mmc);
2223 
2224 	/* If the command timed out, we check for an MMC card */
2225 	if (err == -ETIMEDOUT) {
2226 		err = mmc_send_op_cond(mmc);
2227 
2228 		if (err) {
2229 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2230 			printf("Card did not respond to voltage select!\n");
2231 #endif
2232 			return -EOPNOTSUPP;
2233 		}
2234 	}
2235 
2236 	if (!err)
2237 		mmc->init_in_progress = 1;
2238 
2239 	return err;
2240 }
2241 #endif
2242 
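/*
 * Second half of the init sequence: finish any deferred OCR polling started
 * by mmc_send_op_cond(), then run the full mmc_startup() identification and
 * configuration, recording in has_init whether the card is now usable.
 */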
2243 static int mmc_complete_init(struct mmc *mmc)
2244 {
2245 	int err = 0;
2246 
2247 	mmc->init_in_progress = 0;
2248 	if (mmc->op_cond_pending)
2249 		err = mmc_complete_op_cond(mmc);
2250 
2251 	if (!err)
2252 		err = mmc_startup(mmc);
2253 	if (err)
2254 		mmc->has_init = 0;
2255 	else
2256 		mmc->has_init = 1;
2257 	return err;
2258 }
2259 
2260 int mmc_init(struct mmc *mmc)
2261 {
2262 	int err = 0;
2263 	__maybe_unused unsigned start;
2264 #if CONFIG_IS_ENABLED(DM_MMC)
2265 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2266 
2267 	upriv->mmc = mmc;
2268 #endif
2269 	if (mmc->has_init)
2270 		return 0;
2271 
2272 	start = get_timer(0);
2273 
2274 	if (!mmc->init_in_progress)
2275 		err = mmc_start_init(mmc);
2276 
2277 	if (!err)
2278 		err = mmc_complete_init(mmc);
2279 	if (err)
2280 		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2281 
2282 	return err;
2283 }
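
/*
 * Typical caller pattern (illustrative sketch only, not part of this driver):
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (mmc && !mmc_init(mmc))
 *		blk_dread(mmc_get_blk_desc(mmc), 0, 1, buffer);
 *
 * find_mmc_device(), mmc_init() and mmc_get_blk_desc() are declared in mmc.h;
 * blk_dread() comes from the generic block layer, and 'buffer' stands in for
 * a caller-provided block buffer.
 */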
2284 
2285 int mmc_set_dsr(struct mmc *mmc, u16 val)
2286 {
2287 	mmc->dsr = val;
2288 	return 0;
2289 }
2290 
2291 /* CPU-specific MMC initializations */
2292 __weak int cpu_mmc_init(bd_t *bis)
2293 {
2294 	return -1;
2295 }
2296 
2297 /* board-specific MMC initializations. */
2298 __weak int board_mmc_init(bd_t *bis)
2299 {
2300 	return -1;
2301 }
2302 
2303 void mmc_set_preinit(struct mmc *mmc, int preinit)
2304 {
2305 	mmc->preinit = preinit;
2306 }
2307 
2308 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2309 static int mmc_probe(bd_t *bis)
2310 {
2311 	return 0;
2312 }
2313 #elif CONFIG_IS_ENABLED(DM_MMC)
2314 static int mmc_probe(bd_t *bis)
2315 {
2316 	int ret, i;
2317 	struct uclass *uc;
2318 	struct udevice *dev;
2319 
2320 	ret = uclass_get(UCLASS_MMC, &uc);
2321 	if (ret)
2322 		return ret;
2323 
2324 	/*
2325 	 * Try to add them in sequence order. Really with driver model we
2326 	 * should allow holes, but the current MMC list does not allow that.
2327 	 * So if we request 0, 1, 3 we will get 0, 1, 2.
2328 	 */
2329 	for (i = 0; ; i++) {
2330 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2331 		if (ret == -ENODEV)
2332 			break;
2333 	}
2334 	uclass_foreach_dev(dev, uc) {
2335 		ret = device_probe(dev);
2336 		if (ret)
2337 			printf("%s - probe failed: %d\n", dev->name, ret);
2338 	}
2339 
2340 	return 0;
2341 }
2342 #else
2343 static int mmc_probe(bd_t *bis)
2344 {
2345 	if (board_mmc_init(bis) < 0)
2346 		cpu_mmc_init(bis);
2347 
2348 	return 0;
2349 }
2350 #endif
2351 
2352 int mmc_initialize(bd_t *bis)
2353 {
2354 	static int initialized = 0;
2355 	int ret;
2356 	if (initialized)	/* Avoid initializing mmc multiple times */
2357 		return 0;
2358 	initialized = 1;
2359 
2360 #if !CONFIG_IS_ENABLED(BLK)
2361 #if !CONFIG_IS_ENABLED(MMC_TINY)
2362 	mmc_list_init();
2363 #endif
2364 #endif
2365 	ret = mmc_probe(bis);
2366 	if (ret)
2367 		return ret;
2368 
2369 #ifndef CONFIG_SPL_BUILD
2370 	print_mmc_devices(',');
2371 #endif
2372 
2373 	mmc_do_preinit();
2374 	return 0;
2375 }
2376 
2377 #ifdef CONFIG_CMD_BKOPS_ENABLE
2378 int mmc_set_bkops_enable(struct mmc *mmc)
2379 {
2380 	int err;
2381 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2382 
2383 	err = mmc_send_ext_csd(mmc, ext_csd);
2384 	if (err) {
2385 		puts("Could not get ext_csd register values\n");
2386 		return err;
2387 	}
2388 
2389 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2390 		puts("Background operations not supported on device\n");
2391 		return -EMEDIUMTYPE;
2392 	}
2393 
2394 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2395 		puts("Background operations already enabled\n");
2396 		return 0;
2397 	}
2398 
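	/*
	 * Note: per the eMMC spec the manual BKOPS_EN bit is one-time
	 * programmable, so once the switch below succeeds it cannot be
	 * cleared again.
	 */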
2399 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2400 	if (err) {
2401 		puts("Failed to enable manual background operations\n");
2402 		return err;
2403 	}
2404 
2405 	puts("Enabled manual background operations\n");
2406 
2407 	return 0;
2408 }
2409 #endif
2410