xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision 9cb4a869eb4230279bba9c9e57b7a59e10dfd03f)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
/*
 * SD AU (allocation unit) size in 512-byte sectors, indexed by the
 * 4-bit AU_SIZE field of the SD status register. Index 0 means the
 * AU size is not defined by the card.
 */
static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};

/* File-scope cache of the most recent 512-byte EXT_CSD read. */
static char mmc_ext_csd[512];
34 
#if CONFIG_IS_ENABLED(MMC_TINY)
/* Tiny build: exactly one static MMC device, no device list. */
static struct mmc mmc_static;

/* Always returns the single static device; dev_num is ignored. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Start card init early when the device is flagged for pre-init. */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* In the tiny build the block descriptor is embedded in struct mmc. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
57 
#if !CONFIG_IS_ENABLED(DM_MMC)
/* Board hook: return -1 when the board has no write-protect knowledge. */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Query write-protect state: board hook first, then the host driver's
 * getwp op; default to writable (0) when neither can tell.
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Board hook: return -1 when the board has no card-detect knowledge. */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif
85 
#ifdef CONFIG_MMC_TRACE
/* Log command index and argument before the command is sent. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/* Log the driver's return code, or decode the response by type. */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			/* R2 (CID/CSD) is 128 bits: print all four words. */
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* Dump byte-wise, most significant byte first. */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Log the card's current state decoded from an R1 status response. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif
153 
#if !CONFIG_IS_ENABLED(DM_MMC)
/* Send a command via the host driver, with optional trace logging. */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif
166 
167 int mmc_send_status(struct mmc *mmc, int timeout)
168 {
169 	struct mmc_cmd cmd;
170 	int err, retries = 5;
171 
172 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
173 	cmd.resp_type = MMC_RSP_R1;
174 	if (!mmc_host_is_spi(mmc))
175 		cmd.cmdarg = mmc->rca << 16;
176 
177 	while (1) {
178 		err = mmc_send_cmd(mmc, &cmd, NULL);
179 		if (!err) {
180 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
181 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
182 			     MMC_STATE_PRG)
183 				break;
184 			else if (cmd.response[0] & MMC_STATUS_MASK) {
185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
186 				printf("Status Error: 0x%08X\n",
187 					cmd.response[0]);
188 #endif
189 				return -ECOMM;
190 			}
191 		} else if (--retries < 0)
192 			return err;
193 
194 		if (timeout-- <= 0)
195 			break;
196 
197 		udelay(1000);
198 	}
199 
200 	mmc_trace_state(mmc, &cmd);
201 	if (timeout <= 0) {
202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
203 		printf("Timeout waiting card ready\n");
204 #endif
205 		return -ETIMEDOUT;
206 	}
207 
208 	return 0;
209 }
210 
211 int mmc_set_blocklen(struct mmc *mmc, int len)
212 {
213 	struct mmc_cmd cmd;
214 
215 	if (mmc_card_ddr(mmc))
216 		return 0;
217 
218 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
219 	cmd.resp_type = MMC_RSP_R1;
220 	cmd.cmdarg = len;
221 
222 	return mmc_send_cmd(mmc, &cmd, NULL);
223 }
224 
/*
 * Read blkcnt blocks starting at 'start' into dst using CMD17/CMD18.
 * Multi-block reads are terminated with CMD12 (STOP_TRANSMISSION).
 *
 * Return: number of blocks read, or 0 on any failure (callers compare
 * the return value against the requested count).
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* High-capacity cards address by block, others by byte offset. */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	/* Open-ended multi-block read must be closed with CMD12. */
	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
265 
#ifdef CONFIG_SPL_BLK_READ_PREPARE
/*
 * Variant of mmc_read_blocks() that only issues the read via the
 * driver's "prepare" path (mmc_send_cmd_prepare) and does not send
 * CMD12; completion is handled elsewhere by the SPL read-prepare flow.
 *
 * Return: blkcnt on success, 0 on failure.
 */
static int mmc_read_blocks_prepare(struct mmc *mmc, void *dst, lbaint_t start,
				   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* High-capacity cards address by block, others by byte offset. */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd_prepare(mmc, &cmd, &data))
		return 0;

	return blkcnt;
}
#endif
296 
#ifdef CONFIG_SPL_BLK_READ_PREPARE
/*
 * Block-layer read entry point for the SPL "read prepare" path.
 * Validates the request against the partition size, sets the block
 * length, and issues the read; on failure the card is re-initialized
 * and the read retried up to seven times.
 *
 * Return: blkcnt on success, 0 on any failure.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
			void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int timeout = 0;	/* re-init retry counter, not a time value */
	int err;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);

	if (!mmc)
		return 0;

	/* Select the hw partition the descriptor points at. */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* Reject reads running past the end of the device/partition. */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
		debug("%s: Failed to read blocks\n", __func__);
re_init_retry:
		timeout++;
		/*
		 * Try re-init seven times.
		 */
		if (timeout > 7) {
			printf("Re-init retry timeout\n");
			return 0;
		}

		/* Force a full re-init before retrying the read. */
		mmc->has_init = 0;
		if (mmc_init(mmc))
			return 0;

		if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
			printf("%s: Re-init mmc_read_blocks_prepare error\n",
			       __func__);
			goto re_init_retry;
		}
	}

	return blkcnt;
}
#endif
367 
/*
 * Block-layer read entry point. Splits the request into chunks of at
 * most cfg->b_max blocks; on a chunk failure the card is re-initialized
 * and the chunk retried up to seven times.
 *
 * Return: blkcnt on success, 0 on any failure.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	/* Divert to the SPL read-prepare path when requested. */
#ifdef CONFIG_SPL_BLK_READ_PREPARE
	if (block_dev->op_flag == BLK_PRE_RW)
#if CONFIG_IS_ENABLED(BLK)
		return mmc_bread_prepare(dev, start, blkcnt, dst);
#else
		return mmc_bread_prepare(block_dev, start, blkcnt, dst);
#endif
#endif
	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* Select the hw partition the descriptor points at. */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* Reject reads running past the end of the device/partition. */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		/* Chunk size is capped by the host's max transfer. */
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			int timeout = 0;	/* retry counter, not a time */
re_init_retry:
			timeout++;
			/*
			 * Try re-init seven times.
			 */
			if (timeout > 7) {
				printf("Re-init retry timeout\n");
				return 0;
			}

			/* Force a full re-init before retrying the chunk. */
			mmc->has_init = 0;
			if (mmc_init(mmc))
				return 0;

			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
				printf("%s: Re-init mmc_read_blocks error\n",
				       __func__);
				goto re_init_retry;
			}
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
451 
452 void mmc_set_clock(struct mmc *mmc, uint clock)
453 {
454 	if (clock > mmc->cfg->f_max)
455 		clock = mmc->cfg->f_max;
456 
457 	if (clock < mmc->cfg->f_min)
458 		clock = mmc->cfg->f_min;
459 
460 	mmc->clock = clock;
461 
462 	mmc_set_ios(mmc);
463 }
464 
465 static void mmc_set_bus_width(struct mmc *mmc, uint width)
466 {
467 	mmc->bus_width = width;
468 
469 	mmc_set_ios(mmc);
470 }
471 
472 static void mmc_set_timing(struct mmc *mmc, uint timing)
473 {
474 	mmc->timing = timing;
475 	mmc_set_ios(mmc);
476 }
477 
478 static int mmc_go_idle(struct mmc *mmc)
479 {
480 	struct mmc_cmd cmd;
481 	int err;
482 
483 	udelay(1000);
484 
485 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
486 	cmd.cmdarg = 0;
487 	cmd.resp_type = MMC_RSP_NONE;
488 
489 	err = mmc_send_cmd(mmc, &cmd, NULL);
490 
491 	if (err)
492 		return err;
493 
494 	udelay(2000);
495 
496 	return 0;
497 }
498 
#ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * SD card power-up: loop on CMD55 + ACMD41 until the card reports it
 * has finished its internal init (OCR busy bit set), then record the
 * OCR, capacity class and version.
 *
 * Return: 0 on success, -EOPNOTSUPP after ~1 s of busy, or a
 * transport error.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD). */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		/* Advertise high-capacity support to SD 2.0 cards. */
		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* OCR_BUSY set means the card finished initializing. */
		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
#endif
568 
569 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
570 {
571 	struct mmc_cmd cmd;
572 	int err;
573 
574 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
575 	cmd.resp_type = MMC_RSP_R3;
576 	cmd.cmdarg = 0;
577 	if (use_arg && !mmc_host_is_spi(mmc))
578 		cmd.cmdarg = OCR_HCS |
579 			(mmc->cfg->voltages &
580 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
581 			(mmc->ocr & OCR_ACCESS_MODE);
582 
583 	err = mmc_send_cmd(mmc, &cmd, NULL);
584 	if (err)
585 		return err;
586 	mmc->ocr = cmd.response[0];
587 	return 0;
588 }
589 
#ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * Start eMMC power-up: reset to idle, then probe the card's OCR with
 * up to two CMD1 iterations. Completion (waiting for the busy bit) is
 * deferred to mmc_complete_op_cond().
 *
 * Return: 0 on success or a transport error.
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Asking to the card its capabilities */
	for (i = 0; i < 2; i++) {
		/* First pass probes with arg 0, second echoes the OCR. */
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}
#endif
/*
 * Finish eMMC power-up started by mmc_send_op_cond(): poll CMD1 until
 * the OCR busy bit is set (up to ~1 s), read the OCR over SPI if
 * needed, and record capacity class and the default RCA.
 *
 * Return: 0 on success, -EOPNOTSUPP on timeout, or a transport error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* Exact version is determined later from the CSD/EXT_CSD. */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
657 
658 
659 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
660 {
661 	struct mmc_cmd cmd;
662 	struct mmc_data data;
663 	int err;
664 
665 #ifdef CONFIG_MMC_USE_PRE_CONFIG
666 	static int initialized;
667 	if (initialized) {
668 		memcpy(ext_csd, mmc_ext_csd, 512);
669 		return 0;
670 	}
671 
672 	initialized = 1;
673 #endif
674 	/* Get the Card Status Register */
675 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
676 	cmd.resp_type = MMC_RSP_R1;
677 	cmd.cmdarg = 0;
678 
679 	data.dest = (char *)ext_csd;
680 	data.blocks = 1;
681 	data.blocksize = MMC_MAX_BLOCK_LEN;
682 	data.flags = MMC_DATA_READ;
683 
684 	err = mmc_send_cmd(mmc, &cmd, &data);
685 	memcpy(mmc_ext_csd, ext_csd, 512);
686 #if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD)
687 	char *mmc_ecsd_base = NULL;
688 	ulong mmc_ecsd;
689 
690 	mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0);
691 	mmc_ecsd_base = (char *)mmc_ecsd;
692 	if (mmc_ecsd_base) {
693 		memcpy(mmc_ecsd_base, ext_csd, 512);
694 		*(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa;
695 	}
696 #endif
697 	return err;
698 }
699 
/*
 * Wait for the card to leave the busy/programming state after a CMD6
 * switch.
 *
 * @send_status: when true, poll CMD13 and fail on the switch-error
 *		 status bit; when false, use the host's card-busy
 *		 detection — or just sleep the full 1000 ms when the
 *		 host cannot report busy at all.
 *
 * Return: 0 when idle, -EBADMSG on a switch error, -ETIMEDOUT after
 * ~1 s of continuous busy, or a transport error.
 */
static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	/* No CMD13 and no busy detection: fall back to a fixed delay. */
	if (!send_status && !mmc_can_card_busy(mmc)) {
		mdelay(timeout);
		return 0;
	}

	do {
		if (!send_status) {
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
				MMC_STATE_PRG;
		}

		/* Only time out while the card still reports busy. */
		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}
740 
/*
 * Issue CMD6 (SWITCH) to write one EXT_CSD byte, then wait for the
 * card to finish via mmc_poll_for_busy(). Retries the command up to
 * three times on transport errors.
 *
 * @set:	command set — NOTE(review): never folded into cmdarg;
 *		the access mode is hard-coded to WRITE_BYTE. Confirm
 *		this parameter is intentionally unused.
 * @index:	EXT_CSD byte index
 * @value:	value to write
 * @send_status: forwarded to mmc_poll_for_busy()
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			u8 send_status)
{
	struct mmc_cmd cmd;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	do {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		if (!ret)
			return mmc_poll_for_busy(mmc, send_status);
	} while (--retries > 0 && ret);

	return ret;
}
763 
/* CMD6 write-byte switch with mandatory CMD13 busy polling. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
768 
/*
 * Probe the widest working bus width (8-bit first, then 4-bit) by
 * switching the card and re-reading EXT_CSD at the new width; a width
 * is accepted when read-only EXT_CSD fields match the 1-bit read.
 *
 * Return: the accepted MMC_BUS_WIDTH_* value (> 0), 0 when the card
 * or host does not support wide buses, or a negative error
 * (-EBADMSG when no width verified).
 */
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	/* Wide bus needs eMMC v4+ and host 4/8-bit capability. */
	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* Start at 8-bit only if the host supports it. */
	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			&test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}
841 
#ifndef CONFIG_MMC_SIMPLE
/*
 * Standard tuning block patterns (CMD19/CMD21 reference data) for
 * 4-bit and 8-bit buses; the read-back buffer is compared against
 * these in mmc_send_tuning().
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
872 
873 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
874 {
875 	struct mmc_cmd cmd;
876 	struct mmc_data data;
877 	const u8 *tuning_block_pattern;
878 	int size, err = 0;
879 	u8 *data_buf;
880 
881 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
882 		tuning_block_pattern = tuning_blk_pattern_8bit;
883 		size = sizeof(tuning_blk_pattern_8bit);
884 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
885 		tuning_block_pattern = tuning_blk_pattern_4bit;
886 		size = sizeof(tuning_blk_pattern_4bit);
887 	} else {
888 		return -EINVAL;
889 	}
890 
891 	data_buf = calloc(1, size);
892 	if (!data_buf)
893 		return -ENOMEM;
894 
895 	cmd.cmdidx = opcode;
896 	cmd.resp_type = MMC_RSP_R1;
897 	cmd.cmdarg = 0;
898 
899 	data.dest = (char *)data_buf;
900 	data.blocksize = size;
901 	data.blocks = 1;
902 	data.flags = MMC_DATA_READ;
903 
904 	err = mmc_send_cmd(mmc, &cmd, &data);
905 	if (err)
906 		goto out;
907 
908 	if (memcmp(data_buf, tuning_block_pattern, size))
909 		err = -EIO;
910 out:
911 	free(data_buf);
912 	return err;
913 }
914 
/*
 * Run the host controller's tuning procedure with the opcode
 * appropriate for the card type (CMD19 for SD, CMD21 for eMMC HS200).
 *
 * Return: the driver's tuning result, or -EIO when the host provides
 * no tuning op.
 */
static int mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

	/*
	 * NOTE: the #ifdef below deliberately splits one if/else across
	 * preprocessor branches; both expansions are well-formed.
	 */
#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}
939 
/* Tune the sampling point for HS200 operation. */
static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}

#else
/* CONFIG_MMC_SIMPLE: tuning is not performed; report success. */
int mmc_send_tuning(struct mmc *mmc, u32 opcode) { return 0; }
int mmc_execute_tuning(struct mmc *mmc) { return 0; }
static int mmc_hs200_tuning(struct mmc *mmc) { return 0; }
#endif
950 
951 static int mmc_select_hs(struct mmc *mmc)
952 {
953 	int ret;
954 
955 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
956 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
957 
958 	if (!ret)
959 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
960 
961 	return ret;
962 }
963 
964 static int mmc_select_hs_ddr(struct mmc *mmc)
965 {
966 	u32 ext_csd_bits;
967 	int err = 0;
968 
969 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
970 		return 0;
971 
972 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
973 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
974 
975 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
976 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
977 	if (err)
978 		return err;
979 
980 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
981 
982 	return 0;
983 }
984 
#ifndef CONFIG_MMC_SIMPLE
/*
 * Switch the card into HS200: first negotiate a wide (4/8-bit) bus,
 * then issue the HS200 timing switch without CMD13 polling (the card
 * is not CMD13-pollable during this transition).
 *
 * Return: > 0 (the bus width) on success, 0 when wide bus is not
 * available so HS200 was not attempted, or a negative error.
 */
static int mmc_select_hs200(struct mmc *mmc)
{
	int ret;

	/*
	 * Set the bus width(4 or 8) with host's support and
	 * switch to HS200 mode if bus width is set successfully.
	 */
	ret = mmc_select_bus_width(mmc);

	if (ret > 0) {
		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING,
				   EXT_CSD_TIMING_HS200, false);

		if (ret)
			return ret;

		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
	}

	return ret;
}
1009 
/*
 * Switch a card already running HS200 into HS400 following the JEDEC
 * sequence: drop to HS timing and 52 MHz, switch the bus to 8-bit
 * DDR, then select HS400 timing.
 *
 * Return: 0 on success or the first failing step's error.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int ret;

	/* Switch card to HS mode */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (ret)
		return ret;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	ret = mmc_send_status(mmc, 1000);
	if (ret)
		return ret;

	/* Switch card to DDR */
	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8);
	if (ret)
		return ret;

	/* Switch card to HS400 */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
	if (ret)
		return ret;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);

	return ret;
}
1048 
/*
 * Switch the card into HS400 with enhanced strobe: drop to HS timing
 * at 52 MHz, switch to 8-bit DDR with the strobe bit set, select
 * HS400 timing, then enable enhanced strobe on the host.
 *
 * Return: 0 on success or the first failing step's error.
 */
static int mmc_select_hs400es(struct mmc *mmc)
{
	int err;

	/* Switch card to HS mode */
	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (err)
		return err;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	err = mmc_send_status(mmc, 1000);
	if (err)
		return err;

	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	/* 8-bit DDR bus with the enhanced-strobe flag set. */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8 |
			 EXT_CSD_BUS_WIDTH_STROBE);
	if (err) {
		printf("switch to bus width for hs400 failed\n");
		return err;
	}

	/* Switch card to HS400 */
	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
	if (err)
		return err;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400ES);

	return mmc_set_enhanced_strobe(mmc);
}

#else
/* CONFIG_MMC_SIMPLE: no high-speed mode selection; report success. */
static int mmc_select_hs200(struct mmc *mmc) { return 0; }
static int mmc_select_hs400(struct mmc *mmc) { return 0; }
static int mmc_select_hs400es(struct mmc *mmc) { return 0; }
#endif
1092 
/*
 * Intersect the card's EXT_CSD CARD_TYPE capabilities with the host's
 * caps and return the resulting EXT_CSD_CARD_TYPE_* mask of usable
 * modes (HS26/HS52/DDR52/HS200/HS400/HS400ES).
 */
static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, u-boot doesn't support signal voltage
	 * switch, therefor we assume that host support ddr52
	 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and
	 * hs400 are the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
				EXT_CSD_CARD_TYPE_HS400_1_8V;

	/* HS400ES additionally needs the card to advertise strobe support. */
	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
				EXT_CSD_CARD_TYPE_HS400_1_8V |
				EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}
1143 
1144 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
1145 {
1146 	int clock = 0;
1147 
1148 	if (mmc_card_hs(mmc))
1149 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
1150 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
1151 	else if (mmc_card_hs200(mmc) ||
1152 		 mmc_card_hs400(mmc) ||
1153 		 mmc_card_hs400es(mmc))
1154 		clock = MMC_HS200_MAX_DTR;
1155 
1156 	mmc_set_clock(mmc, clock);
1157 }
1158 
/*
 * Negotiate the fastest supported bus mode for an eMMC card: try
 * HS400ES first, then HS200 (upgrading to HS400 when possible), then
 * plain HS, finally falling back to wide-bus SDR/DDR52.
 *
 * Return: 0 on success or a negative error from the last attempted
 * mode switch.
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

	/* Prefer HS400ES: needs an 8-bit bus; fall through on failure. */
	if (avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
		err = mmc_select_bus_width(mmc);
		if (err > 0 && mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400es(mmc);
			mmc_set_bus_speed(mmc, avail_type);
			if (!err)
				return err;
		}
	}

	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	if (mmc_card_hs200(mmc)) {
		/* Tune at HS200, then upgrade to HS400 when available. */
		err = mmc_hs200_tuning(mmc);
		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400(mmc);
			mmc_set_bus_speed(mmc, avail_type);
		}
	} else if (!mmc_card_hs400es(mmc)) {
		/* Plain HS: widen the bus, then try DDR52 when offered. */
		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
			err = mmc_select_hs_ddr(mmc);
	}

	return err;
}
1220 
1221 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1222 {
1223 	switch (part_num) {
1224 	case 0:
1225 		mmc->capacity = mmc->capacity_user;
1226 		break;
1227 	case 1:
1228 	case 2:
1229 		mmc->capacity = mmc->capacity_boot;
1230 		break;
1231 	case 3:
1232 		mmc->capacity = mmc->capacity_rpmb;
1233 		break;
1234 	case 4:
1235 	case 5:
1236 	case 6:
1237 	case 7:
1238 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1239 		break;
1240 	default:
1241 		return -1;
1242 	}
1243 
1244 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1245 
1246 	return 0;
1247 }
1248 
1249 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1250 {
1251 	int ret;
1252 
1253 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1254 			 (mmc->part_config & ~PART_ACCESS_MASK)
1255 			 | (part_num & PART_ACCESS_MASK));
1256 
1257 	/*
1258 	 * Set the capacity if the switch succeeded or was intended
1259 	 * to return to representing the raw device.
1260 	 */
1261 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1262 		ret = mmc_set_capacity(mmc, part_num);
1263 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1264 	}
1265 
1266 	return ret;
1267 }
1268 
/*
 * Validate and/or apply an eMMC hardware partition configuration:
 * enhanced user data area, general purpose partitions and write
 * reliability settings.
 *
 * @conf: requested layout (sizes in units the caller expresses in
 *        512-byte sectors; enhanced sizes must be HC WP group aligned)
 * @mode: MMC_HWPART_CONF_CHECK    - validate only, write nothing
 *        MMC_HWPART_CONF_SET      - validate and program the layout
 *        MMC_HWPART_CONF_COMPLETE - additionally set
 *                                   PARTITION_SETTING_COMPLETED, which
 *                                   is irreversible and takes effect
 *                                   after a power cycle
 *
 * Returns 0 on success or a negative error code.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	/* Hardware partitioning is an eMMC >= 4.41 feature */
	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards take the start in bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	/* Partitioning is one-shot; refuse if it was already completed */
	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1462 
1463 #if !CONFIG_IS_ENABLED(DM_MMC)
1464 int mmc_getcd(struct mmc *mmc)
1465 {
1466 	int cd;
1467 
1468 	cd = board_mmc_getcd(mmc);
1469 
1470 	if (cd < 0) {
1471 		if (mmc->cfg->ops->getcd)
1472 			cd = mmc->cfg->ops->getcd(mmc);
1473 		else
1474 			cd = 1;
1475 	}
1476 
1477 	return cd;
1478 }
1479 #endif
1480 
1481 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1482 {
1483 	struct mmc_cmd cmd;
1484 	struct mmc_data data;
1485 
1486 	/* Switch the frequency */
1487 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1488 	cmd.resp_type = MMC_RSP_R1;
1489 	cmd.cmdarg = (mode << 31) | 0xffffff;
1490 	cmd.cmdarg &= ~(0xf << (group * 4));
1491 	cmd.cmdarg |= value << (group * 4);
1492 
1493 	data.dest = (char *)resp;
1494 	data.blocksize = 64;
1495 	data.blocks = 1;
1496 	data.flags = MMC_DATA_READ;
1497 
1498 	return mmc_send_cmd(mmc, &cmd, &data);
1499 }
1500 
1501 
/*
 * Read the SD card's SCR and switch-function status to discover the
 * spec version, bus-width and high-speed capabilities, then switch the
 * card to high-speed mode when both card and host support it.
 * Updates mmc->version, mmc->scr[] and mmc->card_caps.
 *
 * Returns 0 on success or a negative error code.
 */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	/* No speed negotiation over SPI */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	/* The SCR is an 8-byte big-endian register read as a data block */
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC/SD_SPEC3 fields of the SCR select the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card supports SD_HIGHSPEED.
	 * This can avoid further problems when the card runs in a different
	 * mode than the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* Function group 1 result 0x1 confirms the high-speed switch */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
1611 
/*
 * Read the 64-byte SD Status (SSR) via ACMD13 and extract the
 * allocation unit size and erase timing parameters into mmc->ssr,
 * which the erase code uses to compute timeouts.
 *
 * Returns 0 on success (including when the AU size code is invalid,
 * which is only logged) or a negative error from the transfer.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD prefix: next command is application-specific */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* The SSR is big-endian; convert it word by word */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE is a 4-bit code; codes above 9 are SD 3.0+ only */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* erase timeout per AU in ms, plus fixed offset */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1667 
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
/* indexed by the transfer-rate-unit field (bits 2:0) of CSD TRAN_SPEED */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1676 
/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by the time-value field (bits 6:3) of CSD TRAN_SPEED.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1698 
1699 #if !CONFIG_IS_ENABLED(DM_MMC)
1700 static void mmc_set_ios(struct mmc *mmc)
1701 {
1702 	if (mmc->cfg->ops->set_ios)
1703 		mmc->cfg->ops->set_ios(mmc);
1704 }
1705 
1706 static bool mmc_card_busy(struct mmc *mmc)
1707 {
1708 	if (!mmc->cfg->ops->card_busy)
1709 		return -ENOSYS;
1710 
1711 	return mmc->cfg->ops->card_busy(mmc);
1712 }
1713 
1714 static bool mmc_can_card_busy(struct mmc *)
1715 {
1716 	return !!mmc->cfg->ops->card_busy;
1717 }
1718 #endif
1719 
/*
 * Bring a freshly identified card from Identification to Transfer
 * state and fill in the mmc and block descriptors: read CID and CSD
 * (plus EXT_CSD for eMMC >= 4), derive capacities and erase/WP group
 * sizes, negotiate bus width and clock, and register the block device
 * geometry.
 *
 * Returns 0 on success or a negative error code.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq, tran_speed;
	u64 cmult, csize, capacity;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	bool has_parts = false;
	bool part_completed;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}
#endif
#ifndef CONFIG_MMC_USE_PRE_CONFIG
	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}
#endif
	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* Derive the MMC version from the CSD SPEC_VERS field if unknown */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* CSD capacity: the C_SIZE/C_SIZE_MULT layout differs for HC cards */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR when the card declares one and the user set one */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check  ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			/*
			 * NOTE(review): each SEC_CNT byte is promoted to int
			 * before shifting; a top byte >= 0x80 shifted by 24
			 * overflows signed int -- consider (u64) casts.
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		/* Refine the version from the EXT_CSD revision */
		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
			mmc->esr.mmc_can_trim = 1;

		/* boot and RPMB partition sizes: *_MULT units of 128 KiB */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			/* NOTE: this 'mult' shadows the tran-speed mult above */
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		if (part_completed) {
			/* enhanced user area size/start, 3- and 4-byte LE fields */
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		/* high-capacity write-protect group size, in 512-byte units */
		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	/* Negotiate the fastest bus mode supported by card and host */
	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		if (mmc->card_caps & MMC_MODE_4BIT) {
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			/* ACMD6: argument 2 selects the 4-bit bus */
			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = MMC_HIGH_52_MAX_DTR;
		else
			tran_speed = MMC_HIGH_26_MAX_DTR;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}
2112 
2113 #ifndef CONFIG_MMC_USE_PRE_CONFIG
2114 static int mmc_send_if_cond(struct mmc *mmc)
2115 {
2116 	struct mmc_cmd cmd;
2117 	int err;
2118 
2119 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2120 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2121 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2122 	cmd.resp_type = MMC_RSP_R7;
2123 
2124 	err = mmc_send_cmd(mmc, &cmd, NULL);
2125 
2126 	if (err)
2127 		return err;
2128 
2129 	if ((cmd.response[0] & 0xff) != 0xaa)
2130 		return -EOPNOTSUPP;
2131 	else
2132 		mmc->version = SD_VERSION_2;
2133 
2134 	return 0;
2135 }
2136 #endif
2137 
2138 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations; weak no-op default that
 * boards without driver-model regulators may override. */
__weak void board_mmc_power_init(void)
{
}
2143 #endif
2144 
2145 #ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * Enable the card's power supply before initialization.
 *
 * With driver model, look up and enable the device's "vmmc-supply"
 * regulator (a missing regulator is not an error); otherwise fall back
 * to the legacy board_mmc_power_init() hook.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* no regulator described: assume power is always on */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2175 #endif
2176 #ifdef CONFIG_MMC_USE_PRE_CONFIG
2177 static int mmc_select_card(struct mmc *mmc, int n)
2178 {
2179 	struct mmc_cmd cmd;
2180 	int err = 0;
2181 
2182 	memset(&cmd, 0, sizeof(struct mmc_cmd));
2183 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2184 		mmc->rca = n;
2185 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2186 		cmd.resp_type = MMC_RSP_R1;
2187 		cmd.cmdarg = mmc->rca << 16;
2188 		err = mmc_send_cmd(mmc, &cmd, NULL);
2189 	}
2190 
2191 	return err;
2192 }
2193 
/*
 * CONFIG_MMC_USE_PRE_CONFIG variant: the bootrom already initialized
 * the eMMC, so skip the full identification sequence and only restore
 * a known bus state (8-bit, minimal clock, legacy timing) plus the RCA
 * the bootrom assigned.  Always succeeds.
 */
int mmc_start_init(struct mmc *mmc)
{
	/*
	 * We use the MMC config set by the bootrom.
	 * So it is no need to reset the eMMC device.
	 */
	mmc_set_bus_width(mmc, 8);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
	/* Send cmd7 to return stand-by state*/
	mmc_select_card(mmc, 0);
	mmc->version = MMC_VERSION_UNKNOWN;
	mmc->high_capacity = 1;
	/*
	 * The RCA is set to 2 by rockchip bootrom, use the default
	 * value here.
	 */
#ifdef CONFIG_ARCH_ROCKCHIP
	mmc->rca = 2;
#else
	mmc->rca = 1;
#endif
	return 0;
}
2218 #else
/*
 * Begin card initialization: check card presence, power up the host,
 * reset the card with CMD0 and start the voltage/OCR negotiation
 * (SD first, then MMC on timeout).  On success init_in_progress is
 * set and mmc_complete_init() finishes the job.
 *
 * Returns 0 on success, -ENOMEDIUM when no card is present, or a
 * negative error code.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* Start from the slowest, narrowest bus configuration */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2.  The result is deliberately ignored:
	 * older SD cards simply fail CMD8.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2291 #endif
2292 
2293 static int mmc_complete_init(struct mmc *mmc)
2294 {
2295 	int err = 0;
2296 
2297 	mmc->init_in_progress = 0;
2298 	if (mmc->op_cond_pending)
2299 		err = mmc_complete_op_cond(mmc);
2300 
2301 	if (!err)
2302 		err = mmc_startup(mmc);
2303 	if (err)
2304 		mmc->has_init = 0;
2305 	else
2306 		mmc->has_init = 1;
2307 	return err;
2308 }
2309 
/*
 * Fully initialize a card: run (or resume) the identification sequence
 * and bring the device to Transfer state.  Safe to call repeatedly;
 * returns immediately once has_init is set.  On failure the error and
 * elapsed time are logged.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2334 
/* Record the Driver Stage Register value to be programmed into the
 * card (CMD4) during mmc_startup().  Always returns 0. */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2340 
/* CPU-specific MMC initializations; weak default registers nothing
 * and returns -1 so callers know no controller was set up. */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2346 
/* board-specific MMC initializations; weak default registers nothing
 * and returns -1 so mmc_probe() falls back to cpu_mmc_init(). */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2352 
/* Mark (or unmark) this device for early probing in mmc_do_preinit(). */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2357 
2358 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with DM_MMC: devices are probed on first use; nothing to do. */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
2363 #elif CONFIG_IS_ENABLED(DM_MMC)
2364 static int mmc_probe(bd_t *bis)
2365 {
2366 	int ret, i;
2367 	struct uclass *uc;
2368 	struct udevice *dev;
2369 
2370 	ret = uclass_get(UCLASS_MMC, &uc);
2371 	if (ret)
2372 		return ret;
2373 
2374 	/*
2375 	 * Try to add them in sequence order. Really with driver model we
2376 	 * should allow holes, but the current MMC list does not allow that.
2377 	 * So if we request 0, 1, 3 we will get 0, 1, 2.
2378 	 */
2379 	for (i = 0; ; i++) {
2380 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2381 		if (ret == -ENODEV)
2382 			break;
2383 	}
2384 	uclass_foreach_dev(dev, uc) {
2385 		ret = device_probe(dev);
2386 		if (ret)
2387 			printf("%s - probe failed: %d\n", dev->name, ret);
2388 	}
2389 
2390 	return 0;
2391 }
2392 #else
2393 static int mmc_probe(bd_t *bis)
2394 {
2395 	if (board_mmc_init(bis) < 0)
2396 		cpu_mmc_init(bis);
2397 
2398 	return 0;
2399 }
2400 #endif
2401 
/*
 * One-time setup of the MMC subsystem: set up the device list (legacy,
 * non-BLK builds), probe all controllers, list the devices found, and
 * pre-initialize any card flagged for early init.
 *
 * Safe to call more than once; subsequent calls return 0 immediately.
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	/* Start init now for cards registered with mmc_set_preinit(). */
	mmc_do_preinit();
	return 0;
}
2426 
2427 #ifdef CONFIG_CMD_BKOPS_ENABLE
2428 int mmc_set_bkops_enable(struct mmc *mmc)
2429 {
2430 	int err;
2431 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2432 
2433 	err = mmc_send_ext_csd(mmc, ext_csd);
2434 	if (err) {
2435 		puts("Could not get ext_csd register values\n");
2436 		return err;
2437 	}
2438 
2439 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2440 		puts("Background operations not supported on device\n");
2441 		return -EMEDIUMTYPE;
2442 	}
2443 
2444 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2445 		puts("Background operations already enabled\n");
2446 		return 0;
2447 	}
2448 
2449 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2450 	if (err) {
2451 		puts("Failed to enable manual background operations\n");
2452 		return err;
2453 	}
2454 
2455 	puts("Enabled manual background operations\n");
2456 
2457 	return 0;
2458 }
2459 #endif
2460