xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision d079c1a5ed50ce9f1e3b37be5ce4e13edec90bd0)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
/*
 * Lookup table indexed by the SD Status AU_SIZE field: allocation unit
 * size expressed in 512-byte sectors.  Index 0 means "not defined";
 * see the SD Physical Layer spec for the encoding — TODO confirm the
 * upper (SD 3.0) entries against the spec revision in use.
 */
static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};
32 
/* Cached copy of the 512-byte EXT_CSD register; see mmc_send_ext_csd(). */
static char mmc_ext_csd[512];
34 
#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY builds support exactly one, statically allocated, device. */
static struct mmc mmc_static;
37 struct mmc *find_mmc_device(int dev_num)
38 {
39 	return &mmc_static;
40 }
41 
/*
 * Kick off early (pre-)initialization for the single MMC_TINY device.
 * Only a device with its ->preinit flag set has mmc_start_init() run
 * here.
 */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* These boards force preinit on unconditionally. */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}
51 
52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
53 {
54 	return &mmc->block_dev;
55 }
56 #endif
57 
58 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Board-level write-protect hook.  The weak default returns -1
 * ("not implemented") so mmc_getwp() falls through to the host
 * controller's getwp op.
 */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
63 
64 int mmc_getwp(struct mmc *mmc)
65 {
66 	int wp;
67 
68 	wp = board_mmc_getwp(mmc);
69 
70 	if (wp < 0) {
71 		if (mmc->cfg->ops->getwp)
72 			wp = mmc->cfg->ops->getwp(mmc);
73 		else
74 			wp = 0;
75 	}
76 
77 	return wp;
78 }
79 
/*
 * Board-level card-detect hook; the weak default reports -1
 * ("not implemented") so callers use controller detection instead.
 */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
84 #endif
85 
86 #ifdef CONFIG_MMC_TRACE
87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
88 {
89 	printf("CMD_SEND:%d\n", cmd->cmdidx);
90 	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
91 }
92 
/*
 * Trace the outcome of a command: the error code on failure, or the
 * response word(s) decoded according to the command's response type.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			/* 136-bit response: print all four 32-bit words */
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* Dump the same words again, byte-wise, MSB first */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
144 
145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
146 {
147 	int status;
148 
149 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
150 	printf("CURR STATE:%d\n", status);
151 }
152 #endif
153 
154 #if !CONFIG_IS_ENABLED(DM_MMC)
155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
156 {
157 	int ret;
158 
159 	mmmc_trace_before_send(mmc, cmd);
160 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
161 	mmmc_trace_after_send(mmc, cmd, ret);
162 
163 	return ret;
164 }
165 #endif
166 
167 int mmc_send_status(struct mmc *mmc, int timeout)
168 {
169 	struct mmc_cmd cmd;
170 	int err, retries = 5;
171 
172 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
173 	cmd.resp_type = MMC_RSP_R1;
174 	if (!mmc_host_is_spi(mmc))
175 		cmd.cmdarg = mmc->rca << 16;
176 
177 	while (1) {
178 		err = mmc_send_cmd(mmc, &cmd, NULL);
179 		if (!err) {
180 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
181 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
182 			     MMC_STATE_PRG)
183 				break;
184 			else if (cmd.response[0] & MMC_STATUS_MASK) {
185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
186 				printf("Status Error: 0x%08X\n",
187 					cmd.response[0]);
188 #endif
189 				return -ECOMM;
190 			}
191 		} else if (--retries < 0)
192 			return err;
193 
194 		if (timeout-- <= 0)
195 			break;
196 
197 		udelay(1000);
198 	}
199 
200 	mmc_trace_state(mmc, &cmd);
201 	if (timeout <= 0) {
202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
203 		printf("Timeout waiting card ready\n");
204 #endif
205 		return -ETIMEDOUT;
206 	}
207 
208 	return 0;
209 }
210 
211 int mmc_set_blocklen(struct mmc *mmc, int len)
212 {
213 	struct mmc_cmd cmd;
214 
215 	if (mmc_card_ddr(mmc))
216 		return 0;
217 
218 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
219 	cmd.resp_type = MMC_RSP_R1;
220 	cmd.cmdarg = len;
221 
222 	return mmc_send_cmd(mmc, &cmd, NULL);
223 }
224 
/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17
 * (single block) or CMD18 (multiple blocks).  High-capacity cards are
 * block-addressed; older cards are byte-addressed.  Multi-block reads
 * are terminated with CMD12.
 *
 * Returns the number of blocks read, or 0 on any failure.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		/* Close the open-ended multiple-block transfer */
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
265 
#ifdef CONFIG_SPL_BLK_READ_PREPARE
/*
 * Same request setup as mmc_read_blocks(), but issued through
 * mmc_send_cmd_prepare() instead of mmc_send_cmd().  No CMD12 stop is
 * sent here — presumably the "prepare" path completes the transfer
 * later in the host driver; confirm against the driver implementing
 * send_cmd_prepare.
 *
 * Returns @blkcnt on success, 0 on failure.
 */
static int mmc_read_blocks_prepare(struct mmc *mmc, void *dst, lbaint_t start,
				   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd_prepare(mmc, &cmd, &data))
		return 0;

	return blkcnt;
}
#endif
296 
/*
 * Block-device read entry point.
 *
 * Validates the requested range against the partition's lba count,
 * selects the requested hardware partition, programs the block length
 * and reads in chunks of at most cfg->b_max blocks.  When a chunk
 * fails, the card is fully re-initialized and the chunk retried, up
 * to seven times.
 *
 * Returns the number of blocks read, or 0 on any error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* MMC_TINY has no blk uclass; switch the hw partition directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			int timeout = 0;
re_init_retry:
			timeout++;
			/*
			 * Try re-init seven times.
			 */
			if (timeout > 7) {
				printf("Re-init retry timeout\n");
				return 0;
			}

			/* Force a full re-initialization of the card */
			mmc->has_init = 0;
			if (mmc_init(mmc))
				return 0;

			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
				printf("%s: Re-init mmc_read_blocks error\n",
				       __func__);
				goto re_init_retry;
			}
		}
		blocks_todo -= cur;
		start += cur;
		/* void-pointer arithmetic: GNU extension used by U-Boot */
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
372 
#ifdef CONFIG_SPL_BLK_READ_PREPARE
/*
 * "Prepare" variant of mmc_bread() used by SPL: identical validation
 * and re-init retry logic, but the whole request is issued in one
 * mmc_read_blocks_prepare() call (no b_max chunking) via the host's
 * prepare path.
 *
 * Returns the number of blocks read, or 0 on any error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
			void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int timeout = 0;
	int err;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);

	if (!mmc)
		return 0;

	/* MMC_TINY has no blk uclass; switch the hw partition directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
		debug("%s: Failed to read blocks\n", __func__);
re_init_retry:
		timeout++;
		/*
		 * Try re-init seven times.
		 */
		if (timeout > 7) {
			printf("Re-init retry timeout\n");
			return 0;
		}

		/* Force a full re-initialization of the card */
		mmc->has_init = 0;
		if (mmc_init(mmc))
			return 0;

		if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
			printf("%s: Re-init mmc_read_blocks_prepare error\n",
			       __func__);
			goto re_init_retry;
		}
	}

	return blkcnt;
}
#endif
443 
444 void mmc_set_clock(struct mmc *mmc, uint clock)
445 {
446 	if (clock > mmc->cfg->f_max)
447 		clock = mmc->cfg->f_max;
448 
449 	if (clock < mmc->cfg->f_min)
450 		clock = mmc->cfg->f_min;
451 
452 	mmc->clock = clock;
453 
454 	mmc_set_ios(mmc);
455 }
456 
/* Record the new bus width and propagate it to the host controller. */
static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}
463 
/* Record the new timing mode and propagate it to the host controller. */
static void mmc_set_timing(struct mmc *mmc, uint timing)
{
	mmc->timing = timing;
	mmc_set_ios(mmc);
}
469 
470 static int mmc_go_idle(struct mmc *mmc)
471 {
472 	struct mmc_cmd cmd;
473 	int err;
474 
475 	udelay(1000);
476 
477 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
478 	cmd.cmdarg = 0;
479 	cmd.resp_type = MMC_RSP_NONE;
480 
481 	err = mmc_send_cmd(mmc, &cmd, NULL);
482 
483 	if (err)
484 		return err;
485 
486 	udelay(2000);
487 
488 	return 0;
489 }
490 
491 #ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * SD power-up handshake: repeatedly issue ACMD41 (CMD55 + APP_SEND_OP_COND)
 * advertising the host's voltage window (plus HCS for SD 2.0 cards)
 * until the card reports ready (OCR_BUSY set in the response), with a
 * ~1 s budget.  SPI hosts then read the OCR explicitly with CMD58.
 * On success the SD version, OCR, high-capacity flag and RCA (0) are
 * recorded in @mmc.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
559 #endif
560 
561 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
562 {
563 	struct mmc_cmd cmd;
564 	int err;
565 
566 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
567 	cmd.resp_type = MMC_RSP_R3;
568 	cmd.cmdarg = 0;
569 	if (use_arg && !mmc_host_is_spi(mmc))
570 		cmd.cmdarg = OCR_HCS |
571 			(mmc->cfg->voltages &
572 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
573 			(mmc->ocr & OCR_ACCESS_MODE);
574 
575 	err = mmc_send_cmd(mmc, &cmd, NULL);
576 	if (err)
577 		return err;
578 	mmc->ocr = cmd.response[0];
579 	return 0;
580 }
581 
582 #ifndef CONFIG_MMC_USE_PRE_CONFIG
583 static int mmc_send_op_cond(struct mmc *mmc)
584 {
585 	int err, i;
586 
587 	/* Some cards seem to need this */
588 	mmc_go_idle(mmc);
589 
590  	/* Asking to the card its capabilities */
591 	for (i = 0; i < 2; i++) {
592 		err = mmc_send_op_cond_iter(mmc, i != 0);
593 		if (err)
594 			return err;
595 
596 		/* exit if not busy (flag seems to be inverted) */
597 		if (mmc->ocr & OCR_BUSY)
598 			break;
599 	}
600 	mmc->op_cond_pending = 1;
601 	return 0;
602 }
603 #endif
/*
 * Finish the CMD1 handshake started by mmc_send_op_cond(): if the
 * card was still busy, re-idle it and poll CMD1 until the busy flag
 * clears (1 s budget).  SPI hosts then read the OCR with CMD58.
 * Records high-capacity addressing and the default RCA (1).
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;	/* ms */
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* Exact version is read later from CSD/EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
649 
650 
651 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
652 {
653 	static int initialized;
654 	struct mmc_cmd cmd;
655 	struct mmc_data data;
656 	int err;
657 
658 	if (initialized) {
659 		memcpy(ext_csd, mmc_ext_csd, 512);
660 		return 0;
661 	}
662 
663 	initialized = 1;
664 
665 	/* Get the Card Status Register */
666 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
667 	cmd.resp_type = MMC_RSP_R1;
668 	cmd.cmdarg = 0;
669 
670 	data.dest = (char *)ext_csd;
671 	data.blocks = 1;
672 	data.blocksize = MMC_MAX_BLOCK_LEN;
673 	data.flags = MMC_DATA_READ;
674 
675 	err = mmc_send_cmd(mmc, &cmd, &data);
676 	memcpy(mmc_ext_csd, ext_csd, 512);
677 #if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD)
678 	char *mmc_ecsd_base = NULL;
679 	ulong mmc_ecsd;
680 
681 	mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0);
682 	mmc_ecsd_base = (char *)mmc_ecsd;
683 	if (mmc_ecsd_base) {
684 		memcpy(mmc_ecsd_base, ext_csd, 512);
685 		*(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa;
686 	}
687 #endif
688 	return err;
689 }
690 
/*
 * Wait for the card to leave the busy/programming state.  With
 * @send_status the card is polled via CMD13; otherwise the host's
 * busy detection (mmc_card_busy()) is used, and if the host cannot
 * observe busy at all, a fixed 1000 ms blind delay is taken.
 *
 * Returns 0 when idle, -EBADMSG if the status carries a SWITCH error
 * bit, -ETIMEDOUT after the 1000 ms budget, or a CMD13 transport
 * error.
 */
static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;	/* ms */

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	if (!send_status && !mmc_can_card_busy(mmc)) {
		/* No way to observe busy: wait the full budget blindly */
		mdelay(timeout);
		return 0;
	}

	do {
		if (!send_status) {
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
				MMC_STATE_PRG;
		}

		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}
731 
732 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
733 			u8 send_status)
734 {
735 	struct mmc_cmd cmd;
736 	int retries = 3;
737 	int ret;
738 
739 	cmd.cmdidx = MMC_CMD_SWITCH;
740 	cmd.resp_type = MMC_RSP_R1b;
741 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
742 				 (index << 16) |
743 				 (value << 8);
744 
745 	do {
746 		ret = mmc_send_cmd(mmc, &cmd, NULL);
747 
748 		if (!ret)
749 			return mmc_poll_for_busy(mmc, send_status);
750 	} while (--retries > 0 && ret);
751 
752 	return ret;
753 }
754 
/* CMD6 EXT_CSD byte write with CMD13 busy polling (the common case). */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
759 
/*
 * Probe the widest working bus (8 bit first, then 4): switch the
 * card's EXT_CSD BUS_WIDTH, mirror the width on the host, then verify
 * by re-reading EXT_CSD and comparing read-only fields with the
 * earlier copy.
 *
 * Returns the working MMC_BUS_WIDTH_* value (> 0), 0 when wide buses
 * do not apply (pre-4.x card or 1-bit-only host), or a negative error.
 *
 * NOTE(review): mmc_send_ext_csd() in this tree caches its first
 * result, so the verification read below may be served from the cache
 * rather than the bus — confirm the compare still exercises the new
 * bus width.
 */
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* Start at 8 bit if the host supports it, else at 4 bit */
	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			&test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}
832 
/*
 * Reference tuning block returned by the card for CMD19/CMD21 on
 * 4-bit data buses (64 bytes).
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
843 
/*
 * Reference tuning block returned by the card for CMD21 on 8-bit
 * data buses (128 bytes).
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
862 
863 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
864 {
865 	struct mmc_cmd cmd;
866 	struct mmc_data data;
867 	const u8 *tuning_block_pattern;
868 	int size, err = 0;
869 	u8 *data_buf;
870 
871 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
872 		tuning_block_pattern = tuning_blk_pattern_8bit;
873 		size = sizeof(tuning_blk_pattern_8bit);
874 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
875 		tuning_block_pattern = tuning_blk_pattern_4bit;
876 		size = sizeof(tuning_blk_pattern_4bit);
877 	} else {
878 		return -EINVAL;
879 	}
880 
881 	data_buf = calloc(1, size);
882 	if (!data_buf)
883 		return -ENOMEM;
884 
885 	cmd.cmdidx = opcode;
886 	cmd.resp_type = MMC_RSP_R1;
887 	cmd.cmdarg = 0;
888 
889 	data.dest = (char *)data_buf;
890 	data.blocksize = size;
891 	data.blocks = 1;
892 	data.flags = MMC_DATA_READ;
893 
894 	err = mmc_send_cmd(mmc, &cmd, &data);
895 	if (err)
896 		goto out;
897 
898 	if (memcmp(data_buf, tuning_block_pattern, size))
899 		err = -EIO;
900 out:
901 	free(data_buf);
902 	return err;
903 }
904 
/*
 * Run the host controller's tuning procedure with the appropriate
 * opcode (CMD19 for SD, CMD21 for eMMC HS200).  Returns -EIO when the
 * host provides no tuning op.
 *
 * Note the unusual bracing: the opening `if (...) {` of each variant
 * lives inside the #ifdef pair and both share the trailing `} else`.
 */
static int mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}
929 
/* Run tuning after the switch to HS200; thin named wrapper. */
static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}
934 
935 static int mmc_select_hs(struct mmc *mmc)
936 {
937 	int ret;
938 
939 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
940 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
941 
942 	if (!ret)
943 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
944 
945 	return ret;
946 }
947 
948 static int mmc_select_hs_ddr(struct mmc *mmc)
949 {
950 	u32 ext_csd_bits;
951 	int err = 0;
952 
953 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
954 		return 0;
955 
956 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
957 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
958 
959 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
960 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
961 	if (err)
962 		return err;
963 
964 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
965 
966 	return 0;
967 }
968 
/*
 * Try to enter HS200: widen the bus first; only when that yields a
 * real width (> 0) is HS_TIMING switched to HS200.  The switch skips
 * CMD13 polling (send_status == false) because the card is mid
 * timing change.
 *
 * Returns a negative error, 0 on success, or 0 passed through from
 * mmc_select_bus_width() when no wide bus (and hence no HS200 switch)
 * was possible.
 */
static int mmc_select_hs200(struct mmc *mmc)
{
	int ret;

	/*
	 * Set the bus width(4 or 8) with host's support and
	 * switch to HS200 mode if bus width is set successfully.
	 */
	ret = mmc_select_bus_width(mmc);

	if (ret > 0) {
		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING,
				   EXT_CSD_TIMING_HS200, false);

		if (ret)
			return ret;

		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
	}

	return ret;
}
992 
/*
 * HS200 -> HS400 transition: drop the card and host to HS timing at
 * <= 52 MHz, switch the card to 8-bit DDR, then raise HS_TIMING to
 * HS400 and finally set the host to HS400 timing.  The HS_TIMING
 * switches disable CMD13 polling (false) while the timing is in
 * transition.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int ret;

	/* Switch card to HS mode */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (ret)
		return ret;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	ret = mmc_send_status(mmc, 1000);
	if (ret)
		return ret;

	/* Switch card to DDR */
	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8);
	if (ret)
		return ret;

	/* Switch card to HS400 */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
	if (ret)
		return ret;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);

	return ret;
}
1031 
/*
 * Intersect the card's EXT_CSD CARD_TYPE capabilities with the host's
 * host_caps and return the usable EXT_CSD_CARD_TYPE_* mask.  HS400
 * and HS400ES additionally require an 8-bit-capable host, and HS400ES
 * requires the card's STROBE_SUPPORT byte to be non-zero.
 */
static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, u-boot doesn't support signal voltage
	 * switch, therefor we assume that host support ddr52
	 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and
	 * hs400 are the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
				EXT_CSD_CARD_TYPE_HS400_1_8V;

	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
				EXT_CSD_CARD_TYPE_HS400_1_8V |
				EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}
1082 
1083 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
1084 {
1085 	int clock = 0;
1086 
1087 	if (mmc_card_hs(mmc))
1088 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
1089 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
1090 	else if (mmc_card_hs200(mmc) ||
1091 		 mmc_card_hs400(mmc) ||
1092 		 mmc_card_hs400es(mmc))
1093 		clock = MMC_HS200_MAX_DTR;
1094 
1095 	mmc_set_clock(mmc, clock);
1096 }
1097 
/*
 * Select the fastest bus mode card and host both support: HS200
 * (upgrading to HS400 when available on an 8-bit bus) or High Speed,
 * then program the bus width / DDR mode and clock.  SPI hosts and
 * pre-4.x cards are left unchanged.
 *
 * Returns 0 on success or a negative error.
 *
 * NOTE(review): the mmc_hs200_tuning() result can be overwritten by
 * the subsequent HS400 selection — confirm that is intentional.
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	if (mmc_card_hs200(mmc)) {
		/* Tune at HS200, then optionally step up to HS400 */
		err = mmc_hs200_tuning(mmc);
		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400(mmc);
			mmc_set_bus_speed(mmc, avail_type);
		}
	} else if (!mmc_card_hs400es(mmc)) {
		/* Plain HS: pick a wide bus, then DDR52 when possible */
		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
			err = mmc_select_hs_ddr(mmc);
	}

	return err;
}
1149 
1150 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1151 {
1152 	switch (part_num) {
1153 	case 0:
1154 		mmc->capacity = mmc->capacity_user;
1155 		break;
1156 	case 1:
1157 	case 2:
1158 		mmc->capacity = mmc->capacity_boot;
1159 		break;
1160 	case 3:
1161 		mmc->capacity = mmc->capacity_rpmb;
1162 		break;
1163 	case 4:
1164 	case 5:
1165 	case 6:
1166 	case 7:
1167 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1168 		break;
1169 	default:
1170 		return -1;
1171 	}
1172 
1173 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1174 
1175 	return 0;
1176 }
1177 
1178 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1179 {
1180 	int ret;
1181 
1182 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1183 			 (mmc->part_config & ~PART_ACCESS_MASK)
1184 			 | (part_num & PART_ACCESS_MASK));
1185 
1186 	/*
1187 	 * Set the capacity if the switch succeeded or was intended
1188 	 * to return to representing the raw device.
1189 	 */
1190 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1191 		ret = mmc_set_capacity(mmc, part_num);
1192 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1193 	}
1194 
1195 	return ret;
1196 }
1197 
/*
 * mmc_hwpart_config() - check, set, or complete eMMC hardware partitioning
 * @mmc:  card to configure
 * @conf: requested enhanced user data area and GP partition layout
 * @mode: MMC_HWPART_CONF_CHECK    - validate the request only
 *        MMC_HWPART_CONF_SET      - additionally write sizes/attributes
 *        MMC_HWPART_CONF_COMPLETE - additionally set the write-once
 *                                   PARTITION_SETTING_COMPLETED bit
 *
 * Sizes in @conf are in 512-byte sectors and must be aligned to the
 * device's high-capacity WP group size.  Completing the configuration is
 * irreversible and only takes effect after a power cycle.
 *
 * Return: 0 on success; -EINVAL for bad arguments/alignment;
 * -EMEDIUMTYPE when the device cannot support the request; -EPERM if the
 * device is already partitioned; or a negative error from the EXT_CSD
 * accesses.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	/* Enhanced area / GP partitioning requires eMMC 4.41 or later */
	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		/* ENH_SIZE_MULT is expressed in HC WP group units */
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed devices: sectors -> bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	/* Refuse to touch a device whose partitioning is already final */
	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		/* ENH_START_ADDR: 4 bytes, written LSB first */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		/* ENH_SIZE_MULT: 3 bytes, written LSB first */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		/* one 3-byte GP_SIZE_MULT field per GP partition */
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1391 
1392 #if !CONFIG_IS_ENABLED(DM_MMC)
1393 int mmc_getcd(struct mmc *mmc)
1394 {
1395 	int cd;
1396 
1397 	cd = board_mmc_getcd(mmc);
1398 
1399 	if (cd < 0) {
1400 		if (mmc->cfg->ops->getcd)
1401 			cd = mmc->cfg->ops->getcd(mmc);
1402 		else
1403 			cd = 1;
1404 	}
1405 
1406 	return cd;
1407 }
1408 #endif
1409 
1410 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1411 {
1412 	struct mmc_cmd cmd;
1413 	struct mmc_data data;
1414 
1415 	/* Switch the frequency */
1416 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1417 	cmd.resp_type = MMC_RSP_R1;
1418 	cmd.cmdarg = (mode << 31) | 0xffffff;
1419 	cmd.cmdarg &= ~(0xf << (group * 4));
1420 	cmd.cmdarg |= value << (group * 4);
1421 
1422 	data.dest = (char *)resp;
1423 	data.blocksize = 64;
1424 	data.blocks = 1;
1425 	data.flags = MMC_DATA_READ;
1426 
1427 	return mmc_send_cmd(mmc, &cmd, &data);
1428 }
1429 
1430 
/*
 * sd_change_freq() - discover an SD card's capabilities and try high speed
 *
 * Reads the SCR to determine the SD spec version and 4-bit support, then
 * uses CMD6 (first in check mode, then in switch mode) to move the card
 * to high-speed if both card and host support it.  Updates
 * mmc->card_caps, mmc->scr[] and mmc->version as a side effect.
 *
 * Return: 0 on success (including "high speed not available"), negative
 * error code on command failure.
 */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	/* SPI mode has no bus-width/high-speed negotiation */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	/* SCR is an 8-byte big-endian register read as a data block */
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		/* retry up to 3 times before giving up */
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field (SCR bits 59:56) -> physical spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit distinguishes 3.0 from 2.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card support SD_HIGHSPPED.
	 * This can avoid furthur problem when the card runs in different
	 * mode between the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* Function group 1 result nibble == 1 means high speed accepted */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
1540 
/*
 * sd_read_ssr() - read the SD Status register (ACMD13)
 *
 * Fetches the 64-byte SD Status block and extracts the allocation unit
 * (AU) size plus the per-AU erase timeout/offset into mmc->ssr.  Invalid
 * AU codes are only logged; the function still returns success.
 *
 * Return: 0 on success, negative error code if the command fails after
 * retries.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD13 requires a preceding CMD55 (APP_CMD) */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* retry the data phase a few times before failing */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* SD Status is big-endian on the wire; convert in place */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE code indexes the sd_au_size[] table above */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* erase timeout in ms per AU, offset in ms */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1596 
/* frequency bases, indexed by the TRAN_SPEED frequency-unit field */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1605 
/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by the 4-bit TRAN_SPEED multiplier field of the CSD.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1627 
1628 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Push the current bus settings (clock/width/timing) to the host driver */
static void mmc_set_ios(struct mmc *mmc)
{
	if (mmc->cfg->ops->set_ios)
		mmc->cfg->ops->set_ios(mmc);
}
1634 
/*
 * Ask the host driver whether the card signals busy (via its card_busy op).
 *
 * NOTE(review): the return type is bool but -ENOSYS is returned when the
 * op is absent; that converts to true ("busy"), which is misleading.
 * Callers should gate on mmc_can_card_busy() first — confirm intent.
 */
static bool mmc_card_busy(struct mmc *mmc)
{
	if (!mmc->cfg->ops->card_busy)
		return -ENOSYS;

	return mmc->cfg->ops->card_busy(mmc);
}
1642 
1643 static bool mmc_can_card_busy(struct mmc *)
1644 {
1645 	return !!mmc->cfg->ops->card_busy;
1646 }
1647 #endif
1648 
/*
 * mmc_startup() - bring an identified card up to transfer state
 *
 * Runs the identification sequence (CID, RCA, CSD, CMD7 select), parses
 * the CSD and — for eMMC >= 4.0 — the EXT_CSD into the mmc struct
 * (version, block lengths, capacities, erase/WP group sizes, partition
 * info), negotiates bus width and clock, and finally fills in the block
 * device descriptor.
 *
 * Return: 0 on success, negative error code on any command failure.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq, tran_speed;
	u64 cmult, csize, capacity;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	bool has_parts = false;
	bool part_completed;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}
#endif
#ifndef CONFIG_MMC_USE_PRE_CONFIG
	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}
#endif
	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* CSD SPEC_VERS field -> MMC spec version (SD keeps its version) */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* CSD capacity: C_SIZE/C_SIZE_MULT layout differs for HC cards */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	/* capacity in bytes: (C_SIZE+1) * 2^(C_SIZE_MULT+2) * block length */
	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR if the card implements one and a value is set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check  ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			/* SEC_COUNT: 4-byte little-endian sector count */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		/* EXT_CSD_REV -> eMMC spec version */
		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
		/* record TRIM capability for the erase path */
		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
			mmc->esr.mmc_can_trim = 1;

		/* boot/RPMB sizes are given in 128KiB units (mult << 17) */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		/* GP partition sizes: 3-byte mult * erase grp * WP grp */
		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		if (part_completed) {
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		/* high-capacity WP group size in 512-byte sectors */
		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		/* ACMD6 with argument 2 selects 4-bit bus width */
		if (mmc->card_caps & MMC_MODE_4BIT) {
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		/* 50 MHz for high-speed cards, 25 MHz otherwise */
		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = 50000000;
		else
			tran_speed = 25000000;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	/* identification strings decoded from the CID register */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}
2041 
2042 #ifndef CONFIG_MMC_USE_PRE_CONFIG
2043 static int mmc_send_if_cond(struct mmc *mmc)
2044 {
2045 	struct mmc_cmd cmd;
2046 	int err;
2047 
2048 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2049 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2050 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2051 	cmd.resp_type = MMC_RSP_R7;
2052 
2053 	err = mmc_send_cmd(mmc, &cmd, NULL);
2054 
2055 	if (err)
2056 		return err;
2057 
2058 	if ((cmd.response[0] & 0xff) != 0xaa)
2059 		return -EOPNOTSUPP;
2060 	else
2061 		mmc->version = SD_VERSION_2;
2062 
2063 	return 0;
2064 }
2065 #endif
2066 
2067 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
/* Weak no-op default; boards override this to switch on card power. */
__weak void board_mmc_power_init(void)
{
}
2072 #endif
2073 
2074 #ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * mmc_power_init() - enable power to the card
 *
 * With driver model + regulators, enables the "vmmc-supply" regulator
 * bound to the device (a missing supply is not an error).  Without DM,
 * falls back to the board_mmc_power_init() hook.
 *
 * Return: 0 on success or when no supply is defined, negative error if
 * enabling the regulator fails.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* no regulator bound: treat as always-powered */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2104 #endif
2105 #ifdef CONFIG_MMC_USE_PRE_CONFIG
2106 static int mmc_select_card(struct mmc *mmc, int n)
2107 {
2108 	struct mmc_cmd cmd;
2109 	int err = 0;
2110 
2111 	memset(&cmd, 0, sizeof(struct mmc_cmd));
2112 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2113 		mmc->rca = n;
2114 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2115 		cmd.resp_type = MMC_RSP_R1;
2116 		cmd.cmdarg = mmc->rca << 16;
2117 		err = mmc_send_cmd(mmc, &cmd, NULL);
2118 	}
2119 
2120 	return err;
2121 }
2122 
/*
 * mmc_start_init() - pre-configured variant: reuse the bootrom's setup
 *
 * Instead of resetting and re-identifying the eMMC, keep the bus state
 * left by the bootrom (8-bit, minimal clock, legacy timing), deselect
 * the card back to stand-by via CMD7(0), and record the RCA the bootrom
 * assigned so mmc_startup() can re-select it.
 *
 * Return: always 0.
 */
int mmc_start_init(struct mmc *mmc)
{
	/*
	 * We use the MMC config set by the bootrom.
	 * So it is no need to reset the eMMC device.
	 */
	mmc_set_bus_width(mmc, 8);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
	/* Send cmd7 to return stand-by state*/
	mmc_select_card(mmc, 0);
	mmc->version = MMC_VERSION_UNKNOWN;
	mmc->high_capacity = 1;
	/*
	 * The RCA is set to 2 by rockchip bootrom, use the default
	 * value here.
	 */
#ifdef CONFIG_ARCH_ROCKCHIP
	mmc->rca = 2;
#else
	mmc->rca = 1;
#endif
	return 0;
}
2147 #else
/*
 * mmc_start_init() - begin card initialisation (non-blocking first half)
 *
 * Checks card presence, powers up and resets the card (CMD0), then
 * probes for SD (CMD8/ACMD41) falling back to MMC (CMD1) on timeout.
 * On success init_in_progress is set; mmc_complete_init() finishes the
 * sequence.
 *
 * Return: 0 on success, -ENOMEDIUM if no card, -EOPNOTSUPP if the card
 * answers neither SD nor MMC op-cond, or another negative error.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* start in the lowest common denominator: 1 bit, slow, legacy */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	/* result deliberately ignored: overwritten by sd_send_op_cond()
	 * below; CMD8 only primes v2 cards — TODO confirm intent */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2220 #endif
2221 
2222 static int mmc_complete_init(struct mmc *mmc)
2223 {
2224 	int err = 0;
2225 
2226 	mmc->init_in_progress = 0;
2227 	if (mmc->op_cond_pending)
2228 		err = mmc_complete_op_cond(mmc);
2229 
2230 	if (!err)
2231 		err = mmc_startup(mmc);
2232 	if (err)
2233 		mmc->has_init = 0;
2234 	else
2235 		mmc->has_init = 1;
2236 	return err;
2237 }
2238 
/*
 * mmc_init() - fully initialise a card (start + complete in one call)
 *
 * Skips work if the card is already initialised.  On failure, logs the
 * error code and elapsed time.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	/* make the uclass aware of this mmc instance */
	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2263 
/* Store the DSR value to be programmed (via CMD4) during mmc_startup().
 * Always returns 0. */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2269 
/* CPU-specific MMC initializations */
/* Weak default: report "not handled" (-1) so mmc_probe falls through. */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2275 
/* board-specific MMC initializations. */
/* Weak default: report "not handled" (-1) so cpu_mmc_init() is tried. */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2281 
/* Mark the card for early initialisation by mmc_do_preinit() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2286 
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with DM: devices are probed on demand, nothing to do here */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* DM: bind all MMC devices in sequence order, then probe each one */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			/* a single failed probe does not abort the scan */
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy: board hook first, CPU hook as fallback */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2330 
/*
 * mmc_initialize() - one-time registration of all MMC controllers
 *
 * Idempotent: subsequent calls return immediately.  Sets up the device
 * list (non-BLK builds), probes all controllers, prints the device list
 * and pre-initialises any card flagged with mmc_set_preinit().
 *
 * Return: 0 on success, negative error from mmc_probe() otherwise.
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}
2355 
2356 #ifdef CONFIG_CMD_BKOPS_ENABLE
2357 int mmc_set_bkops_enable(struct mmc *mmc)
2358 {
2359 	int err;
2360 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2361 
2362 	err = mmc_send_ext_csd(mmc, ext_csd);
2363 	if (err) {
2364 		puts("Could not get ext_csd register values\n");
2365 		return err;
2366 	}
2367 
2368 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2369 		puts("Background operations not supported on device\n");
2370 		return -EMEDIUMTYPE;
2371 	}
2372 
2373 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2374 		puts("Background operations already enabled\n");
2375 		return 0;
2376 	}
2377 
2378 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2379 	if (err) {
2380 		puts("Failed to enable manual background operations\n");
2381 		return err;
2382 	}
2383 
2384 	puts("Enabled manual background operations\n");
2385 
2386 	return 0;
2387 }
2388 #endif
2389