xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision e6a8227120c4768196dfed2656d8c83e8ef6bbe8)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
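/*
 * Map of the 4-bit AU_SIZE field from the SD Status register to the
 * allocation unit size in 512-byte sectors (16 KiB .. 64 MiB).
 */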
25 static const unsigned int sd_au_size[] = {
26 	0,		SZ_16K / 512,		SZ_32K / 512,
27 	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
28 	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
29 	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
30 	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
31 };
32 
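/* Cached copy of the EXT_CSD so mmc_send_ext_csd() only reads the card once. */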
33 static char mmc_ext_csd[512];
34 
35 #if CONFIG_IS_ENABLED(MMC_TINY)
36 static struct mmc mmc_static;
37 struct mmc *find_mmc_device(int dev_num)
38 {
39 	return &mmc_static;
40 }
41 
42 void mmc_do_preinit(void)
43 {
44 	struct mmc *m = &mmc_static;
45 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
46 	mmc_set_preinit(m, 1);
47 #endif
48 	if (m->preinit)
49 		mmc_start_init(m);
50 }
51 
52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
53 {
54 	return &mmc->block_dev;
55 }
56 #endif
57 
58 #if !CONFIG_IS_ENABLED(DM_MMC)
59 __weak int board_mmc_getwp(struct mmc *mmc)
60 {
61 	return -1;
62 }
63 
64 int mmc_getwp(struct mmc *mmc)
65 {
66 	int wp;
67 
68 	wp = board_mmc_getwp(mmc);
69 
70 	if (wp < 0) {
71 		if (mmc->cfg->ops->getwp)
72 			wp = mmc->cfg->ops->getwp(mmc);
73 		else
74 			wp = 0;
75 	}
76 
77 	return wp;
78 }
79 
80 __weak int board_mmc_getcd(struct mmc *mmc)
81 {
82 	return -1;
83 }
84 #endif
85 
86 #ifdef CONFIG_MMC_TRACE
87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
88 {
89 	printf("CMD_SEND:%d\n", cmd->cmdidx);
90 	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
91 }
92 
93 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
94 {
95 	int i;
96 	u8 *ptr;
97 
98 	if (ret) {
99 		printf("\t\tRET\t\t\t %d\n", ret);
100 	} else {
101 		switch (cmd->resp_type) {
102 		case MMC_RSP_NONE:
103 			printf("\t\tMMC_RSP_NONE\n");
104 			break;
105 		case MMC_RSP_R1:
106 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
107 				cmd->response[0]);
108 			break;
109 		case MMC_RSP_R1b:
110 			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
111 				cmd->response[0]);
112 			break;
113 		case MMC_RSP_R2:
114 			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
115 				cmd->response[0]);
116 			printf("\t\t          \t\t 0x%08X \n",
117 				cmd->response[1]);
118 			printf("\t\t          \t\t 0x%08X \n",
119 				cmd->response[2]);
120 			printf("\t\t          \t\t 0x%08X \n",
121 				cmd->response[3]);
122 			printf("\n");
123 			printf("\t\t\t\t\tDUMPING DATA\n");
124 			for (i = 0; i < 4; i++) {
125 				int j;
126 				printf("\t\t\t\t\t%03d - ", i*4);
127 				ptr = (u8 *)&cmd->response[i];
128 				ptr += 3;
129 				for (j = 0; j < 4; j++)
130 					printf("%02X ", *ptr--);
131 				printf("\n");
132 			}
133 			break;
134 		case MMC_RSP_R3:
135 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
136 				cmd->response[0]);
137 			break;
138 		default:
139 			printf("\t\tERROR MMC rsp not supported\n");
140 			break;
141 		}
142 	}
143 }
144 
145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
146 {
147 	int status;
148 
149 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
150 	printf("CURR STATE:%d\n", status);
151 }
152 #endif
153 
154 #if !CONFIG_IS_ENABLED(DM_MMC)
155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
156 {
157 	int ret;
158 
159 	mmmc_trace_before_send(mmc, cmd);
160 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
161 	mmmc_trace_after_send(mmc, cmd, ret);
162 
163 	return ret;
164 }
165 #endif
166 
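/*
 * Poll CMD13 (SEND_STATUS) until the card reports ready-for-data and has
 * left the programming state, or until the timeout (in ms) expires.
 */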
167 int mmc_send_status(struct mmc *mmc, int timeout)
168 {
169 	struct mmc_cmd cmd;
170 	int err, retries = 5;
171 
172 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
173 	cmd.resp_type = MMC_RSP_R1;
174 	if (!mmc_host_is_spi(mmc))
175 		cmd.cmdarg = mmc->rca << 16;
176 
177 	while (1) {
178 		err = mmc_send_cmd(mmc, &cmd, NULL);
179 		if (!err) {
180 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
181 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
182 			     MMC_STATE_PRG)
183 				break;
184 			else if (cmd.response[0] & MMC_STATUS_MASK) {
185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
186 				printf("Status Error: 0x%08X\n",
187 					cmd.response[0]);
188 #endif
189 				return -ECOMM;
190 			}
191 		} else if (--retries < 0)
192 			return err;
193 
194 		if (timeout-- <= 0)
195 			break;
196 
197 		udelay(1000);
198 	}
199 
200 	mmc_trace_state(mmc, &cmd);
201 	if (timeout <= 0) {
202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
203 		printf("Timeout waiting card ready\n");
204 #endif
205 		return -ETIMEDOUT;
206 	}
207 
208 	return 0;
209 }
210 
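/*
 * Set the block length with CMD16. The command is skipped in DDR modes,
 * where only the default 512-byte block length is used.
 */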
211 int mmc_set_blocklen(struct mmc *mmc, int len)
212 {
213 	struct mmc_cmd cmd;
214 
215 	if (mmc_card_ddr(mmc))
216 		return 0;
217 
218 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
219 	cmd.resp_type = MMC_RSP_R1;
220 	cmd.cmdarg = len;
221 
222 	return mmc_send_cmd(mmc, &cmd, NULL);
223 }
224 
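/*
 * Read blkcnt blocks using CMD17/CMD18; multi-block reads are closed with
 * CMD12 (STOP_TRANSMISSION). Returns the number of blocks read, 0 on error.
 */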
225 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
226 			   lbaint_t blkcnt)
227 {
228 	struct mmc_cmd cmd;
229 	struct mmc_data data;
230 
231 	if (blkcnt > 1)
232 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
233 	else
234 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
235 
236 	if (mmc->high_capacity)
237 		cmd.cmdarg = start;
238 	else
239 		cmd.cmdarg = start * mmc->read_bl_len;
240 
241 	cmd.resp_type = MMC_RSP_R1;
242 
243 	data.dest = dst;
244 	data.blocks = blkcnt;
245 	data.blocksize = mmc->read_bl_len;
246 	data.flags = MMC_DATA_READ;
247 
248 	if (mmc_send_cmd(mmc, &cmd, &data))
249 		return 0;
250 
251 	if (blkcnt > 1) {
252 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
253 		cmd.cmdarg = 0;
254 		cmd.resp_type = MMC_RSP_R1b;
255 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
256 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
257 			printf("mmc fail to send stop cmd\n");
258 #endif
259 			return 0;
260 		}
261 	}
262 
263 	return blkcnt;
264 }
265 
266 #ifdef CONFIG_SPL_BLK_READ_PREPARE
267 static int mmc_read_blocks_prepare(struct mmc *mmc, void *dst, lbaint_t start,
268 				   lbaint_t blkcnt)
269 {
270 	struct mmc_cmd cmd;
271 	struct mmc_data data;
272 
273 	if (blkcnt > 1)
274 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
275 	else
276 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
277 
278 	if (mmc->high_capacity)
279 		cmd.cmdarg = start;
280 	else
281 		cmd.cmdarg = start * mmc->read_bl_len;
282 
283 	cmd.resp_type = MMC_RSP_R1;
284 
285 	data.dest = dst;
286 	data.blocks = blkcnt;
287 	data.blocksize = mmc->read_bl_len;
288 	data.flags = MMC_DATA_READ;
289 
290 	if (mmc_send_cmd_prepare(mmc, &cmd, &data))
291 		return 0;
292 
293 	return blkcnt;
294 }
295 #endif
296 
297 #ifdef CONFIG_SPL_BLK_READ_PREPARE
298 #if CONFIG_IS_ENABLED(BLK)
299 ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
300 #else
301 ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
302 			void *dst)
303 #endif
304 {
305 #if CONFIG_IS_ENABLED(BLK)
306 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
307 #endif
308 	int dev_num = block_dev->devnum;
309 	int timeout = 0;
310 	int err;
311 
312 	if (blkcnt == 0)
313 		return 0;
314 
315 	struct mmc *mmc = find_mmc_device(dev_num);
316 
317 	if (!mmc)
318 		return 0;
319 
320 	if (CONFIG_IS_ENABLED(MMC_TINY))
321 		err = mmc_switch_part(mmc, block_dev->hwpart);
322 	else
323 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
324 
325 	if (err < 0)
326 		return 0;
327 
328 	if ((start + blkcnt) > block_dev->lba) {
329 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
330 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
331 		       start + blkcnt, block_dev->lba);
332 #endif
333 		return 0;
334 	}
335 
336 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
337 		debug("%s: Failed to set blocklen\n", __func__);
338 		return 0;
339 	}
340 
341 	if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
342 		debug("%s: Failed to read blocks\n", __func__);
343 re_init_retry:
344 		timeout++;
345 		/*
346 		 * Try re-init seven times.
347 		 */
348 		if (timeout > 7) {
349 			printf("Re-init retry timeout\n");
350 			return 0;
351 		}
352 
353 		mmc->has_init = 0;
354 		if (mmc_init(mmc))
355 			return 0;
356 
357 		if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
358 			printf("%s: Re-init mmc_read_blocks_prepare error\n",
359 			       __func__);
360 			goto re_init_retry;
361 		}
362 	}
363 
364 	return blkcnt;
365 }
366 #endif
367 
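/*
 * Block-device read entry point: validates the request, splits it into
 * chunks of at most cfg->b_max blocks, and retries a failed chunk with a
 * full re-init (up to seven times).
 */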
368 #if CONFIG_IS_ENABLED(BLK)
369 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
370 #else
371 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
372 		void *dst)
373 #endif
374 {
375 #if CONFIG_IS_ENABLED(BLK)
376 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
377 #endif
378 	int dev_num = block_dev->devnum;
379 	int err;
380 	lbaint_t cur, blocks_todo = blkcnt;
381 
382 #ifdef CONFIG_SPL_BLK_READ_PREPARE
383 	if (block_dev->op_flag == BLK_PRE_RW)
384 #if CONFIG_IS_ENABLED(BLK)
385 		return mmc_bread_prepare(dev, start, blkcnt, dst);
386 #else
387 		return mmc_bread_prepare(block_dev, start, blkcnt, dst);
388 #endif
389 #endif
390 	if (blkcnt == 0)
391 		return 0;
392 
393 	struct mmc *mmc = find_mmc_device(dev_num);
394 	if (!mmc)
395 		return 0;
396 
397 	if (CONFIG_IS_ENABLED(MMC_TINY))
398 		err = mmc_switch_part(mmc, block_dev->hwpart);
399 	else
400 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
401 
402 	if (err < 0)
403 		return 0;
404 
405 	if ((start + blkcnt) > block_dev->lba) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
408 			start + blkcnt, block_dev->lba);
409 #endif
410 		return 0;
411 	}
412 
413 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
414 		debug("%s: Failed to set blocklen\n", __func__);
415 		return 0;
416 	}
417 
418 	do {
419 		cur = (blocks_todo > mmc->cfg->b_max) ?
420 			mmc->cfg->b_max : blocks_todo;
421 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
422 			debug("%s: Failed to read blocks\n", __func__);
423 			int timeout = 0;
424 re_init_retry:
425 			timeout++;
426 			/*
427 			 * Try re-init seven times.
428 			 */
429 			if (timeout > 7) {
430 				printf("Re-init retry timeout\n");
431 				return 0;
432 			}
433 
434 			mmc->has_init = 0;
435 			if (mmc_init(mmc))
436 				return 0;
437 
438 			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
439 				printf("%s: Re-init mmc_read_blocks error\n",
440 				       __func__);
441 				goto re_init_retry;
442 			}
443 		}
444 		blocks_todo -= cur;
445 		start += cur;
446 		dst += cur * mmc->read_bl_len;
447 	} while (blocks_todo > 0);
448 
449 	return blkcnt;
450 }
451 
452 void mmc_set_clock(struct mmc *mmc, uint clock)
453 {
454 	if (clock > mmc->cfg->f_max)
455 		clock = mmc->cfg->f_max;
456 
457 	if (clock < mmc->cfg->f_min)
458 		clock = mmc->cfg->f_min;
459 
460 	mmc->clock = clock;
461 
462 	mmc_set_ios(mmc);
463 }
464 
465 static void mmc_set_bus_width(struct mmc *mmc, uint width)
466 {
467 	mmc->bus_width = width;
468 
469 	mmc_set_ios(mmc);
470 }
471 
472 static void mmc_set_timing(struct mmc *mmc, uint timing)
473 {
474 	mmc->timing = timing;
475 	mmc_set_ios(mmc);
476 }
477 
478 static int mmc_go_idle(struct mmc *mmc)
479 {
480 	struct mmc_cmd cmd;
481 	int err;
482 
483 	udelay(1000);
484 
485 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
486 	cmd.cmdarg = 0;
487 	cmd.resp_type = MMC_RSP_NONE;
488 
489 	err = mmc_send_cmd(mmc, &cmd, NULL);
490 
491 	if (err)
492 		return err;
493 
494 	udelay(2000);
495 
496 	return 0;
497 }
498 
499 #ifndef CONFIG_MMC_USE_PRE_CONFIG
500 static int sd_send_op_cond(struct mmc *mmc)
501 {
502 	int timeout = 1000;
503 	int err;
504 	struct mmc_cmd cmd;
505 
506 	while (1) {
507 		cmd.cmdidx = MMC_CMD_APP_CMD;
508 		cmd.resp_type = MMC_RSP_R1;
509 		cmd.cmdarg = 0;
510 
511 		err = mmc_send_cmd(mmc, &cmd, NULL);
512 
513 		if (err)
514 			return err;
515 
516 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
517 		cmd.resp_type = MMC_RSP_R3;
518 
519 		/*
520 		 * Most cards do not answer if some reserved bits
521 		 * in the OCR are set. However, some controllers
522 		 * can set bit 7 (reserved for low voltages), but
523 		 * how to manage low-voltage SD cards is not yet
524 		 * specified.
525 		 */
526 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
527 			(mmc->cfg->voltages & 0xff8000);
528 
529 		if (mmc->version == SD_VERSION_2)
530 			cmd.cmdarg |= OCR_HCS;
531 
532 		err = mmc_send_cmd(mmc, &cmd, NULL);
533 
534 		if (err)
535 			return err;
536 
537 		if (cmd.response[0] & OCR_BUSY)
538 			break;
539 
540 		if (timeout-- <= 0)
541 			return -EOPNOTSUPP;
542 
543 		udelay(1000);
544 	}
545 
546 	if (mmc->version != SD_VERSION_2)
547 		mmc->version = SD_VERSION_1_0;
548 
549 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
550 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
551 		cmd.resp_type = MMC_RSP_R3;
552 		cmd.cmdarg = 0;
553 
554 		err = mmc_send_cmd(mmc, &cmd, NULL);
555 
556 		if (err)
557 			return err;
558 	}
559 
560 	mmc->ocr = cmd.response[0];
561 
562 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
563 	mmc->rca = 0;
564 
565 	return 0;
566 }
567 #endif
568 
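/*
 * Single CMD1 (SEND_OP_COND) iteration. When use_arg is set, the argument
 * advertises the host's voltage window plus OCR_HCS (sector addressing);
 * the returned OCR is stored in mmc->ocr.
 */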
569 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
570 {
571 	struct mmc_cmd cmd;
572 	int err;
573 
574 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
575 	cmd.resp_type = MMC_RSP_R3;
576 	cmd.cmdarg = 0;
577 	if (use_arg && !mmc_host_is_spi(mmc))
578 		cmd.cmdarg = OCR_HCS |
579 			(mmc->cfg->voltages &
580 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
581 			(mmc->ocr & OCR_ACCESS_MODE);
582 
583 	err = mmc_send_cmd(mmc, &cmd, NULL);
584 	if (err)
585 		return err;
586 	mmc->ocr = cmd.response[0];
587 	return 0;
588 }
589 
590 #ifndef CONFIG_MMC_USE_PRE_CONFIG
591 static int mmc_send_op_cond(struct mmc *mmc)
592 {
593 	int err, i;
594 
595 	/* Some cards seem to need this */
596 	mmc_go_idle(mmc);
597 
598 	/* Ask the card for its capabilities */
599 	for (i = 0; i < 2; i++) {
600 		err = mmc_send_op_cond_iter(mmc, i != 0);
601 		if (err)
602 			return err;
603 
604 		/* exit if not busy (flag seems to be inverted) */
605 		if (mmc->ocr & OCR_BUSY)
606 			break;
607 	}
608 	mmc->op_cond_pending = 1;
609 	return 0;
610 }
611 #endif
612 static int mmc_complete_op_cond(struct mmc *mmc)
613 {
614 	struct mmc_cmd cmd;
615 	int timeout = 1000;
616 	uint start;
617 	int err;
618 
619 	mmc->op_cond_pending = 0;
620 	if (!(mmc->ocr & OCR_BUSY)) {
621 		/* Some cards seem to need this */
622 		mmc_go_idle(mmc);
623 
624 		start = get_timer(0);
625 		while (1) {
626 			err = mmc_send_op_cond_iter(mmc, 1);
627 			if (err)
628 				return err;
629 			if (mmc->ocr & OCR_BUSY)
630 				break;
631 			if (get_timer(start) > timeout)
632 				return -EOPNOTSUPP;
633 			udelay(100);
634 		}
635 	}
636 
637 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
638 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
639 		cmd.resp_type = MMC_RSP_R3;
640 		cmd.cmdarg = 0;
641 
642 		err = mmc_send_cmd(mmc, &cmd, NULL);
643 
644 		if (err)
645 			return err;
646 
647 		mmc->ocr = cmd.response[0];
648 	}
649 
650 	mmc->version = MMC_VERSION_UNKNOWN;
651 
652 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
653 	mmc->rca = 1;
654 
655 	return 0;
656 }
657 
658 
659 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
660 {
661 	static int initialized;
662 	struct mmc_cmd cmd;
663 	struct mmc_data data;
664 	int err;
665 
666 	if (initialized) {
667 		memcpy(ext_csd, mmc_ext_csd, 512);
668 		return 0;
669 	}
670 
671 	initialized = 1;
672 
673 	/* Read the EXT_CSD register */
674 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
675 	cmd.resp_type = MMC_RSP_R1;
676 	cmd.cmdarg = 0;
677 
678 	data.dest = (char *)ext_csd;
679 	data.blocks = 1;
680 	data.blocksize = MMC_MAX_BLOCK_LEN;
681 	data.flags = MMC_DATA_READ;
682 
683 	err = mmc_send_cmd(mmc, &cmd, &data);
684 	memcpy(mmc_ext_csd, ext_csd, 512);
685 #if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD)
686 	char *mmc_ecsd_base = NULL;
687 	ulong mmc_ecsd;
688 
689 	mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0);
690 	mmc_ecsd_base = (char *)mmc_ecsd;
691 	if (mmc_ecsd_base) {
692 		memcpy(mmc_ecsd_base, ext_csd, 512);
693 		*(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa;
694 	}
695 #endif
696 	return err;
697 }
698 
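/*
 * Wait for the card to leave the programming state after a SWITCH, either
 * by polling CMD13 or via the host's card_busy (DAT0) callback. If neither
 * send_status nor card_busy is available, fall back to a fixed delay.
 */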
699 static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
700 {
701 	struct mmc_cmd cmd;
702 	u8 busy = true;
703 	uint start;
704 	int ret;
705 	int timeout = 1000;
706 
707 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
708 	cmd.resp_type = MMC_RSP_R1;
709 	cmd.cmdarg = mmc->rca << 16;
710 
711 	start = get_timer(0);
712 
713 	if (!send_status && !mmc_can_card_busy(mmc)) {
714 		mdelay(timeout);
715 		return 0;
716 	}
717 
718 	do {
719 		if (!send_status) {
720 			busy = mmc_card_busy(mmc);
721 		} else {
722 			ret = mmc_send_cmd(mmc, &cmd, NULL);
723 
724 			if (ret)
725 				return ret;
726 
727 			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
728 				return -EBADMSG;
729 			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
730 				MMC_STATE_PRG;
731 		}
732 
733 		if (get_timer(start) > timeout && busy)
734 			return -ETIMEDOUT;
735 	} while (busy);
736 
737 	return 0;
738 }
739 
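/*
 * Write one EXT_CSD byte with CMD6 (SWITCH), retrying up to three times,
 * then wait for the card to finish the operation.
 */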
740 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
741 			u8 send_status)
742 {
743 	struct mmc_cmd cmd;
744 	int retries = 3;
745 	int ret;
746 
747 	cmd.cmdidx = MMC_CMD_SWITCH;
748 	cmd.resp_type = MMC_RSP_R1b;
749 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
750 				 (index << 16) |
751 				 (value << 8);
752 
753 	do {
754 		ret = mmc_send_cmd(mmc, &cmd, NULL);
755 
756 		if (!ret)
757 			return mmc_poll_for_busy(mmc, send_status);
758 	} while (--retries > 0 && ret);
759 
760 	return ret;
761 }
762 
763 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
764 {
765 	return __mmc_switch(mmc, set, index, value, true);
766 }
767 
768 static int mmc_select_bus_width(struct mmc *mmc)
769 {
770 	u32 ext_csd_bits[] = {
771 		EXT_CSD_BUS_WIDTH_8,
772 		EXT_CSD_BUS_WIDTH_4,
773 	};
774 	u32 bus_widths[] = {
775 		MMC_BUS_WIDTH_8BIT,
776 		MMC_BUS_WIDTH_4BIT,
777 	};
778 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
779 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
780 	u32 idx, bus_width = 0;
781 	int err = 0;
782 
783 	if (mmc->version < MMC_VERSION_4 ||
784 	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
785 		return 0;
786 
787 	err = mmc_send_ext_csd(mmc, ext_csd);
788 
789 	if (err)
790 		return err;
791 
792 	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;
793 
794 	/*
795 	 * Unlike SD, MMC cards don't have a configuration register that
796 	 * reports the supported bus width, so run the bus test command to
797 	 * identify it, or compare the EXT_CSD values read at the current
798 	 * bus width with the EXT_CSD values read earlier in 1-bit mode.
799 	 */
800 	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
801 		/*
802 		 * If the host is capable of 8-bit transfers, switch the
803 		 * device to 8-bit transfer mode. If the mmc switch
804 		 * command returns an error, fall back to 4-bit transfer
805 		 * mode. On success, set the corresponding bus width on
806 		 * the host.
807 		 */
808 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
809 				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
810 		if (err)
811 			continue;
812 
813 		bus_width = bus_widths[idx];
814 		mmc_set_bus_width(mmc, bus_width);
815 
816 		err = mmc_send_ext_csd(mmc, test_csd);
817 
818 		if (err)
819 			continue;
820 
821 		/* Only compare read only fields */
822 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
823 			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
824 		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
825 			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
826 		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
827 			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
828 			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
829 		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
830 			&test_csd[EXT_CSD_SEC_CNT], 4)) {
831 			err = bus_width;
832 			break;
833 		} else {
834 			err = -EBADMSG;
835 		}
836 	}
837 
838 	return err;
839 }
840 
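/*
 * Tuning block patterns defined by the SD/eMMC specifications, returned by
 * the card in response to CMD19/CMD21 and compared in mmc_send_tuning().
 */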
841 static const u8 tuning_blk_pattern_4bit[] = {
842 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
843 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
844 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
845 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
846 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
847 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
848 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
849 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
850 };
851 
852 static const u8 tuning_blk_pattern_8bit[] = {
853 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
854 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
855 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
856 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
857 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
858 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
859 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
860 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
861 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
862 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
863 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
864 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
865 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
866 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
867 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
868 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
869 };
870 
871 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
872 {
873 	struct mmc_cmd cmd;
874 	struct mmc_data data;
875 	const u8 *tuning_block_pattern;
876 	int size, err = 0;
877 	u8 *data_buf;
878 
879 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
880 		tuning_block_pattern = tuning_blk_pattern_8bit;
881 		size = sizeof(tuning_blk_pattern_8bit);
882 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
883 		tuning_block_pattern = tuning_blk_pattern_4bit;
884 		size = sizeof(tuning_blk_pattern_4bit);
885 	} else {
886 		return -EINVAL;
887 	}
888 
889 	data_buf = calloc(1, size);
890 	if (!data_buf)
891 		return -ENOMEM;
892 
893 	cmd.cmdidx = opcode;
894 	cmd.resp_type = MMC_RSP_R1;
895 	cmd.cmdarg = 0;
896 
897 	data.dest = (char *)data_buf;
898 	data.blocksize = size;
899 	data.blocks = 1;
900 	data.flags = MMC_DATA_READ;
901 
902 	err = mmc_send_cmd(mmc, &cmd, &data);
903 	if (err)
904 		goto out;
905 
906 	if (memcmp(data_buf, tuning_block_pattern, size))
907 		err = -EIO;
908 out:
909 	free(data_buf);
910 	return err;
911 }
912 
913 static int mmc_execute_tuning(struct mmc *mmc)
914 {
915 #ifdef CONFIG_DM_MMC
916 	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
917 #endif
918 	u32 opcode;
919 
920 	if (IS_SD(mmc))
921 		opcode = MMC_SEND_TUNING_BLOCK;
922 	else
923 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
924 
925 #ifndef CONFIG_DM_MMC
926 	if (mmc->cfg->ops->execute_tuning) {
927 		return mmc->cfg->ops->execute_tuning(mmc, opcode);
928 #else
929 	if (ops->execute_tuning) {
930 		return ops->execute_tuning(mmc->dev, opcode);
931 #endif
932 	} else {
933 		debug("Tuning feature required for HS200 mode.\n");
934 		return -EIO;
935 	}
936 }
937 
938 static int mmc_hs200_tuning(struct mmc *mmc)
939 {
940 	return mmc_execute_tuning(mmc);
941 }
942 
943 static int mmc_select_hs(struct mmc *mmc)
944 {
945 	int ret;
946 
947 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
948 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
949 
950 	if (!ret)
951 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
952 
953 	return ret;
954 }
955 
956 static int mmc_select_hs_ddr(struct mmc *mmc)
957 {
958 	u32 ext_csd_bits;
959 	int err = 0;
960 
961 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
962 		return 0;
963 
964 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
965 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
966 
967 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
968 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
969 	if (err)
970 		return err;
971 
972 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
973 
974 	return 0;
975 }
976 
977 static int mmc_select_hs200(struct mmc *mmc)
978 {
979 	int ret;
980 
981 	/*
982 	 * Set the bus width (4 or 8) according to the host's capabilities
983 	 * and switch to HS200 mode if the bus width is set successfully.
984 	 */
985 	ret = mmc_select_bus_width(mmc);
986 
987 	if (ret > 0) {
988 		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
989 				   EXT_CSD_HS_TIMING,
990 				   EXT_CSD_TIMING_HS200, false);
991 
992 		if (ret)
993 			return ret;
994 
995 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
996 	}
997 
998 	return ret;
999 }
1000 
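/*
 * HS400 selection sequence: drop back to HS timing at <= 52 MHz, switch the
 * bus to 8-bit DDR, then switch the card and host to HS400 timing.
 */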
1001 static int mmc_select_hs400(struct mmc *mmc)
1002 {
1003 	int ret;
1004 
1005 	/* Switch card to HS mode */
1006 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1007 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
1008 	if (ret)
1009 		return ret;
1010 
1011 	/* Set host controller to HS timing */
1012 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
1013 
1014 	/* Reduce frequency to HS frequency */
1015 	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);
1016 
1017 	ret = mmc_send_status(mmc, 1000);
1018 	if (ret)
1019 		return ret;
1020 
1021 	/* Switch card to DDR */
1022 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1023 			 EXT_CSD_BUS_WIDTH,
1024 			 EXT_CSD_DDR_BUS_WIDTH_8);
1025 	if (ret)
1026 		return ret;
1027 
1028 	/* Switch card to HS400 */
1029 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1030 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
1031 	if (ret)
1032 		return ret;
1033 
1034 	/* Set host controller to HS400 timing and frequency */
1035 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);
1036 
1037 	return ret;
1038 }
1039 
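/*
 * Intersect the host capabilities with the EXT_CSD CARD_TYPE (device type)
 * field to compute the set of bus speed modes that can actually be used.
 */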
1040 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
1041 {
1042 	u8 card_type;
1043 	u32 host_caps, avail_type = 0;
1044 
1045 	card_type = ext_csd[EXT_CSD_CARD_TYPE];
1046 	host_caps = mmc->cfg->host_caps;
1047 
1048 	if ((host_caps & MMC_MODE_HS) &&
1049 	    (card_type & EXT_CSD_CARD_TYPE_26))
1050 		avail_type |= EXT_CSD_CARD_TYPE_26;
1051 
1052 	if ((host_caps & MMC_MODE_HS) &&
1053 	    (card_type & EXT_CSD_CARD_TYPE_52))
1054 		avail_type |= EXT_CSD_CARD_TYPE_52;
1055 
1056 	/*
1057 	 * For the moment, U-Boot doesn't support signal voltage
1058 	 * switching, therefore we assume the host supports DDR52
1059 	 * at 1.8V or 3.3V I/O (1.2V I/O is not supported; the same
1060 	 * applies to HS200 and HS400).
1061 	 */
1062 	if ((host_caps & MMC_MODE_DDR_52MHz) &&
1063 	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
1064 		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
1065 
1066 	if ((host_caps & MMC_MODE_HS200) &&
1067 	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
1068 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
1069 
1070 	/*
1071 	 * If the host can support HS400, it means that it can also
1072 	 * support HS200.
1073 	 */
1074 	if ((host_caps & MMC_MODE_HS400) &&
1075 	    (host_caps & MMC_MODE_8BIT) &&
1076 	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
1077 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
1078 				EXT_CSD_CARD_TYPE_HS400_1_8V;
1079 
1080 	if ((host_caps & MMC_MODE_HS400ES) &&
1081 	    (host_caps & MMC_MODE_8BIT) &&
1082 	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
1083 	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
1084 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
1085 				EXT_CSD_CARD_TYPE_HS400_1_8V |
1086 				EXT_CSD_CARD_TYPE_HS400ES;
1087 
1088 	return avail_type;
1089 }
1090 
1091 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
1092 {
1093 	int clock = 0;
1094 
1095 	if (mmc_card_hs(mmc))
1096 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
1097 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
1098 	else if (mmc_card_hs200(mmc) ||
1099 		 mmc_card_hs400(mmc) ||
1100 		 mmc_card_hs400es(mmc))
1101 		clock = MMC_HS200_MAX_DTR;
1102 
1103 	mmc_set_clock(mmc, clock);
1104 }
1105 
1106 static int mmc_change_freq(struct mmc *mmc)
1107 {
1108 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1109 	u32 avail_type;
1110 	int err;
1111 
1112 	mmc->card_caps = 0;
1113 
1114 	if (mmc_host_is_spi(mmc))
1115 		return 0;
1116 
1117 	/* Only version 4 supports high-speed */
1118 	if (mmc->version < MMC_VERSION_4)
1119 		return 0;
1120 
1121 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
1122 
1123 	err = mmc_send_ext_csd(mmc, ext_csd);
1124 
1125 	if (err)
1126 		return err;
1127 
1128 	avail_type = mmc_select_card_type(mmc, ext_csd);
1129 
1130 	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
1131 		err = mmc_select_hs200(mmc);
1132 	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
1133 		err = mmc_select_hs(mmc);
1134 	else
1135 		err = -EINVAL;
1136 
1137 	if (err)
1138 		return err;
1139 
1140 	mmc_set_bus_speed(mmc, avail_type);
1141 
1142 	if (mmc_card_hs200(mmc)) {
1143 		err = mmc_hs200_tuning(mmc);
1144 		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1145 		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
1146 			err = mmc_select_hs400(mmc);
1147 			mmc_set_bus_speed(mmc, avail_type);
1148 		}
1149 	} else if (!mmc_card_hs400es(mmc)) {
1150 		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
1151 		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
1152 			err = mmc_select_hs_ddr(mmc);
1153 	}
1154 
1155 	return err;
1156 }
1157 
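/*
 * Hardware partition numbering: 0 = user area, 1/2 = boot partitions,
 * 3 = RPMB, 4..7 = general purpose partitions GP1..GP4.
 */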
1158 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1159 {
1160 	switch (part_num) {
1161 	case 0:
1162 		mmc->capacity = mmc->capacity_user;
1163 		break;
1164 	case 1:
1165 	case 2:
1166 		mmc->capacity = mmc->capacity_boot;
1167 		break;
1168 	case 3:
1169 		mmc->capacity = mmc->capacity_rpmb;
1170 		break;
1171 	case 4:
1172 	case 5:
1173 	case 6:
1174 	case 7:
1175 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1176 		break;
1177 	default:
1178 		return -1;
1179 	}
1180 
1181 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1182 
1183 	return 0;
1184 }
1185 
1186 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1187 {
1188 	int ret;
1189 
1190 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1191 			 (mmc->part_config & ~PART_ACCESS_MASK)
1192 			 | (part_num & PART_ACCESS_MASK));
1193 
1194 	/*
1195 	 * Set the capacity if the switch succeeded or was intended
1196 	 * to return to representing the raw device.
1197 	 */
1198 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1199 		ret = mmc_set_capacity(mmc, part_num);
1200 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1201 	}
1202 
1203 	return ret;
1204 }
1205 
1206 int mmc_hwpart_config(struct mmc *mmc,
1207 		      const struct mmc_hwpart_conf *conf,
1208 		      enum mmc_hwpart_conf_mode mode)
1209 {
1210 	u8 part_attrs = 0;
1211 	u32 enh_size_mult;
1212 	u32 enh_start_addr;
1213 	u32 gp_size_mult[4];
1214 	u32 max_enh_size_mult;
1215 	u32 tot_enh_size_mult = 0;
1216 	u8 wr_rel_set;
1217 	int i, pidx, err;
1218 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1219 
1220 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1221 		return -EINVAL;
1222 
1223 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1224 		printf("eMMC >= 4.4 required for enhanced user data area\n");
1225 		return -EMEDIUMTYPE;
1226 	}
1227 
1228 	if (!(mmc->part_support & PART_SUPPORT)) {
1229 		printf("Card does not support partitioning\n");
1230 		return -EMEDIUMTYPE;
1231 	}
1232 
1233 	if (!mmc->hc_wp_grp_size) {
1234 		printf("Card does not define HC WP group size\n");
1235 		return -EMEDIUMTYPE;
1236 	}
1237 
1238 	/* check partition alignment and total enhanced size */
1239 	if (conf->user.enh_size) {
1240 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1241 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1242 			printf("User data enhanced area not HC WP group "
1243 			       "size aligned\n");
1244 			return -EINVAL;
1245 		}
1246 		part_attrs |= EXT_CSD_ENH_USR;
1247 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1248 		if (mmc->high_capacity) {
1249 			enh_start_addr = conf->user.enh_start;
1250 		} else {
1251 			enh_start_addr = (conf->user.enh_start << 9);
1252 		}
1253 	} else {
1254 		enh_size_mult = 0;
1255 		enh_start_addr = 0;
1256 	}
1257 	tot_enh_size_mult += enh_size_mult;
1258 
1259 	for (pidx = 0; pidx < 4; pidx++) {
1260 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1261 			printf("GP%i partition not HC WP group size "
1262 			       "aligned\n", pidx+1);
1263 			return -EINVAL;
1264 		}
1265 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1266 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1267 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1268 			tot_enh_size_mult += gp_size_mult[pidx];
1269 		}
1270 	}
1271 
1272 	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
1273 		printf("Card does not support enhanced attribute\n");
1274 		return -EMEDIUMTYPE;
1275 	}
1276 
1277 	err = mmc_send_ext_csd(mmc, ext_csd);
1278 	if (err)
1279 		return err;
1280 
1281 	max_enh_size_mult =
1282 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1283 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1284 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1285 	if (tot_enh_size_mult > max_enh_size_mult) {
1286 		printf("Total enhanced size exceeds maximum (%u > %u)\n",
1287 		       tot_enh_size_mult, max_enh_size_mult);
1288 		return -EMEDIUMTYPE;
1289 	}
1290 
1291 	/* The default value of EXT_CSD_WR_REL_SET is device
1292 	 * dependent; the values can only be changed if the
1293 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1294 	 * changed only once, and only before partitioning is completed. */
1295 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1296 	if (conf->user.wr_rel_change) {
1297 		if (conf->user.wr_rel_set)
1298 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1299 		else
1300 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1301 	}
1302 	for (pidx = 0; pidx < 4; pidx++) {
1303 		if (conf->gp_part[pidx].wr_rel_change) {
1304 			if (conf->gp_part[pidx].wr_rel_set)
1305 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1306 			else
1307 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1308 		}
1309 	}
1310 
1311 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1312 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1313 		puts("Card does not support host controlled partition write "
1314 		     "reliability settings\n");
1315 		return -EMEDIUMTYPE;
1316 	}
1317 
1318 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1319 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1320 		printf("Card already partitioned\n");
1321 		return -EPERM;
1322 	}
1323 
1324 	if (mode == MMC_HWPART_CONF_CHECK)
1325 		return 0;
1326 
1327 	/* Partitioning requires high-capacity size definitions */
1328 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1329 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1330 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1331 
1332 		if (err)
1333 			return err;
1334 
1335 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1336 
1337 		/* update erase group size to be high-capacity */
1338 		mmc->erase_grp_size =
1339 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1340 
1341 	}
1342 
1343 	/* all OK, write the configuration */
1344 	for (i = 0; i < 4; i++) {
1345 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1346 				 EXT_CSD_ENH_START_ADDR+i,
1347 				 (enh_start_addr >> (i*8)) & 0xFF);
1348 		if (err)
1349 			return err;
1350 	}
1351 	for (i = 0; i < 3; i++) {
1352 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1353 				 EXT_CSD_ENH_SIZE_MULT+i,
1354 				 (enh_size_mult >> (i*8)) & 0xFF);
1355 		if (err)
1356 			return err;
1357 	}
1358 	for (pidx = 0; pidx < 4; pidx++) {
1359 		for (i = 0; i < 3; i++) {
1360 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1361 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1362 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1363 			if (err)
1364 				return err;
1365 		}
1366 	}
1367 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1368 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1369 	if (err)
1370 		return err;
1371 
1372 	if (mode == MMC_HWPART_CONF_SET)
1373 		return 0;
1374 
1375 	/* The WR_REL_SET is a write-once register but shall be
1376 	 * written before setting PART_SETTING_COMPLETED. As it is
1377 	 * write-once we can only write it when completing the
1378 	 * partitioning. */
1379 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1380 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1381 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1382 		if (err)
1383 			return err;
1384 	}
1385 
1386 	/* Setting PART_SETTING_COMPLETED confirms the partition
1387 	 * configuration but it only becomes effective after power
1388 	 * cycle, so we do not adjust the partition related settings
1389 	 * in the mmc struct. */
1390 
1391 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1392 			 EXT_CSD_PARTITION_SETTING,
1393 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1394 	if (err)
1395 		return err;
1396 
1397 	return 0;
1398 }
1399 
1400 #if !CONFIG_IS_ENABLED(DM_MMC)
1401 int mmc_getcd(struct mmc *mmc)
1402 {
1403 	int cd;
1404 
1405 	cd = board_mmc_getcd(mmc);
1406 
1407 	if (cd < 0) {
1408 		if (mmc->cfg->ops->getcd)
1409 			cd = mmc->cfg->ops->getcd(mmc);
1410 		else
1411 			cd = 1;
1412 	}
1413 
1414 	return cd;
1415 }
1416 #endif
1417 
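/*
 * CMD6 (SWITCH_FUNC): mode 0 checks whether a function is supported,
 * mode 1 switches to it; the 64-byte switch status block is returned
 * in resp.
 */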
1418 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1419 {
1420 	struct mmc_cmd cmd;
1421 	struct mmc_data data;
1422 
1423 	/* Switch the frequency */
1424 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1425 	cmd.resp_type = MMC_RSP_R1;
1426 	cmd.cmdarg = (mode << 31) | 0xffffff;
1427 	cmd.cmdarg &= ~(0xf << (group * 4));
1428 	cmd.cmdarg |= value << (group * 4);
1429 
1430 	data.dest = (char *)resp;
1431 	data.blocksize = 64;
1432 	data.blocks = 1;
1433 	data.flags = MMC_DATA_READ;
1434 
1435 	return mmc_send_cmd(mmc, &cmd, &data);
1436 }
1437 
1438 
1439 static int sd_change_freq(struct mmc *mmc)
1440 {
1441 	int err;
1442 	struct mmc_cmd cmd;
1443 	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
1444 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1445 	struct mmc_data data;
1446 	int timeout;
1447 
1448 	mmc->card_caps = 0;
1449 
1450 	if (mmc_host_is_spi(mmc))
1451 		return 0;
1452 
1453 	/* Read the SCR to find out if this card supports higher speeds */
1454 	cmd.cmdidx = MMC_CMD_APP_CMD;
1455 	cmd.resp_type = MMC_RSP_R1;
1456 	cmd.cmdarg = mmc->rca << 16;
1457 
1458 	err = mmc_send_cmd(mmc, &cmd, NULL);
1459 
1460 	if (err)
1461 		return err;
1462 
1463 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1464 	cmd.resp_type = MMC_RSP_R1;
1465 	cmd.cmdarg = 0;
1466 
1467 	timeout = 3;
1468 
1469 retry_scr:
1470 	data.dest = (char *)scr;
1471 	data.blocksize = 8;
1472 	data.blocks = 1;
1473 	data.flags = MMC_DATA_READ;
1474 
1475 	err = mmc_send_cmd(mmc, &cmd, &data);
1476 
1477 	if (err) {
1478 		if (timeout--)
1479 			goto retry_scr;
1480 
1481 		return err;
1482 	}
1483 
1484 	mmc->scr[0] = __be32_to_cpu(scr[0]);
1485 	mmc->scr[1] = __be32_to_cpu(scr[1]);
1486 
1487 	switch ((mmc->scr[0] >> 24) & 0xf) {
1488 	case 0:
1489 		mmc->version = SD_VERSION_1_0;
1490 		break;
1491 	case 1:
1492 		mmc->version = SD_VERSION_1_10;
1493 		break;
1494 	case 2:
1495 		mmc->version = SD_VERSION_2;
1496 		if ((mmc->scr[0] >> 15) & 0x1)
1497 			mmc->version = SD_VERSION_3;
1498 		break;
1499 	default:
1500 		mmc->version = SD_VERSION_1_0;
1501 		break;
1502 	}
1503 
1504 	if (mmc->scr[0] & SD_DATA_4BIT)
1505 		mmc->card_caps |= MMC_MODE_4BIT;
1506 
1507 	/* Version 1.0 doesn't support switching */
1508 	if (mmc->version == SD_VERSION_1_0)
1509 		return 0;
1510 
1511 	timeout = 4;
1512 	while (timeout--) {
1513 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1514 				(u8 *)switch_status);
1515 
1516 		if (err)
1517 			return err;
1518 
1519 		/* The high-speed function is busy.  Try again */
1520 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1521 			break;
1522 	}
1523 
1524 	/* If high-speed isn't supported, we return */
1525 	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
1526 		return 0;
1527 
1528 	/*
1529 	 * If the host doesn't support SD_HIGHSPEED, do not switch the card
1530 	 * to HIGHSPEED mode even if the card supports SD_HIGHSPEED.
1531 	 * This avoids further problems that arise when the card and the
1532 	 * host run in different modes.
1533 	 */
1534 	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
1535 		(mmc->cfg->host_caps & MMC_MODE_HS)))
1536 		return 0;
1537 
1538 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1539 
1540 	if (err)
1541 		return err;
1542 
1543 	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
1544 		mmc->card_caps |= MMC_MODE_HS;
1545 
1546 	return 0;
1547 }
1548 
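/*
 * Read the 64-byte SD Status register (ACMD13) to obtain the allocation
 * unit size and the erase timeout/offset parameters.
 */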
1549 static int sd_read_ssr(struct mmc *mmc)
1550 {
1551 	int err, i;
1552 	struct mmc_cmd cmd;
1553 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1554 	struct mmc_data data;
1555 	int timeout = 3;
1556 	unsigned int au, eo, et, es;
1557 
1558 	cmd.cmdidx = MMC_CMD_APP_CMD;
1559 	cmd.resp_type = MMC_RSP_R1;
1560 	cmd.cmdarg = mmc->rca << 16;
1561 
1562 	err = mmc_send_cmd(mmc, &cmd, NULL);
1563 	if (err)
1564 		return err;
1565 
1566 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1567 	cmd.resp_type = MMC_RSP_R1;
1568 	cmd.cmdarg = 0;
1569 
1570 retry_ssr:
1571 	data.dest = (char *)ssr;
1572 	data.blocksize = 64;
1573 	data.blocks = 1;
1574 	data.flags = MMC_DATA_READ;
1575 
1576 	err = mmc_send_cmd(mmc, &cmd, &data);
1577 	if (err) {
1578 		if (timeout--)
1579 			goto retry_ssr;
1580 
1581 		return err;
1582 	}
1583 
1584 	for (i = 0; i < 16; i++)
1585 		ssr[i] = be32_to_cpu(ssr[i]);
1586 
1587 	au = (ssr[2] >> 12) & 0xF;
1588 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1589 		mmc->ssr.au = sd_au_size[au];
1590 		es = (ssr[3] >> 24) & 0xFF;
1591 		es |= (ssr[2] & 0xFF) << 8;
1592 		et = (ssr[3] >> 18) & 0x3F;
1593 		if (es && et) {
1594 			eo = (ssr[3] >> 16) & 0x3;
1595 			mmc->ssr.erase_timeout = (et * 1000) / es;
1596 			mmc->ssr.erase_offset = eo * 1000;
1597 		}
1598 	} else {
1599 		debug("Invalid Allocation Unit Size.\n");
1600 	}
1601 
1602 	return 0;
1603 }
1604 
1605 /* frequency bases */
1606 /* divided by 10 to be nice to platforms without floating point */
1607 static const int fbase[] = {
1608 	10000,
1609 	100000,
1610 	1000000,
1611 	10000000,
1612 };
1613 
1614 /* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1615  * to platforms without floating point.
1616  */
1617 static const u8 multipliers[] = {
1618 	0,	/* reserved */
1619 	10,
1620 	12,
1621 	13,
1622 	15,
1623 	20,
1624 	25,
1625 	30,
1626 	35,
1627 	40,
1628 	45,
1629 	50,
1630 	55,
1631 	60,
1632 	70,
1633 	80,
1634 };
1635 
1636 #if !CONFIG_IS_ENABLED(DM_MMC)
1637 static void mmc_set_ios(struct mmc *mmc)
1638 {
1639 	if (mmc->cfg->ops->set_ios)
1640 		mmc->cfg->ops->set_ios(mmc);
1641 }
1642 
1643 static bool mmc_card_busy(struct mmc *mmc)
1644 {
1645 	if (!mmc->cfg->ops->card_busy)
1646 		return true;	/* no card_busy hook; callers guard with mmc_can_card_busy() */
1647 
1648 	return mmc->cfg->ops->card_busy(mmc);
1649 }
1650 
1651 static bool mmc_can_card_busy(struct mmc *mmc)
1652 {
1653 	return !!mmc->cfg->ops->card_busy;
1654 }
1655 #endif
1656 
1657 static int mmc_startup(struct mmc *mmc)
1658 {
1659 	int err, i;
1660 	uint mult, freq, tran_speed;
1661 	u64 cmult, csize, capacity;
1662 	struct mmc_cmd cmd;
1663 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1664 	bool has_parts = false;
1665 	bool part_completed;
1666 	struct blk_desc *bdesc;
1667 
1668 #ifdef CONFIG_MMC_SPI_CRC_ON
1669 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1670 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1671 		cmd.resp_type = MMC_RSP_R1;
1672 		cmd.cmdarg = 1;
1673 		err = mmc_send_cmd(mmc, &cmd, NULL);
1674 
1675 		if (err)
1676 			return err;
1677 	}
1678 #endif
1679 #ifndef CONFIG_MMC_USE_PRE_CONFIG
1680 	/* Put the Card in Identify Mode */
1681 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1682 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1683 	cmd.resp_type = MMC_RSP_R2;
1684 	cmd.cmdarg = 0;
1685 
1686 	err = mmc_send_cmd(mmc, &cmd, NULL);
1687 
1688 	if (err)
1689 		return err;
1690 
1691 	memcpy(mmc->cid, cmd.response, 16);
1692 
1693 	/*
1694 	 * For MMC cards, set the Relative Address.
1695 	 * For SD cards, get the Relative Address.
1696 	 * This also puts the cards into Standby State.
1697 	 */
1698 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1699 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1700 		cmd.cmdarg = mmc->rca << 16;
1701 		cmd.resp_type = MMC_RSP_R6;
1702 
1703 		err = mmc_send_cmd(mmc, &cmd, NULL);
1704 
1705 		if (err)
1706 			return err;
1707 
1708 		if (IS_SD(mmc))
1709 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1710 	}
1711 #endif
1712 	/* Get the Card-Specific Data */
1713 	cmd.cmdidx = MMC_CMD_SEND_CSD;
1714 	cmd.resp_type = MMC_RSP_R2;
1715 	cmd.cmdarg = mmc->rca << 16;
1716 
1717 	err = mmc_send_cmd(mmc, &cmd, NULL);
1718 
1719 	if (err)
1720 		return err;
1721 
1722 	mmc->csd[0] = cmd.response[0];
1723 	mmc->csd[1] = cmd.response[1];
1724 	mmc->csd[2] = cmd.response[2];
1725 	mmc->csd[3] = cmd.response[3];
1726 
1727 	if (mmc->version == MMC_VERSION_UNKNOWN) {
1728 		int version = (cmd.response[0] >> 26) & 0xf;
1729 
1730 		switch (version) {
1731 		case 0:
1732 			mmc->version = MMC_VERSION_1_2;
1733 			break;
1734 		case 1:
1735 			mmc->version = MMC_VERSION_1_4;
1736 			break;
1737 		case 2:
1738 			mmc->version = MMC_VERSION_2_2;
1739 			break;
1740 		case 3:
1741 			mmc->version = MMC_VERSION_3;
1742 			break;
1743 		case 4:
1744 			mmc->version = MMC_VERSION_4;
1745 			break;
1746 		default:
1747 			mmc->version = MMC_VERSION_1_2;
1748 			break;
1749 		}
1750 	}
1751 
1752 	/* divide frequency by 10, since the mults are 10x bigger */
1753 	freq = fbase[(cmd.response[0] & 0x7)];
1754 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1755 
1756 	tran_speed = freq * mult;
1757 
1758 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1759 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1760 
1761 	if (IS_SD(mmc))
1762 		mmc->write_bl_len = mmc->read_bl_len;
1763 	else
1764 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1765 
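	/*
	 * Decode C_SIZE/C_SIZE_MULT from the CSD: capacity is
	 * (csize + 1) << (cmult + 2) read blocks. For devices larger than
	 * 2 GB this is overridden below by the EXT_CSD sector count.
	 */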
1766 	if (mmc->high_capacity) {
1767 		csize = (mmc->csd[1] & 0x3f) << 16
1768 			| (mmc->csd[2] & 0xffff0000) >> 16;
1769 		cmult = 8;
1770 	} else {
1771 		csize = (mmc->csd[1] & 0x3ff) << 2
1772 			| (mmc->csd[2] & 0xc0000000) >> 30;
1773 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
1774 	}
1775 
1776 	mmc->capacity_user = (csize + 1) << (cmult + 2);
1777 	mmc->capacity_user *= mmc->read_bl_len;
1778 	mmc->capacity_boot = 0;
1779 	mmc->capacity_rpmb = 0;
1780 	for (i = 0; i < 4; i++)
1781 		mmc->capacity_gp[i] = 0;
1782 
1783 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1784 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1785 
1786 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1787 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1788 
1789 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1790 		cmd.cmdidx = MMC_CMD_SET_DSR;
1791 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1792 		cmd.resp_type = MMC_RSP_NONE;
1793 		if (mmc_send_cmd(mmc, &cmd, NULL))
1794 			printf("MMC: SET_DSR failed\n");
1795 	}
1796 
1797 	/* Select the card, and put it into Transfer Mode */
1798 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1799 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
1800 		cmd.resp_type = MMC_RSP_R1;
1801 		cmd.cmdarg = mmc->rca << 16;
1802 		err = mmc_send_cmd(mmc, &cmd, NULL);
1803 
1804 		if (err)
1805 			return err;
1806 	}
1807 
1808 	/*
1809 	 * For SD, its erase group is always one sector
1810 	 */
1811 	mmc->erase_grp_size = 1;
1812 	mmc->part_config = MMCPART_NOAVAILABLE;
1813 	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
1814 		/* check ext_csd version and capacity */
1815 		err = mmc_send_ext_csd(mmc, ext_csd);
1816 		if (err)
1817 			return err;
1818 		if (ext_csd[EXT_CSD_REV] >= 2) {
1819 			/*
1820 			 * According to the JEDEC standard, the capacity
1821 			 * reported in ext_csd is only valid if it is
1822 			 * greater than 2 GB.
1823 			 */
1824 			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1825 					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1826 					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1827 					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1828 			capacity *= MMC_MAX_BLOCK_LEN;
1829 			if ((capacity >> 20) > 2 * 1024)
1830 				mmc->capacity_user = capacity;
1831 		}
1832 
1833 		switch (ext_csd[EXT_CSD_REV]) {
1834 		case 1:
1835 			mmc->version = MMC_VERSION_4_1;
1836 			break;
1837 		case 2:
1838 			mmc->version = MMC_VERSION_4_2;
1839 			break;
1840 		case 3:
1841 			mmc->version = MMC_VERSION_4_3;
1842 			break;
1843 		case 5:
1844 			mmc->version = MMC_VERSION_4_41;
1845 			break;
1846 		case 6:
1847 			mmc->version = MMC_VERSION_4_5;
1848 			break;
1849 		case 7:
1850 			mmc->version = MMC_VERSION_5_0;
1851 			break;
1852 		case 8:
1853 			mmc->version = MMC_VERSION_5_1;
1854 			break;
1855 		}
1856 
1857 		/* The partition data may be non-zero but it is only
1858 		 * effective if PARTITION_SETTING_COMPLETED is set in
1859 		 * EXT_CSD, so ignore any data if this bit is not set,
1860 		 * except for enabling the high-capacity group size
1861 		 * definition (see below). */
1862 		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1863 				    EXT_CSD_PARTITION_SETTING_COMPLETED);
1864 
1865 		/* store the partition info of emmc */
1866 		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1867 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1868 		    ext_csd[EXT_CSD_BOOT_MULT])
1869 			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1870 		if (part_completed &&
1871 		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1872 			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1873 		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
1874 			mmc->esr.mmc_can_trim = 1;
1875 
1876 		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1877 
1878 		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1879 
1880 		for (i = 0; i < 4; i++) {
1881 			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1882 			uint mult = (ext_csd[idx + 2] << 16) +
1883 				(ext_csd[idx + 1] << 8) + ext_csd[idx];
1884 			if (mult)
1885 				has_parts = true;
1886 			if (!part_completed)
1887 				continue;
1888 			mmc->capacity_gp[i] = mult;
1889 			mmc->capacity_gp[i] *=
1890 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1891 			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1892 			mmc->capacity_gp[i] <<= 19;
1893 		}
1894 
1895 		if (part_completed) {
1896 			mmc->enh_user_size =
1897 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
1898 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
1899 				ext_csd[EXT_CSD_ENH_SIZE_MULT];
1900 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1901 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1902 			mmc->enh_user_size <<= 19;
1903 			mmc->enh_user_start =
1904 				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
1905 				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
1906 				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
1907 				ext_csd[EXT_CSD_ENH_START_ADDR];
1908 			if (mmc->high_capacity)
1909 				mmc->enh_user_start <<= 9;
1910 		}
1911 
1912 		/*
1913 		 * The host needs to enable the ERASE_GRP_DEF bit if the device
1914 		 * is partitioned. This bit is lost after every reset or power
1915 		 * off, and it affects the erase size.
1916 		 */
1917 		if (part_completed)
1918 			has_parts = true;
1919 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1920 		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1921 			has_parts = true;
1922 		if (has_parts) {
1923 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1924 				EXT_CSD_ERASE_GROUP_DEF, 1);
1925 
1926 			if (err)
1927 				return err;
1928 			else
1929 				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1930 		}
1931 
1932 		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1933 			/* Read out group size from ext_csd */
1934 			mmc->erase_grp_size =
1935 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1936 			/*
1937 			 * if high capacity and partition setting completed
1938 			 * SEC_COUNT is valid even if it is smaller than 2 GiB
1939 			 * JEDEC Standard JESD84-B45, 6.2.4
1940 			 */
1941 			if (mmc->high_capacity && part_completed) {
1942 				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1943 					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1944 					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1945 					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1946 				capacity *= MMC_MAX_BLOCK_LEN;
1947 				mmc->capacity_user = capacity;
1948 			}
1949 		} else {
1950 			/* Calculate the group size from the csd value. */
1951 			int erase_gsz, erase_gmul;
1952 			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1953 			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1954 			mmc->erase_grp_size = (erase_gsz + 1)
1955 				* (erase_gmul + 1);
1956 		}
1957 
1958 		mmc->hc_wp_grp_size = 1024
1959 			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1960 			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1961 
1962 		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1963 	}
1964 
1965 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1966 	if (err)
1967 		return err;
1968 
1969 	if (IS_SD(mmc))
1970 		err = sd_change_freq(mmc);
1971 	else
1972 		err = mmc_change_freq(mmc);
1973 
1974 	if (err)
1975 		return err;
1976 
1977 	/* Restrict card's capabilities by what the host can do */
1978 	mmc->card_caps &= mmc->cfg->host_caps;
1979 
1980 	if (IS_SD(mmc)) {
1981 		if (mmc->card_caps & MMC_MODE_4BIT) {
1982 			cmd.cmdidx = MMC_CMD_APP_CMD;
1983 			cmd.resp_type = MMC_RSP_R1;
1984 			cmd.cmdarg = mmc->rca << 16;
1985 
1986 			err = mmc_send_cmd(mmc, &cmd, NULL);
1987 			if (err)
1988 				return err;
1989 
1990 			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1991 			cmd.resp_type = MMC_RSP_R1;
1992 			cmd.cmdarg = 2;
1993 			err = mmc_send_cmd(mmc, &cmd, NULL);
1994 			if (err)
1995 				return err;
1996 
1997 			mmc_set_bus_width(mmc, 4);
1998 		}
1999 
2000 		err = sd_read_ssr(mmc);
2001 		if (err)
2002 			return err;
2003 
2004 		if (mmc->card_caps & MMC_MODE_HS)
2005 			tran_speed = 50000000;
2006 		else
2007 			tran_speed = 25000000;
2008 
2009 		mmc_set_clock(mmc, tran_speed);
2010 	}
2011 
2012 	/* Fix the block length for DDR mode */
2013 	if (mmc_card_ddr(mmc)) {
2014 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2015 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2016 	}
2017 
2018 	/* fill in device description */
2019 	bdesc = mmc_get_blk_desc(mmc);
2020 	bdesc->lun = 0;
2021 	bdesc->hwpart = 0;
2022 	bdesc->type = 0;
2023 	bdesc->blksz = mmc->read_bl_len;
2024 	bdesc->log2blksz = LOG2(bdesc->blksz);
2025 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2026 #if !defined(CONFIG_SPL_BUILD) || \
2027 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2028 		!defined(CONFIG_USE_TINY_PRINTF))
2029 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2030 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2031 		(mmc->cid[3] >> 16) & 0xffff);
2032 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2033 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2034 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2035 		(mmc->cid[2] >> 24) & 0xff);
2036 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2037 		(mmc->cid[2] >> 16) & 0xf);
2038 #else
2039 	bdesc->vendor[0] = 0;
2040 	bdesc->product[0] = 0;
2041 	bdesc->revision[0] = 0;
2042 #endif
2043 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2044 	part_init(bdesc);
2045 #endif
2046 
2047 	return 0;
2048 }
2049 
2050 #ifndef CONFIG_MMC_USE_PRE_CONFIG
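/*
 * CMD8 (SEND_IF_COND) with the 0xAA check pattern; a card that echoes the
 * pattern back is SD version 2.00 or later.
 */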
2051 static int mmc_send_if_cond(struct mmc *mmc)
2052 {
2053 	struct mmc_cmd cmd;
2054 	int err;
2055 
2056 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2057 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2058 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2059 	cmd.resp_type = MMC_RSP_R7;
2060 
2061 	err = mmc_send_cmd(mmc, &cmd, NULL);
2062 
2063 	if (err)
2064 		return err;
2065 
2066 	if ((cmd.response[0] & 0xff) != 0xaa)
2067 		return -EOPNOTSUPP;
2068 	else
2069 		mmc->version = SD_VERSION_2;
2070 
2071 	return 0;
2072 }
2073 #endif
2074 
2075 #if !CONFIG_IS_ENABLED(DM_MMC)
2076 /* board-specific MMC power initializations. */
2077 __weak void board_mmc_power_init(void)
2078 {
2079 }
2080 #endif
2081 
2082 #ifndef CONFIG_MMC_USE_PRE_CONFIG
2083 static int mmc_power_init(struct mmc *mmc)
2084 {
2085 #if CONFIG_IS_ENABLED(DM_MMC)
2086 #if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
2087 	struct udevice *vmmc_supply;
2088 	int ret;
2089 
2090 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2091 					  &vmmc_supply);
2092 	if (ret) {
2093 		debug("%s: No vmmc supply\n", mmc->dev->name);
2094 		return 0;
2095 	}
2096 
2097 	ret = regulator_set_enable(vmmc_supply, true);
2098 	if (ret) {
2099 		puts("Error enabling VMMC supply\n");
2100 		return ret;
2101 	}
2102 #endif
2103 #else /* !CONFIG_IS_ENABLED(DM_MMC) */
2104 	/*
2105 	 * Driver model should use a regulator, as above, rather than calling
2106 	 * out to board code.
2107 	 */
2108 	board_mmc_power_init();
2109 #endif
2110 	return 0;
2111 }
2112 #endif /* !CONFIG_MMC_USE_PRE_CONFIG */
2113 #ifdef CONFIG_MMC_USE_PRE_CONFIG
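/*
 * CMD7 (SELECT/DESELECT_CARD): a matching RCA selects the card, while
 * RCA 0 deselects it and returns it to the Stand-by state.
 */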
2114 static int mmc_select_card(struct mmc *mmc, int n)
2115 {
2116 	struct mmc_cmd cmd;
2117 	int err = 0;
2118 
2119 	memset(&cmd, 0, sizeof(struct mmc_cmd));
2120 	if (!mmc_host_is_spi(mmc)) { /* CMD7 is not supported in SPI mode */
2121 		mmc->rca = n;
2122 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2123 		cmd.resp_type = MMC_RSP_R1;
2124 		cmd.cmdarg = mmc->rca << 16;
2125 		err = mmc_send_cmd(mmc, &cmd, NULL);
2126 	}
2127 
2128 	return err;
2129 }
2130 
2131 int mmc_start_init(struct mmc *mmc)
2132 {
2133 	/*
2134 	 * We reuse the MMC configuration left by the bootrom, so there
2135 	 * is no need to reset the eMMC device here.
2136 	 */
2137 	mmc_set_bus_width(mmc, 8);
2138 	mmc_set_clock(mmc, 1);
2139 	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
2140 	/* Send CMD7 with RCA 0 to put the card back into Stand-by state */
2141 	mmc_select_card(mmc, 0);
2142 	mmc->version = MMC_VERSION_UNKNOWN;
2143 	mmc->high_capacity = 1;
2144 	/*
2145 	 * The Rockchip bootrom assigns RCA 2 to the eMMC, so match it here;
2146 	 * on other platforms fall back to the default RCA of 1.
2147 	 */
2148 #ifdef CONFIG_ARCH_ROCKCHIP
2149 	mmc->rca = 2;
2150 #else
2151 	mmc->rca = 1;
2152 #endif
2153 	return 0;
2154 }
2155 #else /* !CONFIG_MMC_USE_PRE_CONFIG */
2156 int mmc_start_init(struct mmc *mmc)
2157 {
2158 	bool no_card;
2159 	int err;
2160 
2161 	/* Treat the card as absent if card-detect says so or (non-DM) there is no init hook */
2162 	no_card = mmc_getcd(mmc) == 0;
2163 #if !CONFIG_IS_ENABLED(DM_MMC)
2164 	no_card = no_card || (mmc->cfg->ops->init == NULL);
2165 #endif
2166 	if (no_card) {
2167 		mmc->has_init = 0;
2168 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2169 		printf("MMC: no card present\n");
2170 #endif
2171 		return -ENOMEDIUM;
2172 	}
2173 
2174 	if (mmc->has_init)
2175 		return 0;
2176 
2177 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2178 	mmc_adapter_card_type_ident();
2179 #endif
2180 	err = mmc_power_init(mmc);
2181 	if (err)
2182 		return err;
2183 
2184 #if CONFIG_IS_ENABLED(DM_MMC)
2185 	/* The device has already been probed ready for use */
2186 #else
2187 	/* The no_card check above guarantees ops->init is not NULL */
2188 	err = mmc->cfg->ops->init(mmc);
2189 	if (err)
2190 		return err;
2191 #endif
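	/*
	 * Start identification at the lowest common settings: 1-bit bus,
	 * minimum clock (mmc_set_clock() clamps the request up to the
	 * host's f_min) and legacy timing.
	 */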
2192 	mmc_set_bus_width(mmc, 1);
2193 	mmc_set_clock(mmc, 1);
2194 	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
2195 
2196 	/* Reset the Card */
2197 	err = mmc_go_idle(mmc);
2198 
2199 	if (err)
2200 		return err;
2201 
2202 	/* The internal partition selection resets to the user partition (0) on every CMD0 */
2203 	mmc_get_blk_desc(mmc)->hwpart = 0;
2204 
2205 	/* Test for SD version 2 */
2206 	err = mmc_send_if_cond(mmc);
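	/* An error here just means the card predates SD 2.0, so it is deliberately ignored */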
2207 
2208 	/* Now try to get the SD card's operating condition */
2209 	err = sd_send_op_cond(mmc);
2210 
2211 	/* If the command timed out, we check for an MMC card */
2212 	if (err == -ETIMEDOUT) {
2213 		err = mmc_send_op_cond(mmc);
2214 
2215 		if (err) {
2216 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2217 			printf("Card did not respond to voltage select!\n");
2218 #endif
2219 			return -EOPNOTSUPP;
2220 		}
2221 	}
2222 
2223 	if (!err)
2224 		mmc->init_in_progress = 1;
2225 
2226 	return err;
2227 }
2228 #endif /* CONFIG_MMC_USE_PRE_CONFIG */
2229 
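/*
 * Second half of the two-stage init: finish any pending OP_COND polling
 * started by mmc_start_init(), then run the full card startup sequence.
 */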
2230 static int mmc_complete_init(struct mmc *mmc)
2231 {
2232 	int err = 0;
2233 
2234 	mmc->init_in_progress = 0;
2235 	if (mmc->op_cond_pending)
2236 		err = mmc_complete_op_cond(mmc);
2237 
2238 	if (!err)
2239 		err = mmc_startup(mmc);
2240 	if (err)
2241 		mmc->has_init = 0;
2242 	else
2243 		mmc->has_init = 1;
2244 	return err;
2245 }
2246 
2247 int mmc_init(struct mmc *mmc)
2248 {
2249 	int err = 0;
2250 	__maybe_unused unsigned start;
2251 #if CONFIG_IS_ENABLED(DM_MMC)
2252 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2253 
2254 	upriv->mmc = mmc;
2255 #endif
2256 	if (mmc->has_init)
2257 		return 0;
2258 
2259 	start = get_timer(0);
2260 
2261 	if (!mmc->init_in_progress)
2262 		err = mmc_start_init(mmc);
2263 
2264 	if (!err)
2265 		err = mmc_complete_init(mmc);
2266 	if (err)
2267 		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2268 
2269 	return err;
2270 }
2271 
2272 int mmc_set_dsr(struct mmc *mmc, u16 val)
2273 {
2274 	mmc->dsr = val;
2275 	return 0;
2276 }
2277 
2278 /* CPU-specific MMC initializations */
2279 __weak int cpu_mmc_init(bd_t *bis)
2280 {
2281 	return -1;
2282 }
2283 
2284 /* board-specific MMC initializations. */
2285 __weak int board_mmc_init(bd_t *bis)
2286 {
2287 	return -1;
2288 }
2289 
2290 void mmc_set_preinit(struct mmc *mmc, int preinit)
2291 {
2292 	mmc->preinit = preinit;
2293 }
2294 
2295 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2296 static int mmc_probe(bd_t *bis)
2297 {
2298 	return 0;
2299 }
2300 #elif CONFIG_IS_ENABLED(DM_MMC)
2301 static int mmc_probe(bd_t *bis)
2302 {
2303 	int ret, i;
2304 	struct uclass *uc;
2305 	struct udevice *dev;
2306 
2307 	ret = uclass_get(UCLASS_MMC, &uc);
2308 	if (ret)
2309 		return ret;
2310 
2311 	/*
2312 	 * Try to add the devices in sequence order. Driver model should
2313 	 * really allow holes, but the current MMC list does not, so
2314 	 * requesting 0, 1, 3 yields 0, 1, 2.
2315 	 */
2316 	for (i = 0; ; i++) {
2317 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2318 		if (ret == -ENODEV)
2319 			break;
2320 	}
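	/* Probe every controller; a failure on one device should not stop the rest */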
2321 	uclass_foreach_dev(dev, uc) {
2322 		ret = device_probe(dev);
2323 		if (ret)
2324 			printf("%s - probe failed: %d\n", dev->name, ret);
2325 	}
2326 
2327 	return 0;
2328 }
2329 #else
2330 static int mmc_probe(bd_t *bis)
2331 {
2332 	if (board_mmc_init(bis) < 0)
2333 		cpu_mmc_init(bis);
2334 
2335 	return 0;
2336 }
2337 #endif
2338 
2339 int mmc_initialize(bd_t *bis)
2340 {
2341 	static int initialized;
2342 	int ret;
2343 	if (initialized)	/* Avoid initializing mmc multiple times */
2344 		return 0;
2345 	initialized = 1;
2346 
2347 #if !CONFIG_IS_ENABLED(BLK)
2348 #if !CONFIG_IS_ENABLED(MMC_TINY)
2349 	mmc_list_init();
2350 #endif
2351 #endif
2352 	ret = mmc_probe(bis);
2353 	if (ret)
2354 		return ret;
2355 
2356 #ifndef CONFIG_SPL_BUILD
2357 	print_mmc_devices(',');
2358 #endif
2359 
2360 	mmc_do_preinit();
2361 	return 0;
2362 }
2363 
2364 #ifdef CONFIG_CMD_BKOPS_ENABLE
2365 int mmc_set_bkops_enable(struct mmc *mmc)
2366 {
2367 	int err;
2368 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2369 
2370 	err = mmc_send_ext_csd(mmc, ext_csd);
2371 	if (err) {
2372 		puts("Could not get ext_csd register values\n");
2373 		return err;
2374 	}
2375 
2376 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2377 		puts("Background operations not supported on device\n");
2378 		return -EMEDIUMTYPE;
2379 	}
2380 
2381 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2382 		puts("Background operations already enabled\n");
2383 		return 0;
2384 	}
2385 
2386 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2387 	if (err) {
2388 		puts("Failed to enable manual background operations\n");
2389 		return err;
2390 	}
2391 
2392 	puts("Enabled manual background operations\n");
2393 
2394 	return 0;
2395 }
2396 #endif
2397