xref: /rk3399_rockchip-uboot/drivers/mmc/mmc.c (revision 965eda410b8d28439dc1ba4f76061880d72978fd)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
25 static const unsigned int sd_au_size[] = {
26 	0,		SZ_16K / 512,		SZ_32K / 512,
27 	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
28 	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
29 	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
30 	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
31 };
32 
33 #if CONFIG_IS_ENABLED(MMC_TINY)
34 static struct mmc mmc_static;
35 struct mmc *find_mmc_device(int dev_num)
36 {
37 	return &mmc_static;
38 }
39 
40 void mmc_do_preinit(void)
41 {
42 	struct mmc *m = &mmc_static;
43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
44 	mmc_set_preinit(m, 1);
45 #endif
46 	if (m->preinit)
47 		mmc_start_init(m);
48 }
49 
50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
51 {
52 	return &mmc->block_dev;
53 }
54 #endif
55 
56 #if !CONFIG_IS_ENABLED(DM_MMC)
57 __weak int board_mmc_getwp(struct mmc *mmc)
58 {
59 	return -1;
60 }
61 
62 int mmc_getwp(struct mmc *mmc)
63 {
64 	int wp;
65 
66 	wp = board_mmc_getwp(mmc);
67 
68 	if (wp < 0) {
69 		if (mmc->cfg->ops->getwp)
70 			wp = mmc->cfg->ops->getwp(mmc);
71 		else
72 			wp = 0;
73 	}
74 
75 	return wp;
76 }
77 
78 __weak int board_mmc_getcd(struct mmc *mmc)
79 {
80 	return -1;
81 }
82 #endif
83 
84 #ifdef CONFIG_MMC_TRACE
85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
86 {
87 	printf("CMD_SEND:%d\n", cmd->cmdidx);
88 	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
89 }
90 
91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
92 {
93 	int i;
94 	u8 *ptr;
95 
96 	if (ret) {
97 		printf("\t\tRET\t\t\t %d\n", ret);
98 	} else {
99 		switch (cmd->resp_type) {
100 		case MMC_RSP_NONE:
101 			printf("\t\tMMC_RSP_NONE\n");
102 			break;
103 		case MMC_RSP_R1:
104 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
105 				cmd->response[0]);
106 			break;
107 		case MMC_RSP_R1b:
108 			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
109 				cmd->response[0]);
110 			break;
111 		case MMC_RSP_R2:
112 			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
113 				cmd->response[0]);
114 			printf("\t\t          \t\t 0x%08X \n",
115 				cmd->response[1]);
116 			printf("\t\t          \t\t 0x%08X \n",
117 				cmd->response[2]);
118 			printf("\t\t          \t\t 0x%08X \n",
119 				cmd->response[3]);
120 			printf("\n");
121 			printf("\t\t\t\t\tDUMPING DATA\n");
122 			for (i = 0; i < 4; i++) {
123 				int j;
124 				printf("\t\t\t\t\t%03d - ", i*4);
125 				ptr = (u8 *)&cmd->response[i];
126 				ptr += 3;
127 				for (j = 0; j < 4; j++)
128 					printf("%02X ", *ptr--);
129 				printf("\n");
130 			}
131 			break;
132 		case MMC_RSP_R3:
133 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
134 				cmd->response[0]);
135 			break;
136 		default:
137 			printf("\t\tERROR MMC rsp not supported\n");
138 			break;
139 		}
140 	}
141 }
142 
143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
144 {
145 	int status;
146 
147 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
148 	printf("CURR STATE:%d\n", status);
149 }
150 #endif
151 
152 #if !CONFIG_IS_ENABLED(DM_MMC)
153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
154 {
155 	int ret;
156 
157 	mmmc_trace_before_send(mmc, cmd);
158 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
159 	mmmc_trace_after_send(mmc, cmd, ret);
160 
161 	return ret;
162 }
163 #endif
164 
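/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data and
 * has left the programming state, or until the timeout (in ms) expires.
 * Returns 0 on success, -ECOMM on a reported status error, -ETIMEDOUT on
 * timeout, or the command error once the retries are exhausted.
 */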
165 int mmc_send_status(struct mmc *mmc, int timeout)
166 {
167 	struct mmc_cmd cmd;
168 	int err, retries = 5;
169 
170 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
171 	cmd.resp_type = MMC_RSP_R1;
172 	if (!mmc_host_is_spi(mmc))
173 		cmd.cmdarg = mmc->rca << 16;
174 
175 	while (1) {
176 		err = mmc_send_cmd(mmc, &cmd, NULL);
177 		if (!err) {
178 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
179 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
180 			     MMC_STATE_PRG)
181 				break;
182 			else if (cmd.response[0] & MMC_STATUS_MASK) {
183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
184 				printf("Status Error: 0x%08X\n",
185 					cmd.response[0]);
186 #endif
187 				return -ECOMM;
188 			}
189 		} else if (--retries < 0)
190 			return err;
191 
192 		if (timeout-- <= 0)
193 			break;
194 
195 		udelay(1000);
196 	}
197 
198 	mmc_trace_state(mmc, &cmd);
199 	if (timeout <= 0) {
200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
201 		printf("Timeout waiting for card ready\n");
202 #endif
203 		return -ETIMEDOUT;
204 	}
205 
206 	return 0;
207 }
208 
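/*
 * Set the block length with CMD16. Skipped when the card runs a DDR timing,
 * where the block length is fixed at 512 bytes.
 */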
209 int mmc_set_blocklen(struct mmc *mmc, int len)
210 {
211 	struct mmc_cmd cmd;
212 
213 	if (mmc_card_ddr(mmc))
214 		return 0;
215 
216 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
217 	cmd.resp_type = MMC_RSP_R1;
218 	cmd.cmdarg = len;
219 
220 	return mmc_send_cmd(mmc, &cmd, NULL);
221 }
222 
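/*
 * Read blkcnt blocks starting at start into dst using CMD17/CMD18.
 * High-capacity cards are block addressed, standard-capacity cards are byte
 * addressed. Multi-block reads are terminated with CMD12 (STOP_TRANSMISSION).
 * Returns the number of blocks read, or 0 on error.
 */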
223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
224 			   lbaint_t blkcnt)
225 {
226 	struct mmc_cmd cmd;
227 	struct mmc_data data;
228 
229 	if (blkcnt > 1)
230 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
231 	else
232 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
233 
234 	if (mmc->high_capacity)
235 		cmd.cmdarg = start;
236 	else
237 		cmd.cmdarg = start * mmc->read_bl_len;
238 
239 	cmd.resp_type = MMC_RSP_R1;
240 
241 	data.dest = dst;
242 	data.blocks = blkcnt;
243 	data.blocksize = mmc->read_bl_len;
244 	data.flags = MMC_DATA_READ;
245 
246 	if (mmc_send_cmd(mmc, &cmd, &data))
247 		return 0;
248 
249 	if (blkcnt > 1) {
250 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
251 		cmd.cmdarg = 0;
252 		cmd.resp_type = MMC_RSP_R1b;
253 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
255 			printf("mmc failed to send stop cmd\n");
256 #endif
257 			return 0;
258 		}
259 	}
260 
261 	return blkcnt;
262 }
263 
264 #if CONFIG_IS_ENABLED(BLK)
265 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
266 #else
267 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
268 		void *dst)
269 #endif
270 {
271 #if CONFIG_IS_ENABLED(BLK)
272 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
273 #endif
274 	int dev_num = block_dev->devnum;
275 	int err;
276 	lbaint_t cur, blocks_todo = blkcnt;
277 
278 	if (blkcnt == 0)
279 		return 0;
280 
281 	struct mmc *mmc = find_mmc_device(dev_num);
282 	if (!mmc)
283 		return 0;
284 
285 	if (CONFIG_IS_ENABLED(MMC_TINY))
286 		err = mmc_switch_part(mmc, block_dev->hwpart);
287 	else
288 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
289 
290 	if (err < 0)
291 		return 0;
292 
293 	if ((start + blkcnt) > block_dev->lba) {
294 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
295 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
296 			start + blkcnt, block_dev->lba);
297 #endif
298 		return 0;
299 	}
300 
301 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
302 		debug("%s: Failed to set blocklen\n", __func__);
303 		return 0;
304 	}
305 
306 	do {
307 		cur = (blocks_todo > mmc->cfg->b_max) ?
308 			mmc->cfg->b_max : blocks_todo;
309 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
310 			debug("%s: Failed to read blocks\n", __func__);
311 			return 0;
312 		}
313 		blocks_todo -= cur;
314 		start += cur;
315 		dst += cur * mmc->read_bl_len;
316 	} while (blocks_todo > 0);
317 
318 	return blkcnt;
319 }
320 
321 void mmc_set_clock(struct mmc *mmc, uint clock)
322 {
323 	if (clock > mmc->cfg->f_max)
324 		clock = mmc->cfg->f_max;
325 
326 	if (clock < mmc->cfg->f_min)
327 		clock = mmc->cfg->f_min;
328 
329 	mmc->clock = clock;
330 
331 	mmc_set_ios(mmc);
332 }
333 
334 static void mmc_set_bus_width(struct mmc *mmc, uint width)
335 {
336 	mmc->bus_width = width;
337 
338 	mmc_set_ios(mmc);
339 }
340 
341 static void mmc_set_timing(struct mmc *mmc, uint timing)
342 {
343 	mmc->timing = timing;
344 	mmc_set_ios(mmc);
345 }
346 
347 static int mmc_go_idle(struct mmc *mmc)
348 {
349 	struct mmc_cmd cmd;
350 	int err;
351 
352 	udelay(1000);
353 
354 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
355 	cmd.cmdarg = 0;
356 	cmd.resp_type = MMC_RSP_NONE;
357 
358 	err = mmc_send_cmd(mmc, &cmd, NULL);
359 
360 	if (err)
361 		return err;
362 
363 	udelay(2000);
364 
365 	return 0;
366 }
367 
368 static int sd_send_op_cond(struct mmc *mmc)
369 {
370 	int timeout = 1000;
371 	int err;
372 	struct mmc_cmd cmd;
373 
374 	while (1) {
375 		cmd.cmdidx = MMC_CMD_APP_CMD;
376 		cmd.resp_type = MMC_RSP_R1;
377 		cmd.cmdarg = 0;
378 
379 		err = mmc_send_cmd(mmc, &cmd, NULL);
380 
381 		if (err)
382 			return err;
383 
384 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
385 		cmd.resp_type = MMC_RSP_R3;
386 
387 		/*
388 		 * Most cards do not answer if some reserved bits
389 		 * in the OCR are set. However, some controllers
390 		 * can set bit 7 (reserved for low voltages), but
391 		 * how to manage low-voltage SD cards is not yet
392 		 * specified.
393 		 */
394 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
395 			(mmc->cfg->voltages & 0xff8000);
396 
397 		if (mmc->version == SD_VERSION_2)
398 			cmd.cmdarg |= OCR_HCS;
399 
400 		err = mmc_send_cmd(mmc, &cmd, NULL);
401 
402 		if (err)
403 			return err;
404 
405 		if (cmd.response[0] & OCR_BUSY)
406 			break;
407 
408 		if (timeout-- <= 0)
409 			return -EOPNOTSUPP;
410 
411 		udelay(1000);
412 	}
413 
414 	if (mmc->version != SD_VERSION_2)
415 		mmc->version = SD_VERSION_1_0;
416 
417 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
418 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
419 		cmd.resp_type = MMC_RSP_R3;
420 		cmd.cmdarg = 0;
421 
422 		err = mmc_send_cmd(mmc, &cmd, NULL);
423 
424 		if (err)
425 			return err;
426 	}
427 
428 	mmc->ocr = cmd.response[0];
429 
430 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
431 	mmc->rca = 0;
432 
433 	return 0;
434 }
435 
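/*
 * One CMD1 (SEND_OP_COND) iteration. When use_arg is set (and not on SPI),
 * the argument advertises sector addressing (OCR_HCS) plus the voltage window
 * and access mode echoed from the card's previously returned OCR. The new OCR
 * is stored in mmc->ocr.
 */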
436 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
437 {
438 	struct mmc_cmd cmd;
439 	int err;
440 
441 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
442 	cmd.resp_type = MMC_RSP_R3;
443 	cmd.cmdarg = 0;
444 	if (use_arg && !mmc_host_is_spi(mmc))
445 		cmd.cmdarg = OCR_HCS |
446 			(mmc->cfg->voltages &
447 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
448 			(mmc->ocr & OCR_ACCESS_MODE);
449 
450 	err = mmc_send_cmd(mmc, &cmd, NULL);
451 	if (err)
452 		return err;
453 	mmc->ocr = cmd.response[0];
454 	return 0;
455 }
456 
457 static int mmc_send_op_cond(struct mmc *mmc)
458 {
459 	int err, i;
460 
461 	/* Some cards seem to need this */
462 	mmc_go_idle(mmc);
463 
464 	/* Ask the card about its capabilities */
465 	for (i = 0; i < 2; i++) {
466 		err = mmc_send_op_cond_iter(mmc, i != 0);
467 		if (err)
468 			return err;
469 
470 		/* exit if not busy (flag seems to be inverted) */
471 		if (mmc->ocr & OCR_BUSY)
472 			break;
473 	}
474 	mmc->op_cond_pending = 1;
475 	return 0;
476 }
477 
478 static int mmc_complete_op_cond(struct mmc *mmc)
479 {
480 	struct mmc_cmd cmd;
481 	int timeout = 1000;
482 	uint start;
483 	int err;
484 
485 	mmc->op_cond_pending = 0;
486 	if (!(mmc->ocr & OCR_BUSY)) {
487 		/* Some cards seem to need this */
488 		mmc_go_idle(mmc);
489 
490 		start = get_timer(0);
491 		while (1) {
492 			err = mmc_send_op_cond_iter(mmc, 1);
493 			if (err)
494 				return err;
495 			if (mmc->ocr & OCR_BUSY)
496 				break;
497 			if (get_timer(start) > timeout)
498 				return -EOPNOTSUPP;
499 			udelay(100);
500 		}
501 	}
502 
503 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
504 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
505 		cmd.resp_type = MMC_RSP_R3;
506 		cmd.cmdarg = 0;
507 
508 		err = mmc_send_cmd(mmc, &cmd, NULL);
509 
510 		if (err)
511 			return err;
512 
513 		mmc->ocr = cmd.response[0];
514 	}
515 
516 	mmc->version = MMC_VERSION_UNKNOWN;
517 
518 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
519 	mmc->rca = 1;
520 
521 	return 0;
522 }
523 
524 
525 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
526 {
527 	struct mmc_cmd cmd;
528 	struct mmc_data data;
529 	int err;
530 
531 	/* Read the EXT_CSD register */
532 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
533 	cmd.resp_type = MMC_RSP_R1;
534 	cmd.cmdarg = 0;
535 
536 	data.dest = (char *)ext_csd;
537 	data.blocks = 1;
538 	data.blocksize = MMC_MAX_BLOCK_LEN;
539 	data.flags = MMC_DATA_READ;
540 
541 	err = mmc_send_cmd(mmc, &cmd, &data);
542 
543 	return err;
544 }
545 
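/*
 * Wait for the card to leave its busy state after a switch command, either
 * via the host's card_busy hook (typically the DAT0 level) or by polling
 * CMD13. Returns -EBADMSG on a switch error and -ETIMEDOUT if the card is
 * still busy after roughly one second.
 */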
546 static int mmc_poll_for_busy(struct mmc *mmc)
547 {
548 	struct mmc_cmd cmd;
549 	u8 busy = true;
550 	uint start;
551 	int ret;
552 	int timeout = 1000;
553 
554 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
555 	cmd.resp_type = MMC_RSP_R1;
556 	cmd.cmdarg = mmc->rca << 16;
557 
558 	start = get_timer(0);
559 
560 	do {
561 		if (mmc_can_card_busy(mmc)) {
562 			busy = mmc_card_busy(mmc);
563 		} else {
564 			ret = mmc_send_cmd(mmc, &cmd, NULL);
565 
566 			if (ret)
567 				return ret;
568 
569 			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
570 				return -EBADMSG;
571 			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
572 				MMC_STATE_PRG;
573 		}
574 
575 		if (get_timer(start) > timeout && busy)
576 			return -ETIMEDOUT;
577 	} while (busy);
578 
579 	return 0;
580 }
581 
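/*
 * Issue CMD6 (SWITCH) to write a single EXT_CSD byte, retrying up to three
 * times on command failure. When send_status is set, the card is then polled
 * until it leaves the busy/programming state.
 */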
582 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
583 			u8 send_status)
584 {
585 	struct mmc_cmd cmd;
586 	int retries = 3;
587 	int ret;
588 
589 	cmd.cmdidx = MMC_CMD_SWITCH;
590 	cmd.resp_type = MMC_RSP_R1b;
591 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
592 				 (index << 16) |
593 				 (value << 8);
594 
595 	do {
596 		ret = mmc_send_cmd(mmc, &cmd, NULL);
597 
598 		if (!ret && send_status)
599 			return mmc_poll_for_busy(mmc);
600 	} while (--retries > 0 && ret);
601 
602 	return ret;
603 }
604 
605 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
606 {
607 	return __mmc_switch(mmc, set, index, value, true);
608 }
609 
610 static int mmc_select_bus_width(struct mmc *mmc)
611 {
612 	u32 ext_csd_bits[] = {
613 		EXT_CSD_BUS_WIDTH_8,
614 		EXT_CSD_BUS_WIDTH_4,
615 	};
616 	u32 bus_widths[] = {
617 		MMC_BUS_WIDTH_8BIT,
618 		MMC_BUS_WIDTH_4BIT,
619 	};
620 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
621 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
622 	u32 idx, bus_width = 0;
623 	int err = 0;
624 
625 	if (mmc->version < MMC_VERSION_4 ||
626 	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
627 		return 0;
628 
629 	err = mmc_send_ext_csd(mmc, ext_csd);
630 
631 	if (err)
632 		return err;
633 
634 	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;
635 
636 	/*
637 	 * Unlike SD, MMC cards don't have a configuration register reporting
638 	 * the supported bus width. So either run the bus test command to
639 	 * identify the supported width, or compare the EXT_CSD values read at
640 	 * the current bus width against those read earlier in 1-bit mode.
641 	 */
642 	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
643 		/*
644 		 * If the host is capable of 8-bit transfer, switch the device
645 		 * to 8-bit transfer mode. If the mmc switch command returns an
646 		 * error, fall back to 4-bit transfer mode. On success, set the
647 		 * corresponding bus width on the host.
649 		 */
650 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
651 				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
652 		if (err)
653 			continue;
654 
655 		bus_width = bus_widths[idx];
656 		mmc_set_bus_width(mmc, bus_width);
657 
658 		err = mmc_send_ext_csd(mmc, test_csd);
659 
660 		if (err)
661 			continue;
662 
663 		/* Only compare read only fields */
664 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
665 			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
666 		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
667 			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
668 		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
669 			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
670 			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
671 		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
672 			&test_csd[EXT_CSD_SEC_CNT], 4)) {
673 			err = bus_width;
674 			break;
675 		} else {
676 			err = -EBADMSG;
677 		}
678 	}
679 
680 	return err;
681 }
682 
683 static const u8 tuning_blk_pattern_4bit[] = {
684 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
685 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
686 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
687 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
688 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
689 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
690 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
691 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
692 };
693 
694 static const u8 tuning_blk_pattern_8bit[] = {
695 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
696 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
697 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
698 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
699 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
700 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
701 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
702 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
703 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
704 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
705 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
706 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
707 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
708 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
709 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
710 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
711 };
712 
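/*
 * Send the tuning command (opcode, CMD19 for SD or CMD21 for eMMC) and
 * compare the returned block against the standard 4-bit or 8-bit tuning
 * pattern. Returns -EIO on a pattern mismatch.
 */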
713 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
714 {
715 	struct mmc_cmd cmd;
716 	struct mmc_data data;
717 	const u8 *tuning_block_pattern;
718 	int size, err = 0;
719 	u8 *data_buf;
720 
721 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
722 		tuning_block_pattern = tuning_blk_pattern_8bit;
723 		size = sizeof(tuning_blk_pattern_8bit);
724 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
725 		tuning_block_pattern = tuning_blk_pattern_4bit;
726 		size = sizeof(tuning_blk_pattern_4bit);
727 	} else {
728 		return -EINVAL;
729 	}
730 
731 	data_buf = calloc(1, size);
732 	if (!data_buf)
733 		return -ENOMEM;
734 
735 	cmd.cmdidx = opcode;
736 	cmd.resp_type = MMC_RSP_R1;
737 	cmd.cmdarg = 0;
738 
739 	data.dest = (char *)data_buf;
740 	data.blocksize = size;
741 	data.blocks = 1;
742 	data.flags = MMC_DATA_READ;
743 
744 	err = mmc_send_cmd(mmc, &cmd, &data);
745 	if (err)
746 		goto out;
747 
748 	if (memcmp(data_buf, tuning_block_pattern, size))
749 		err = -EIO;
750 out:
751 	free(data_buf);
752 	return err;
753 }
754 
755 static int mmc_execute_tuning(struct mmc *mmc)
756 {
757 #ifdef CONFIG_DM_MMC
758 	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
759 #endif
760 	u32 opcode;
761 
762 	if (IS_SD(mmc))
763 		opcode = MMC_SEND_TUNING_BLOCK;
764 	else
765 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
766 
767 #ifndef CONFIG_DM_MMC
768 	if (mmc->cfg->ops->execute_tuning)
769 		return mmc->cfg->ops->execute_tuning(mmc, opcode);
770 #else
771 	if (ops->execute_tuning)
772 		return ops->execute_tuning(mmc->dev, opcode);
773 #endif
774 
775 	debug("Tuning feature required for HS200 mode.\n");
776 	return -EIO;
778 }
779 
780 static int mmc_hs200_tuning(struct mmc *mmc)
781 {
782 	return mmc_execute_tuning(mmc);
783 }
784 
785 static int mmc_select_hs(struct mmc *mmc)
786 {
787 	int ret;
788 
789 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
790 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
791 
792 	if (!ret)
793 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
794 
795 	return ret;
796 }
797 
798 static int mmc_select_hs_ddr(struct mmc *mmc)
799 {
800 	u32 ext_csd_bits;
801 	int err = 0;
802 
803 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
804 		return 0;
805 
806 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
807 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
808 
809 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
810 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
811 	if (err)
812 		return err;
813 
814 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
815 
816 	return 0;
817 }
818 
819 #ifndef CONFIG_SPL_BUILD
820 static int mmc_select_hs200(struct mmc *mmc)
821 {
822 	int ret;
823 	struct mmc_cmd cmd;
824 
825 	/*
826 	 * Set the bus width (4 or 8) according to the host's capabilities,
827 	 * and switch to HS200 mode if the bus width was set successfully.
828 	 */
829 	ret = mmc_select_bus_width(mmc);
830 
831 	if (ret > 0) {
832 		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
833 				   EXT_CSD_HS_TIMING,
834 				   EXT_CSD_TIMING_HS200, false);
835 
836 		if (ret)
837 			return ret;
838 
839 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
840 
841 		cmd.cmdidx = MMC_CMD_SEND_STATUS;
842 		cmd.resp_type = MMC_RSP_R1;
843 		cmd.cmdarg = mmc->rca << 16;
844 
845 		ret = mmc_send_cmd(mmc, &cmd, NULL);
846 
847 		if (ret)
848 			return ret;
849 
850 		if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
851 			return -EBADMSG;
852 	}
853 
854 	return ret;
855 }
856 #endif
857 
858 static int mmc_select_hs400(struct mmc *mmc)
859 {
860 	int ret;
861 
862 	/* Switch card to HS mode */
863 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
864 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
865 	if (ret)
866 		return ret;
867 
868 	/* Set host controller to HS timing */
869 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
870 
871 	/* Reduce frequency to HS frequency */
872 	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);
873 
874 	ret = mmc_send_status(mmc, 1000);
875 	if (ret)
876 		return ret;
877 
878 	/* Switch card to DDR */
879 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
880 			 EXT_CSD_BUS_WIDTH,
881 			 EXT_CSD_DDR_BUS_WIDTH_8);
882 	if (ret)
883 		return ret;
884 
885 	/* Switch card to HS400 */
886 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
887 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
888 	if (ret)
889 		return ret;
890 
891 	/* Set host controller to HS400 timing and frequency */
892 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);
893 
894 	return ret;
895 }
896 
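/*
 * Derive the usable timing modes from the intersection of the host
 * capabilities and the card's EXT_CSD device type field. HS400(ES) is only
 * advertised when the host also supports an 8-bit bus.
 */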
897 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
898 {
899 	u8 card_type;
900 	u32 host_caps, avail_type = 0;
901 
902 	card_type = ext_csd[EXT_CSD_CARD_TYPE];
903 	host_caps = mmc->cfg->host_caps;
904 
905 	if ((host_caps & MMC_MODE_HS) &&
906 	    (card_type & EXT_CSD_CARD_TYPE_26))
907 		avail_type |= EXT_CSD_CARD_TYPE_26;
908 
909 	if ((host_caps & MMC_MODE_HS) &&
910 	    (card_type & EXT_CSD_CARD_TYPE_52))
911 		avail_type |= EXT_CSD_CARD_TYPE_52;
912 
913 	/*
914 	 * For the moment, U-Boot doesn't support signal voltage switching,
915 	 * therefore we assume the host supports DDR52 at 1.8V or 3.3V I/O
916 	 * (1.2V I/O is not supported; the same applies to HS200 and
917 	 * HS400).
918 	 */
919 	if ((host_caps & MMC_MODE_DDR_52MHz) &&
920 	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
921 		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
922 
923 	if ((host_caps & MMC_MODE_HS200) &&
924 	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
925 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
926 
927 	/*
928 	 * If the host can support HS400, it means that it can also
929 	 * support HS200.
930 	 */
931 	if ((host_caps & MMC_MODE_HS400) &&
932 	    (host_caps & MMC_MODE_8BIT) &&
933 	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
934 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
935 				EXT_CSD_CARD_TYPE_HS400_1_8V;
936 
937 	if ((host_caps & MMC_MODE_HS400ES) &&
938 	    (host_caps & MMC_MODE_8BIT) &&
939 	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
940 	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
941 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
942 				EXT_CSD_CARD_TYPE_HS400_1_8V |
943 				EXT_CSD_CARD_TYPE_HS400ES;
944 
945 	return avail_type;
946 }
947 
948 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
949 {
950 	int clock = 0;
951 
952 	if (mmc_card_hs(mmc))
953 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
954 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
955 	else if (mmc_card_hs200(mmc) ||
956 		 mmc_card_hs400(mmc) ||
957 		 mmc_card_hs400es(mmc))
958 		clock = MMC_HS200_MAX_DTR;
959 
960 	mmc_set_clock(mmc, clock);
961 }
962 
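/*
 * Negotiate the fastest supported eMMC mode: try HS200 first (when available
 * and not in SPL), otherwise plain high speed, then raise the clock and, if
 * possible, move on to HS400 or DDR52 after selecting the bus width.
 */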
963 static int mmc_change_freq(struct mmc *mmc)
964 {
965 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
966 	u32 avail_type;
967 	int err;
968 
969 	mmc->card_caps = 0;
970 
971 	if (mmc_host_is_spi(mmc))
972 		return 0;
973 
974 	/* Only version 4 supports high-speed */
975 	if (mmc->version < MMC_VERSION_4)
976 		return 0;
977 
978 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
979 
980 	err = mmc_send_ext_csd(mmc, ext_csd);
981 
982 	if (err)
983 		return err;
984 
985 	avail_type = mmc_select_card_type(mmc, ext_csd);
986 
987 #ifndef CONFIG_SPL_BUILD
988 	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
989 		err = mmc_select_hs200(mmc);
990 	else
991 #endif
992 	if (avail_type & EXT_CSD_CARD_TYPE_HS)
993 		err = mmc_select_hs(mmc);
994 	else
995 		err = -EINVAL;
996 
997 	if (err)
998 		return err;
999 
1000 	mmc_set_bus_speed(mmc, avail_type);
1001 
1002 	if (mmc_card_hs200(mmc)) {
1003 		err = mmc_hs200_tuning(mmc);
1004 		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1005 		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
1006 			err = mmc_select_hs400(mmc);
1007 			mmc_set_bus_speed(mmc, avail_type);
1008 		}
1009 	} else if (!mmc_card_hs400es(mmc)) {
1010 		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
1011 		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
1012 			err = mmc_select_hs_ddr(mmc);
1013 	}
1014 
1015 	return err;
1016 }
1017 
1018 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1019 {
1020 	switch (part_num) {
1021 	case 0:
1022 		mmc->capacity = mmc->capacity_user;
1023 		break;
1024 	case 1:
1025 	case 2:
1026 		mmc->capacity = mmc->capacity_boot;
1027 		break;
1028 	case 3:
1029 		mmc->capacity = mmc->capacity_rpmb;
1030 		break;
1031 	case 4:
1032 	case 5:
1033 	case 6:
1034 	case 7:
1035 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1036 		break;
1037 	default:
1038 		return -1;
1039 	}
1040 
1041 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1042 
1043 	return 0;
1044 }
1045 
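/*
 * Switch the active hardware partition by rewriting the PARTITION_ACCESS
 * bits of EXT_CSD PARTITION_CONFIG, then update the block device capacity
 * and hwpart bookkeeping when the switch (or a return to the user area)
 * succeeds.
 */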
1046 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1047 {
1048 	int ret;
1049 
1050 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1051 			 (mmc->part_config & ~PART_ACCESS_MASK)
1052 			 | (part_num & PART_ACCESS_MASK));
1053 
1054 	/*
1055 	 * Set the capacity if the switch succeeded or was intended
1056 	 * to return to representing the raw device.
1057 	 */
1058 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1059 		ret = mmc_set_capacity(mmc, part_num);
1060 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1061 	}
1062 
1063 	return ret;
1064 }
1065 
1066 int mmc_hwpart_config(struct mmc *mmc,
1067 		      const struct mmc_hwpart_conf *conf,
1068 		      enum mmc_hwpart_conf_mode mode)
1069 {
1070 	u8 part_attrs = 0;
1071 	u32 enh_size_mult;
1072 	u32 enh_start_addr;
1073 	u32 gp_size_mult[4];
1074 	u32 max_enh_size_mult;
1075 	u32 tot_enh_size_mult = 0;
1076 	u8 wr_rel_set;
1077 	int i, pidx, err;
1078 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1079 
1080 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1081 		return -EINVAL;
1082 
1083 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1084 		printf("eMMC >= 4.4 required for enhanced user data area\n");
1085 		return -EMEDIUMTYPE;
1086 	}
1087 
1088 	if (!(mmc->part_support & PART_SUPPORT)) {
1089 		printf("Card does not support partitioning\n");
1090 		return -EMEDIUMTYPE;
1091 	}
1092 
1093 	if (!mmc->hc_wp_grp_size) {
1094 		printf("Card does not define HC WP group size\n");
1095 		return -EMEDIUMTYPE;
1096 	}
1097 
1098 	/* check partition alignment and total enhanced size */
1099 	if (conf->user.enh_size) {
1100 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1101 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1102 			printf("User data enhanced area not HC WP group "
1103 			       "size aligned\n");
1104 			return -EINVAL;
1105 		}
1106 		part_attrs |= EXT_CSD_ENH_USR;
1107 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1108 		if (mmc->high_capacity) {
1109 			enh_start_addr = conf->user.enh_start;
1110 		} else {
1111 			enh_start_addr = (conf->user.enh_start << 9);
1112 		}
1113 	} else {
1114 		enh_size_mult = 0;
1115 		enh_start_addr = 0;
1116 	}
1117 	tot_enh_size_mult += enh_size_mult;
1118 
1119 	for (pidx = 0; pidx < 4; pidx++) {
1120 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1121 			printf("GP%i partition not HC WP group size "
1122 			       "aligned\n", pidx+1);
1123 			return -EINVAL;
1124 		}
1125 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1126 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1127 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1128 			tot_enh_size_mult += gp_size_mult[pidx];
1129 		}
1130 	}
1131 
1132 	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
1133 		printf("Card does not support enhanced attribute\n");
1134 		return -EMEDIUMTYPE;
1135 	}
1136 
1137 	err = mmc_send_ext_csd(mmc, ext_csd);
1138 	if (err)
1139 		return err;
1140 
1141 	max_enh_size_mult =
1142 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1143 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1144 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1145 	if (tot_enh_size_mult > max_enh_size_mult) {
1146 		printf("Total enhanced size exceeds maximum (%u > %u)\n",
1147 		       tot_enh_size_mult, max_enh_size_mult);
1148 		return -EMEDIUMTYPE;
1149 	}
1150 
1151 	/* The default value of EXT_CSD_WR_REL_SET is device
1152 	 * dependent, the values can only be changed if the
1153 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1154 	 * changed only once and before partitioning is completed. */
1155 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1156 	if (conf->user.wr_rel_change) {
1157 		if (conf->user.wr_rel_set)
1158 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1159 		else
1160 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1161 	}
1162 	for (pidx = 0; pidx < 4; pidx++) {
1163 		if (conf->gp_part[pidx].wr_rel_change) {
1164 			if (conf->gp_part[pidx].wr_rel_set)
1165 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1166 			else
1167 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1168 		}
1169 	}
1170 
1171 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1172 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1173 		puts("Card does not support host controlled partition write "
1174 		     "reliability settings\n");
1175 		return -EMEDIUMTYPE;
1176 	}
1177 
1178 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1179 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1180 		printf("Card already partitioned\n");
1181 		return -EPERM;
1182 	}
1183 
1184 	if (mode == MMC_HWPART_CONF_CHECK)
1185 		return 0;
1186 
1187 	/* Partitioning requires high-capacity size definitions */
1188 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1189 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1190 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1191 
1192 		if (err)
1193 			return err;
1194 
1195 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1196 
1197 		/* update erase group size to be high-capacity */
1198 		mmc->erase_grp_size =
1199 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1200 
1201 	}
1202 
1203 	/* all OK, write the configuration */
1204 	for (i = 0; i < 4; i++) {
1205 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1206 				 EXT_CSD_ENH_START_ADDR+i,
1207 				 (enh_start_addr >> (i*8)) & 0xFF);
1208 		if (err)
1209 			return err;
1210 	}
1211 	for (i = 0; i < 3; i++) {
1212 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1213 				 EXT_CSD_ENH_SIZE_MULT+i,
1214 				 (enh_size_mult >> (i*8)) & 0xFF);
1215 		if (err)
1216 			return err;
1217 	}
1218 	for (pidx = 0; pidx < 4; pidx++) {
1219 		for (i = 0; i < 3; i++) {
1220 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1221 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1222 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1223 			if (err)
1224 				return err;
1225 		}
1226 	}
1227 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1228 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1229 	if (err)
1230 		return err;
1231 
1232 	if (mode == MMC_HWPART_CONF_SET)
1233 		return 0;
1234 
1235 	/* The WR_REL_SET is a write-once register but shall be
1236 	 * written before setting PART_SETTING_COMPLETED. As it is
1237 	 * write-once we can only write it when completing the
1238 	 * partitioning. */
1239 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1240 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1241 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1242 		if (err)
1243 			return err;
1244 	}
1245 
1246 	/* Setting PART_SETTING_COMPLETED confirms the partition
1247 	 * configuration but it only becomes effective after power
1248 	 * cycle, so we do not adjust the partition related settings
1249 	 * in the mmc struct. */
1250 
1251 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1252 			 EXT_CSD_PARTITION_SETTING,
1253 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1254 	if (err)
1255 		return err;
1256 
1257 	return 0;
1258 }
1259 
1260 #if !CONFIG_IS_ENABLED(DM_MMC)
1261 int mmc_getcd(struct mmc *mmc)
1262 {
1263 	int cd;
1264 
1265 	cd = board_mmc_getcd(mmc);
1266 
1267 	if (cd < 0) {
1268 		if (mmc->cfg->ops->getcd)
1269 			cd = mmc->cfg->ops->getcd(mmc);
1270 		else
1271 			cd = 1;
1272 	}
1273 
1274 	return cd;
1275 }
1276 #endif
1277 
1278 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1279 {
1280 	struct mmc_cmd cmd;
1281 	struct mmc_data data;
1282 
1283 	/* Switch the frequency */
1284 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1285 	cmd.resp_type = MMC_RSP_R1;
1286 	cmd.cmdarg = (mode << 31) | 0xffffff;
1287 	cmd.cmdarg &= ~(0xf << (group * 4));
1288 	cmd.cmdarg |= value << (group * 4);
1289 
1290 	data.dest = (char *)resp;
1291 	data.blocksize = 64;
1292 	data.blocks = 1;
1293 	data.flags = MMC_DATA_READ;
1294 
1295 	return mmc_send_cmd(mmc, &cmd, &data);
1296 }
1297 
1298 
1299 static int sd_change_freq(struct mmc *mmc)
1300 {
1301 	int err;
1302 	struct mmc_cmd cmd;
1303 	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
1304 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1305 	struct mmc_data data;
1306 	int timeout;
1307 
1308 	mmc->card_caps = 0;
1309 
1310 	if (mmc_host_is_spi(mmc))
1311 		return 0;
1312 
1313 	/* Read the SCR to find out if this card supports higher speeds */
1314 	cmd.cmdidx = MMC_CMD_APP_CMD;
1315 	cmd.resp_type = MMC_RSP_R1;
1316 	cmd.cmdarg = mmc->rca << 16;
1317 
1318 	err = mmc_send_cmd(mmc, &cmd, NULL);
1319 
1320 	if (err)
1321 		return err;
1322 
1323 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1324 	cmd.resp_type = MMC_RSP_R1;
1325 	cmd.cmdarg = 0;
1326 
1327 	timeout = 3;
1328 
1329 retry_scr:
1330 	data.dest = (char *)scr;
1331 	data.blocksize = 8;
1332 	data.blocks = 1;
1333 	data.flags = MMC_DATA_READ;
1334 
1335 	err = mmc_send_cmd(mmc, &cmd, &data);
1336 
1337 	if (err) {
1338 		if (timeout--)
1339 			goto retry_scr;
1340 
1341 		return err;
1342 	}
1343 
1344 	mmc->scr[0] = __be32_to_cpu(scr[0]);
1345 	mmc->scr[1] = __be32_to_cpu(scr[1]);
1346 
1347 	switch ((mmc->scr[0] >> 24) & 0xf) {
1348 	case 0:
1349 		mmc->version = SD_VERSION_1_0;
1350 		break;
1351 	case 1:
1352 		mmc->version = SD_VERSION_1_10;
1353 		break;
1354 	case 2:
1355 		mmc->version = SD_VERSION_2;
1356 		if ((mmc->scr[0] >> 15) & 0x1)
1357 			mmc->version = SD_VERSION_3;
1358 		break;
1359 	default:
1360 		mmc->version = SD_VERSION_1_0;
1361 		break;
1362 	}
1363 
1364 	if (mmc->scr[0] & SD_DATA_4BIT)
1365 		mmc->card_caps |= MMC_MODE_4BIT;
1366 
1367 	/* Version 1.0 doesn't support switching */
1368 	if (mmc->version == SD_VERSION_1_0)
1369 		return 0;
1370 
1371 	timeout = 4;
1372 	while (timeout--) {
1373 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1374 				(u8 *)switch_status);
1375 
1376 		if (err)
1377 			return err;
1378 
1379 		/* The high-speed function is busy.  Try again */
1380 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1381 			break;
1382 	}
1383 
1384 	/* If high-speed isn't supported, we return */
1385 	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
1386 		return 0;
1387 
1388 	/*
1389 	 * If the host doesn't support SD_HIGHSPEED, do not switch the card to
1390 	 * HIGHSPEED mode even if the card supports it.
1391 	 * This avoids further problems caused by the card and the host
1392 	 * running in different modes.
1393 	 */
1394 	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
1395 		(mmc->cfg->host_caps & MMC_MODE_HS)))
1396 		return 0;
1397 
1398 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1399 
1400 	if (err)
1401 		return err;
1402 
1403 	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
1404 		mmc->card_caps |= MMC_MODE_HS;
1405 
1406 	return 0;
1407 }
1408 
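/*
 * Read the 64-byte SD Status register (ACMD13) and extract the allocation
 * unit size and the erase timeout/offset used for erase time estimates.
 */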
1409 static int sd_read_ssr(struct mmc *mmc)
1410 {
1411 	int err, i;
1412 	struct mmc_cmd cmd;
1413 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1414 	struct mmc_data data;
1415 	int timeout = 3;
1416 	unsigned int au, eo, et, es;
1417 
1418 	cmd.cmdidx = MMC_CMD_APP_CMD;
1419 	cmd.resp_type = MMC_RSP_R1;
1420 	cmd.cmdarg = mmc->rca << 16;
1421 
1422 	err = mmc_send_cmd(mmc, &cmd, NULL);
1423 	if (err)
1424 		return err;
1425 
1426 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1427 	cmd.resp_type = MMC_RSP_R1;
1428 	cmd.cmdarg = 0;
1429 
1430 retry_ssr:
1431 	data.dest = (char *)ssr;
1432 	data.blocksize = 64;
1433 	data.blocks = 1;
1434 	data.flags = MMC_DATA_READ;
1435 
1436 	err = mmc_send_cmd(mmc, &cmd, &data);
1437 	if (err) {
1438 		if (timeout--)
1439 			goto retry_ssr;
1440 
1441 		return err;
1442 	}
1443 
1444 	for (i = 0; i < 16; i++)
1445 		ssr[i] = be32_to_cpu(ssr[i]);
1446 
1447 	au = (ssr[2] >> 12) & 0xF;
1448 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1449 		mmc->ssr.au = sd_au_size[au];
1450 		es = (ssr[3] >> 24) & 0xFF;
1451 		es |= (ssr[2] & 0xFF) << 8;
1452 		et = (ssr[3] >> 18) & 0x3F;
1453 		if (es && et) {
1454 			eo = (ssr[3] >> 16) & 0x3;
1455 			mmc->ssr.erase_timeout = (et * 1000) / es;
1456 			mmc->ssr.erase_offset = eo * 1000;
1457 		}
1458 	} else {
1459 		debug("Invalid Allocation Unit Size.\n");
1460 	}
1461 
1462 	return 0;
1463 }
1464 
1465 /* frequency bases */
1466 /* divided by 10 to be nice to platforms without floating point */
1467 static const int fbase[] = {
1468 	10000,
1469 	100000,
1470 	1000000,
1471 	10000000,
1472 };
1473 
1474 /* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1475  * to platforms without floating point.
1476  */
1477 static const u8 multipliers[] = {
1478 	0,	/* reserved */
1479 	10,
1480 	12,
1481 	13,
1482 	15,
1483 	20,
1484 	25,
1485 	30,
1486 	35,
1487 	40,
1488 	45,
1489 	50,
1490 	55,
1491 	60,
1492 	70,
1493 	80,
1494 };
1495 
1496 #if !CONFIG_IS_ENABLED(DM_MMC)
1497 static void mmc_set_ios(struct mmc *mmc)
1498 {
1499 	if (mmc->cfg->ops->set_ios)
1500 		mmc->cfg->ops->set_ios(mmc);
1501 }
1502 
1503 static bool mmc_card_busy(struct mmc *mmc)
1504 {
1505 	if (!mmc->cfg->ops->card_busy)
1506 		return -ENOSYS;
1507 
1508 	return mmc->cfg->ops->card_busy(mmc);
1509 }
1510 
1511 static bool mmc_can_card_busy(struct mmc *mmc)
1512 {
1513 	return !!mmc->cfg->ops->card_busy;
1514 }
1515 #endif
1516 
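/*
 * Bring the card from identification to transfer state: read the CID and CSD,
 * assign/fetch the RCA, parse capacity and EXT_CSD (for eMMC), select the
 * card, negotiate bus width and speed, and fill in the block descriptor.
 */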
1517 static int mmc_startup(struct mmc *mmc)
1518 {
1519 	int err, i;
1520 	uint mult, freq, tran_speed;
1521 	u64 cmult, csize, capacity;
1522 	struct mmc_cmd cmd;
1523 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1524 	bool has_parts = false;
1525 	bool part_completed;
1526 	struct blk_desc *bdesc;
1527 
1528 #ifdef CONFIG_MMC_SPI_CRC_ON
1529 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1530 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1531 		cmd.resp_type = MMC_RSP_R1;
1532 		cmd.cmdarg = 1;
1533 		err = mmc_send_cmd(mmc, &cmd, NULL);
1534 
1535 		if (err)
1536 			return err;
1537 	}
1538 #endif
1539 
1540 	/* Put the Card in Identify Mode */
1541 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1542 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1543 	cmd.resp_type = MMC_RSP_R2;
1544 	cmd.cmdarg = 0;
1545 
1546 	err = mmc_send_cmd(mmc, &cmd, NULL);
1547 
1548 	if (err)
1549 		return err;
1550 
1551 	memcpy(mmc->cid, cmd.response, 16);
1552 
1553 	/*
1554 	 * For MMC cards, set the Relative Address.
1555 	 * For SD cards, get the Relative Address.
1556 	 * This also puts the card into Standby State.
1557 	 */
1558 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1559 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1560 		cmd.cmdarg = mmc->rca << 16;
1561 		cmd.resp_type = MMC_RSP_R6;
1562 
1563 		err = mmc_send_cmd(mmc, &cmd, NULL);
1564 
1565 		if (err)
1566 			return err;
1567 
1568 		if (IS_SD(mmc))
1569 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1570 	}
1571 
1572 	/* Get the Card-Specific Data */
1573 	cmd.cmdidx = MMC_CMD_SEND_CSD;
1574 	cmd.resp_type = MMC_RSP_R2;
1575 	cmd.cmdarg = mmc->rca << 16;
1576 
1577 	err = mmc_send_cmd(mmc, &cmd, NULL);
1578 
1579 	if (err)
1580 		return err;
1581 
1582 	mmc->csd[0] = cmd.response[0];
1583 	mmc->csd[1] = cmd.response[1];
1584 	mmc->csd[2] = cmd.response[2];
1585 	mmc->csd[3] = cmd.response[3];
1586 
1587 	if (mmc->version == MMC_VERSION_UNKNOWN) {
1588 		int version = (cmd.response[0] >> 26) & 0xf;
1589 
1590 		switch (version) {
1591 		case 0:
1592 			mmc->version = MMC_VERSION_1_2;
1593 			break;
1594 		case 1:
1595 			mmc->version = MMC_VERSION_1_4;
1596 			break;
1597 		case 2:
1598 			mmc->version = MMC_VERSION_2_2;
1599 			break;
1600 		case 3:
1601 			mmc->version = MMC_VERSION_3;
1602 			break;
1603 		case 4:
1604 			mmc->version = MMC_VERSION_4;
1605 			break;
1606 		default:
1607 			mmc->version = MMC_VERSION_1_2;
1608 			break;
1609 		}
1610 	}
1611 
1612 	/* divide frequency by 10, since the mults are 10x bigger */
1613 	freq = fbase[(cmd.response[0] & 0x7)];
1614 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1615 
1616 	tran_speed = freq * mult;
1617 
1618 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1619 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1620 
1621 	if (IS_SD(mmc))
1622 		mmc->write_bl_len = mmc->read_bl_len;
1623 	else
1624 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1625 
1626 	if (mmc->high_capacity) {
1627 		csize = (mmc->csd[1] & 0x3f) << 16
1628 			| (mmc->csd[2] & 0xffff0000) >> 16;
1629 		cmult = 8;
1630 	} else {
1631 		csize = (mmc->csd[1] & 0x3ff) << 2
1632 			| (mmc->csd[2] & 0xc0000000) >> 30;
1633 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
1634 	}
1635 
1636 	mmc->capacity_user = (csize + 1) << (cmult + 2);
1637 	mmc->capacity_user *= mmc->read_bl_len;
1638 	mmc->capacity_boot = 0;
1639 	mmc->capacity_rpmb = 0;
1640 	for (i = 0; i < 4; i++)
1641 		mmc->capacity_gp[i] = 0;
1642 
1643 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1644 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1645 
1646 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1647 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1648 
1649 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1650 		cmd.cmdidx = MMC_CMD_SET_DSR;
1651 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1652 		cmd.resp_type = MMC_RSP_NONE;
1653 		if (mmc_send_cmd(mmc, &cmd, NULL))
1654 			printf("MMC: SET_DSR failed\n");
1655 	}
1656 
1657 	/* Select the card, and put it into Transfer Mode */
1658 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1659 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
1660 		cmd.resp_type = MMC_RSP_R1;
1661 		cmd.cmdarg = mmc->rca << 16;
1662 		err = mmc_send_cmd(mmc, &cmd, NULL);
1663 
1664 		if (err)
1665 			return err;
1666 	}
1667 
1668 	/*
1669 	 * For SD cards, the erase group is always one sector
1670 	 */
1671 	mmc->erase_grp_size = 1;
1672 	mmc->part_config = MMCPART_NOAVAILABLE;
1673 	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
1674 		/* Check EXT_CSD version and capacity */
1675 		err = mmc_send_ext_csd(mmc, ext_csd);
1676 		if (err)
1677 			return err;
1678 		if (ext_csd[EXT_CSD_REV] >= 2) {
1679 			/*
1680 			 * According to the JEDEC standard, the capacity in
1681 			 * EXT_CSD (SEC_COUNT) is only valid if it is larger
1682 			 * than 2 GiB
1683 			 */
1684 			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1685 					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1686 					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1687 					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1688 			capacity *= MMC_MAX_BLOCK_LEN;
1689 			if ((capacity >> 20) > 2 * 1024)
1690 				mmc->capacity_user = capacity;
1691 		}
1692 
1693 		switch (ext_csd[EXT_CSD_REV]) {
1694 		case 1:
1695 			mmc->version = MMC_VERSION_4_1;
1696 			break;
1697 		case 2:
1698 			mmc->version = MMC_VERSION_4_2;
1699 			break;
1700 		case 3:
1701 			mmc->version = MMC_VERSION_4_3;
1702 			break;
1703 		case 5:
1704 			mmc->version = MMC_VERSION_4_41;
1705 			break;
1706 		case 6:
1707 			mmc->version = MMC_VERSION_4_5;
1708 			break;
1709 		case 7:
1710 			mmc->version = MMC_VERSION_5_0;
1711 			break;
1712 		case 8:
1713 			mmc->version = MMC_VERSION_5_1;
1714 			break;
1715 		}
1716 
1717 		/* The partition data may be non-zero but it is only
1718 		 * effective if PARTITION_SETTING_COMPLETED is set in
1719 		 * EXT_CSD, so ignore any data if this bit is not set,
1720 		 * except for enabling the high-capacity group size
1721 		 * definition (see below). */
1722 		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1723 				    EXT_CSD_PARTITION_SETTING_COMPLETED);
1724 
1725 		/* store the partition info of emmc */
1726 		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1727 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1728 		    ext_csd[EXT_CSD_BOOT_MULT])
1729 			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1730 		if (part_completed &&
1731 		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1732 			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1733 
1734 		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1735 
1736 		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1737 
1738 		for (i = 0; i < 4; i++) {
1739 			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1740 			uint mult = (ext_csd[idx + 2] << 16) +
1741 				(ext_csd[idx + 1] << 8) + ext_csd[idx];
1742 			if (mult)
1743 				has_parts = true;
1744 			if (!part_completed)
1745 				continue;
1746 			mmc->capacity_gp[i] = mult;
1747 			mmc->capacity_gp[i] *=
1748 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1749 			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1750 			mmc->capacity_gp[i] <<= 19;
1751 		}
1752 
1753 		if (part_completed) {
1754 			mmc->enh_user_size =
1755 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
1756 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
1757 				ext_csd[EXT_CSD_ENH_SIZE_MULT];
1758 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1759 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1760 			mmc->enh_user_size <<= 19;
1761 			mmc->enh_user_start =
1762 				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
1763 				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
1764 				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
1765 				ext_csd[EXT_CSD_ENH_START_ADDR];
1766 			if (mmc->high_capacity)
1767 				mmc->enh_user_start <<= 9;
1768 		}
1769 
1770 		/*
1771 		 * The host needs to enable the ERASE_GRP_DEF bit if the device
1772 		 * is partitioned. This bit is lost after every reset or power
1773 		 * off, and it affects the erase size.
1774 		 */
1775 		if (part_completed)
1776 			has_parts = true;
1777 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1778 		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1779 			has_parts = true;
1780 		if (has_parts) {
1781 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1782 				EXT_CSD_ERASE_GROUP_DEF, 1);
1783 
1784 			if (err)
1785 				return err;
1786 			else
1787 				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1788 		}
1789 
1790 		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1791 			/* Read out group size from ext_csd */
1792 			mmc->erase_grp_size =
1793 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1794 			/*
1795 			 * If high capacity and the partition setting is
1796 			 * completed, SEC_COUNT is valid even if it is smaller
1797 			 * than 2 GiB (JEDEC Standard JESD84-B45, 6.2.4).
1798 			 */
1799 			if (mmc->high_capacity && part_completed) {
1800 				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1801 					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1802 					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1803 					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1804 				capacity *= MMC_MAX_BLOCK_LEN;
1805 				mmc->capacity_user = capacity;
1806 			}
1807 		} else {
1808 			/* Calculate the group size from the csd value. */
1809 			int erase_gsz, erase_gmul;
1810 			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1811 			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1812 			mmc->erase_grp_size = (erase_gsz + 1)
1813 				* (erase_gmul + 1);
1814 		}
1815 
1816 		mmc->hc_wp_grp_size = 1024
1817 			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1818 			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1819 
1820 		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1821 	}
1822 
1823 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1824 	if (err)
1825 		return err;
1826 
1827 	if (IS_SD(mmc))
1828 		err = sd_change_freq(mmc);
1829 	else
1830 		err = mmc_change_freq(mmc);
1831 
1832 	if (err)
1833 		return err;
1834 
1835 	/* Restrict card's capabilities by what the host can do */
1836 	mmc->card_caps &= mmc->cfg->host_caps;
1837 
1838 	if (IS_SD(mmc)) {
1839 		if (mmc->card_caps & MMC_MODE_4BIT) {
1840 			cmd.cmdidx = MMC_CMD_APP_CMD;
1841 			cmd.resp_type = MMC_RSP_R1;
1842 			cmd.cmdarg = mmc->rca << 16;
1843 
1844 			err = mmc_send_cmd(mmc, &cmd, NULL);
1845 			if (err)
1846 				return err;
1847 
1848 			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1849 			cmd.resp_type = MMC_RSP_R1;
1850 			cmd.cmdarg = 2;
1851 			err = mmc_send_cmd(mmc, &cmd, NULL);
1852 			if (err)
1853 				return err;
1854 
1855 			mmc_set_bus_width(mmc, 4);
1856 		}
1857 
1858 		err = sd_read_ssr(mmc);
1859 		if (err)
1860 			return err;
1861 
1862 		if (mmc->card_caps & MMC_MODE_HS)
1863 			tran_speed = 50000000;
1864 		else
1865 			tran_speed = 25000000;
1866 
1867 		mmc_set_clock(mmc, tran_speed);
1868 	}
1869 
1870 	/* Fix the block length for DDR mode */
1871 	if (mmc_card_ddr(mmc)) {
1872 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1873 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1874 	}
1875 
1876 	/* fill in device description */
1877 	bdesc = mmc_get_blk_desc(mmc);
1878 	bdesc->lun = 0;
1879 	bdesc->hwpart = 0;
1880 	bdesc->type = 0;
1881 	bdesc->blksz = mmc->read_bl_len;
1882 	bdesc->log2blksz = LOG2(bdesc->blksz);
1883 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1884 #if !defined(CONFIG_SPL_BUILD) || \
1885 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1886 		!defined(CONFIG_USE_TINY_PRINTF))
1887 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1888 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1889 		(mmc->cid[3] >> 16) & 0xffff);
1890 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1891 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1892 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1893 		(mmc->cid[2] >> 24) & 0xff);
1894 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1895 		(mmc->cid[2] >> 16) & 0xf);
1896 #else
1897 	bdesc->vendor[0] = 0;
1898 	bdesc->product[0] = 0;
1899 	bdesc->revision[0] = 0;
1900 #endif
1901 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1902 	part_init(bdesc);
1903 #endif
1904 
1905 	return 0;
1906 }
1907 
1908 static int mmc_send_if_cond(struct mmc *mmc)
1909 {
1910 	struct mmc_cmd cmd;
1911 	int err;
1912 
1913 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
1914 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1915 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1916 	cmd.resp_type = MMC_RSP_R7;
1917 
1918 	err = mmc_send_cmd(mmc, &cmd, NULL);
1919 
1920 	if (err)
1921 		return err;
1922 
1923 	if ((cmd.response[0] & 0xff) != 0xaa)
1924 		return -EOPNOTSUPP;
1925 	else
1926 		mmc->version = SD_VERSION_2;
1927 
1928 	return 0;
1929 }
1930 
1931 #if !CONFIG_IS_ENABLED(DM_MMC)
1932 /* board-specific MMC power initializations. */
1933 __weak void board_mmc_power_init(void)
1934 {
1935 }
1936 #endif
1937 
1938 static int mmc_power_init(struct mmc *mmc)
1939 {
1940 #if CONFIG_IS_ENABLED(DM_MMC)
1941 #if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
1942 	struct udevice *vmmc_supply;
1943 	int ret;
1944 
1945 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1946 					  &vmmc_supply);
1947 	if (ret) {
1948 		debug("%s: No vmmc supply\n", mmc->dev->name);
1949 		return 0;
1950 	}
1951 
1952 	ret = regulator_set_enable(vmmc_supply, true);
1953 	if (ret) {
1954 		puts("Error enabling VMMC supply\n");
1955 		return ret;
1956 	}
1957 #endif
1958 #else /* !CONFIG_DM_MMC */
1959 	/*
1960 	 * Driver model should use a regulator, as above, rather than calling
1961 	 * out to board code.
1962 	 */
1963 	board_mmc_power_init();
1964 #endif
1965 	return 0;
1966 }
1967 
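/*
 * First half of initialization: power up the card, reset it with CMD0 and
 * probe for SD (CMD8/ACMD41) or eMMC (CMD1) operating conditions. The
 * remainder is finished later by mmc_complete_init() via mmc_init().
 */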
1968 int mmc_start_init(struct mmc *mmc)
1969 {
1970 	bool no_card;
1971 	int err;
1972 
1973 	/* we pretend there's no card when init is NULL */
1974 	no_card = mmc_getcd(mmc) == 0;
1975 #if !CONFIG_IS_ENABLED(DM_MMC)
1976 	no_card = no_card || (mmc->cfg->ops->init == NULL);
1977 #endif
1978 	if (no_card) {
1979 		mmc->has_init = 0;
1980 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1981 		printf("MMC: no card present\n");
1982 #endif
1983 		return -ENOMEDIUM;
1984 	}
1985 
1986 	if (mmc->has_init)
1987 		return 0;
1988 
1989 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
1990 	mmc_adapter_card_type_ident();
1991 #endif
1992 	err = mmc_power_init(mmc);
1993 	if (err)
1994 		return err;
1995 
1996 #if CONFIG_IS_ENABLED(DM_MMC)
1997 	/* The device has already been probed ready for use */
1998 #else
1999 	/* made sure it's not NULL earlier */
2000 	err = mmc->cfg->ops->init(mmc);
2001 	if (err)
2002 		return err;
2003 #endif
2004 	mmc_set_bus_width(mmc, 1);
2005 	mmc_set_clock(mmc, 1);
2006 	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
2007 
2008 	/* Reset the Card */
2009 	err = mmc_go_idle(mmc);
2010 
2011 	if (err)
2012 		return err;
2013 
2014 	/* The internal partition is reset to the user partition (0) on every CMD0 */
2015 	mmc_get_blk_desc(mmc)->hwpart = 0;
2016 
2017 	/* Test for SD version 2 */
2018 	err = mmc_send_if_cond(mmc);
2019 
2020 	/* Now try to get the SD card's operating condition */
2021 	err = sd_send_op_cond(mmc);
2022 
2023 	/* If the command timed out, we check for an MMC card */
2024 	if (err == -ETIMEDOUT) {
2025 		err = mmc_send_op_cond(mmc);
2026 
2027 		if (err) {
2028 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2029 			printf("Card did not respond to voltage select!\n");
2030 #endif
2031 			return -EOPNOTSUPP;
2032 		}
2033 	}
2034 
2035 	if (!err)
2036 		mmc->init_in_progress = 1;
2037 
2038 	return err;
2039 }
2040 
2041 static int mmc_complete_init(struct mmc *mmc)
2042 {
2043 	int err = 0;
2044 
2045 	mmc->init_in_progress = 0;
2046 	if (mmc->op_cond_pending)
2047 		err = mmc_complete_op_cond(mmc);
2048 
2049 	if (!err)
2050 		err = mmc_startup(mmc);
2051 	if (err)
2052 		mmc->has_init = 0;
2053 	else
2054 		mmc->has_init = 1;
2055 	return err;
2056 }
2057 
2058 int mmc_init(struct mmc *mmc)
2059 {
2060 	int err = 0;
2061 	__maybe_unused unsigned start;
2062 #if CONFIG_IS_ENABLED(DM_MMC)
2063 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2064 
2065 	upriv->mmc = mmc;
2066 #endif
2067 	if (mmc->has_init)
2068 		return 0;
2069 
2070 	start = get_timer(0);
2071 
2072 	if (!mmc->init_in_progress)
2073 		err = mmc_start_init(mmc);
2074 
2075 	if (!err)
2076 		err = mmc_complete_init(mmc);
2077 	if (err)
2078 		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2079 
2080 	return err;
2081 }
2082 
2083 int mmc_set_dsr(struct mmc *mmc, u16 val)
2084 {
2085 	mmc->dsr = val;
2086 	return 0;
2087 }
2088 
2089 /* CPU-specific MMC initializations */
2090 __weak int cpu_mmc_init(bd_t *bis)
2091 {
2092 	return -1;
2093 }
2094 
2095 /* board-specific MMC initializations. */
2096 __weak int board_mmc_init(bd_t *bis)
2097 {
2098 	return -1;
2099 }
2100 
2101 void mmc_set_preinit(struct mmc *mmc, int preinit)
2102 {
2103 	mmc->preinit = preinit;
2104 }
2105 
2106 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2107 static int mmc_probe(bd_t *bis)
2108 {
2109 	return 0;
2110 }
2111 #elif CONFIG_IS_ENABLED(DM_MMC)
2112 static int mmc_probe(bd_t *bis)
2113 {
2114 	int ret, i;
2115 	struct uclass *uc;
2116 	struct udevice *dev;
2117 
2118 	ret = uclass_get(UCLASS_MMC, &uc);
2119 	if (ret)
2120 		return ret;
2121 
2122 	/*
2123 	 * Try to add them in sequence order. Really with driver model we
2124 	 * should allow holes, but the current MMC list does not allow that.
2125 	 * So if we request 0, 1, 3 we will get 0, 1, 2.
2126 	 */
2127 	for (i = 0; ; i++) {
2128 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2129 		if (ret == -ENODEV)
2130 			break;
2131 	}
2132 	uclass_foreach_dev(dev, uc) {
2133 		ret = device_probe(dev);
2134 		if (ret)
2135 			printf("%s - probe failed: %d\n", dev->name, ret);
2136 	}
2137 
2138 	return 0;
2139 }
2140 #else
2141 static int mmc_probe(bd_t *bis)
2142 {
2143 	if (board_mmc_init(bis) < 0)
2144 		cpu_mmc_init(bis);
2145 
2146 	return 0;
2147 }
2148 #endif
2149 
2150 int mmc_initialize(bd_t *bis)
2151 {
2152 	static int initialized = 0;
2153 	int ret;
2154 	if (initialized)	/* Avoid initializing mmc multiple times */
2155 		return 0;
2156 	initialized = 1;
2157 
2158 #if !CONFIG_IS_ENABLED(BLK)
2159 #if !CONFIG_IS_ENABLED(MMC_TINY)
2160 	mmc_list_init();
2161 #endif
2162 #endif
2163 	ret = mmc_probe(bis);
2164 	if (ret)
2165 		return ret;
2166 
2167 #ifndef CONFIG_SPL_BUILD
2168 	print_mmc_devices(',');
2169 #endif
2170 
2171 	mmc_do_preinit();
2172 	return 0;
2173 }
2174 
2175 #ifdef CONFIG_CMD_BKOPS_ENABLE
2176 int mmc_set_bkops_enable(struct mmc *mmc)
2177 {
2178 	int err;
2179 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2180 
2181 	err = mmc_send_ext_csd(mmc, ext_csd);
2182 	if (err) {
2183 		puts("Could not get ext_csd register values\n");
2184 		return err;
2185 	}
2186 
2187 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2188 		puts("Background operations not supported on device\n");
2189 		return -EMEDIUMTYPE;
2190 	}
2191 
2192 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2193 		puts("Background operations already enabled\n");
2194 		return 0;
2195 	}
2196 
2197 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2198 	if (err) {
2199 		puts("Failed to enable manual background operations\n");
2200 		return err;
2201 	}
2202 
2203 	puts("Enabled manual background operations\n");
2204 
2205 	return 0;
2206 }
2207 #endif
2208