1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
25 static const unsigned int sd_au_size[] = {
26 	0,		SZ_16K / 512,		SZ_32K / 512,
27 	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
28 	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
29 	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
30 	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
31 };
32 
33 static char mmc_ext_csd[512];
34 
35 #if CONFIG_IS_ENABLED(MMC_TINY)
36 static struct mmc mmc_static;
37 struct mmc *find_mmc_device(int dev_num)
38 {
39 	return &mmc_static;
40 }
41 
42 void mmc_do_preinit(void)
43 {
44 	struct mmc *m = &mmc_static;
45 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
46 	mmc_set_preinit(m, 1);
47 #endif
48 	if (m->preinit)
49 		mmc_start_init(m);
50 }
51 
52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
53 {
54 	return &mmc->block_dev;
55 }
56 #endif
57 
58 #if !CONFIG_IS_ENABLED(DM_MMC)
59 __weak int board_mmc_getwp(struct mmc *mmc)
60 {
61 	return -1;
62 }
63 
64 int mmc_getwp(struct mmc *mmc)
65 {
66 	int wp;
67 
68 	wp = board_mmc_getwp(mmc);
69 
70 	if (wp < 0) {
71 		if (mmc->cfg->ops->getwp)
72 			wp = mmc->cfg->ops->getwp(mmc);
73 		else
74 			wp = 0;
75 	}
76 
77 	return wp;
78 }
79 
80 __weak int board_mmc_getcd(struct mmc *mmc)
81 {
82 	return -1;
83 }
84 #endif
85 
86 #ifdef CONFIG_MMC_TRACE
87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
88 {
89 	printf("CMD_SEND:%d\n", cmd->cmdidx);
90 	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
91 }
92 
93 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
94 {
95 	int i;
96 	u8 *ptr;
97 
98 	if (ret) {
99 		printf("\t\tRET\t\t\t %d\n", ret);
100 	} else {
101 		switch (cmd->resp_type) {
102 		case MMC_RSP_NONE:
103 			printf("\t\tMMC_RSP_NONE\n");
104 			break;
105 		case MMC_RSP_R1:
106 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
107 				cmd->response[0]);
108 			break;
109 		case MMC_RSP_R1b:
110 			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
111 				cmd->response[0]);
112 			break;
113 		case MMC_RSP_R2:
114 			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
115 				cmd->response[0]);
116 			printf("\t\t          \t\t 0x%08X \n",
117 				cmd->response[1]);
118 			printf("\t\t          \t\t 0x%08X \n",
119 				cmd->response[2]);
120 			printf("\t\t          \t\t 0x%08X \n",
121 				cmd->response[3]);
122 			printf("\n");
123 			printf("\t\t\t\t\tDUMPING DATA\n");
124 			for (i = 0; i < 4; i++) {
125 				int j;
126 				printf("\t\t\t\t\t%03d - ", i*4);
127 				ptr = (u8 *)&cmd->response[i];
128 				ptr += 3;
129 				for (j = 0; j < 4; j++)
130 					printf("%02X ", *ptr--);
131 				printf("\n");
132 			}
133 			break;
134 		case MMC_RSP_R3:
135 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
136 				cmd->response[0]);
137 			break;
138 		default:
139 			printf("\t\tERROR MMC rsp not supported\n");
140 			break;
141 		}
142 	}
143 }
144 
145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
146 {
147 	int status;
148 
149 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
150 	printf("CURR STATE:%d\n", status);
151 }
152 #endif
153 
154 #if !CONFIG_IS_ENABLED(DM_MMC)
155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
156 {
157 	int ret;
158 
159 	mmmc_trace_before_send(mmc, cmd);
160 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
161 	mmmc_trace_after_send(mmc, cmd, ret);
162 
163 	return ret;
164 }
165 #endif
166 
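/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and has left the programming state, retrying failed commands up to five
 * times; the timeout is roughly in milliseconds. Returns 0 on success,
 * -ECOMM on a reported status error, or -ETIMEDOUT if the card stays busy.
 */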
167 int mmc_send_status(struct mmc *mmc, int timeout)
168 {
169 	struct mmc_cmd cmd;
170 	int err, retries = 5;
171 
172 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
173 	cmd.resp_type = MMC_RSP_R1;
174 	if (!mmc_host_is_spi(mmc))
175 		cmd.cmdarg = mmc->rca << 16;
176 
177 	while (1) {
178 		err = mmc_send_cmd(mmc, &cmd, NULL);
179 		if (!err) {
180 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
181 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
182 			     MMC_STATE_PRG)
183 				break;
184 			else if (cmd.response[0] & MMC_STATUS_MASK) {
185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
186 				printf("Status Error: 0x%08X\n",
187 					cmd.response[0]);
188 #endif
189 				return -ECOMM;
190 			}
191 		} else if (--retries < 0)
192 			return err;
193 
194 		if (timeout-- <= 0)
195 			break;
196 
197 		udelay(1000);
198 	}
199 
200 	mmc_trace_state(mmc, &cmd);
201 	if (timeout <= 0) {
202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
203 		printf("Timeout waiting card ready\n");
204 #endif
205 		return -ETIMEDOUT;
206 	}
207 
208 	return 0;
209 }
210 
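/*
 * Set the transfer block length with CMD16. Skipped for cards running in
 * a DDR mode, where SET_BLOCKLEN is not permitted and the block length is
 * fixed at 512 bytes.
 */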
211 int mmc_set_blocklen(struct mmc *mmc, int len)
212 {
213 	struct mmc_cmd cmd;
214 
215 	if (mmc_card_ddr(mmc))
216 		return 0;
217 
218 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
219 	cmd.resp_type = MMC_RSP_R1;
220 	cmd.cmdarg = len;
221 
222 	return mmc_send_cmd(mmc, &cmd, NULL);
223 }
224 
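/*
 * Read blkcnt blocks into dst using CMD17/CMD18, with sector addressing
 * for high-capacity cards and byte addressing otherwise. A multi-block
 * read is terminated with CMD12 (STOP_TRANSMISSION). Returns the number
 * of blocks read, or 0 on any failure.
 */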
225 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
226 			   lbaint_t blkcnt)
227 {
228 	struct mmc_cmd cmd;
229 	struct mmc_data data;
230 
231 	if (blkcnt > 1)
232 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
233 	else
234 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
235 
236 	if (mmc->high_capacity)
237 		cmd.cmdarg = start;
238 	else
239 		cmd.cmdarg = start * mmc->read_bl_len;
240 
241 	cmd.resp_type = MMC_RSP_R1;
242 
243 	data.dest = dst;
244 	data.blocks = blkcnt;
245 	data.blocksize = mmc->read_bl_len;
246 	data.flags = MMC_DATA_READ;
247 
248 	if (mmc_send_cmd(mmc, &cmd, &data))
249 		return 0;
250 
251 	if (blkcnt > 1) {
252 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
253 		cmd.cmdarg = 0;
254 		cmd.resp_type = MMC_RSP_R1b;
255 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
256 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
257 			printf("mmc fail to send stop cmd\n");
258 #endif
259 			return 0;
260 		}
261 	}
262 
263 	return blkcnt;
264 }
265 
266 #ifdef CONFIG_SPL_BLK_READ_PREPARE
267 static int mmc_read_blocks_prepare(struct mmc *mmc, void *dst, lbaint_t start,
268 				   lbaint_t blkcnt)
269 {
270 	struct mmc_cmd cmd;
271 	struct mmc_data data;
272 
273 	if (blkcnt > 1)
274 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
275 	else
276 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
277 
278 	if (mmc->high_capacity)
279 		cmd.cmdarg = start;
280 	else
281 		cmd.cmdarg = start * mmc->read_bl_len;
282 
283 	cmd.resp_type = MMC_RSP_R1;
284 
285 	data.dest = dst;
286 	data.blocks = blkcnt;
287 	data.blocksize = mmc->read_bl_len;
288 	data.flags = MMC_DATA_READ;
289 
290 	if (mmc_send_cmd_prepare(mmc, &cmd, &data))
291 		return 0;
292 
293 	return blkcnt;
294 }
295 #endif
296 
297 #ifdef CONFIG_SPL_BLK_READ_PREPARE
298 #if CONFIG_IS_ENABLED(BLK)
299 ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
300 #else
301 ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
302 			void *dst)
303 #endif
304 {
305 #if CONFIG_IS_ENABLED(BLK)
306 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
307 #endif
308 	int dev_num = block_dev->devnum;
309 	int timeout = 0;
310 	int err;
311 
312 	if (blkcnt == 0)
313 		return 0;
314 
315 	struct mmc *mmc = find_mmc_device(dev_num);
316 
317 	if (!mmc)
318 		return 0;
319 
320 	if (CONFIG_IS_ENABLED(MMC_TINY))
321 		err = mmc_switch_part(mmc, block_dev->hwpart);
322 	else
323 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
324 
325 	if (err < 0)
326 		return 0;
327 
328 	if ((start + blkcnt) > block_dev->lba) {
329 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
330 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
331 		       start + blkcnt, block_dev->lba);
332 #endif
333 		return 0;
334 	}
335 
336 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
337 		debug("%s: Failed to set blocklen\n", __func__);
338 		return 0;
339 	}
340 
341 	if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
342 		debug("%s: Failed to read blocks\n", __func__);
343 re_init_retry:
344 		timeout++;
345 		/*
346 		 * Try re-init seven times.
347 		 */
348 		if (timeout > 7) {
349 			printf("Re-init retry timeout\n");
350 			return 0;
351 		}
352 
353 		mmc->has_init = 0;
354 		if (mmc_init(mmc))
355 			return 0;
356 
357 		if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
358 			printf("%s: Re-init mmc_read_blocks_prepare error\n",
359 			       __func__);
360 			goto re_init_retry;
361 		}
362 	}
363 
364 	return blkcnt;
365 }
366 #endif
367 
368 #if CONFIG_IS_ENABLED(BLK)
369 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
370 #else
371 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
372 		void *dst)
373 #endif
374 {
375 #if CONFIG_IS_ENABLED(BLK)
376 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
377 #endif
378 	int dev_num = block_dev->devnum;
379 	int err;
380 	lbaint_t cur, blocks_todo = blkcnt;
381 
382 #ifdef CONFIG_SPL_BLK_READ_PREPARE
383 	if (block_dev->op_flag == BLK_PRE_RW)
384 #if CONFIG_IS_ENABLED(BLK)
385 		return mmc_bread_prepare(dev, start, blkcnt, dst);
386 #else
387 		return mmc_bread_prepare(block_dev, start, blkcnt, dst);
388 #endif
389 #endif
390 	if (blkcnt == 0)
391 		return 0;
392 
393 	struct mmc *mmc = find_mmc_device(dev_num);
394 	if (!mmc)
395 		return 0;
396 
397 	if (CONFIG_IS_ENABLED(MMC_TINY))
398 		err = mmc_switch_part(mmc, block_dev->hwpart);
399 	else
400 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
401 
402 	if (err < 0)
403 		return 0;
404 
405 	if ((start + blkcnt) > block_dev->lba) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
408 			start + blkcnt, block_dev->lba);
409 #endif
410 		return 0;
411 	}
412 
413 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
414 		debug("%s: Failed to set blocklen\n", __func__);
415 		return 0;
416 	}
417 
418 	do {
419 		cur = (blocks_todo > mmc->cfg->b_max) ?
420 			mmc->cfg->b_max : blocks_todo;
421 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
422 			debug("%s: Failed to read blocks\n", __func__);
423 			int timeout = 0;
424 re_init_retry:
425 			timeout++;
426 			/*
427 			 * Try re-init seven times.
428 			 */
429 			if (timeout > 7) {
430 				printf("Re-init retry timeout\n");
431 				return 0;
432 			}
433 
434 			mmc->has_init = 0;
435 			if (mmc_init(mmc))
436 				return 0;
437 
438 			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
439 				printf("%s: Re-init mmc_read_blocks error\n",
440 				       __func__);
441 				goto re_init_retry;
442 			}
443 		}
444 		blocks_todo -= cur;
445 		start += cur;
446 		dst += cur * mmc->read_bl_len;
447 	} while (blocks_todo > 0);
448 
449 	return blkcnt;
450 }
451 
452 void mmc_set_clock(struct mmc *mmc, uint clock)
453 {
454 	if (clock > mmc->cfg->f_max)
455 		clock = mmc->cfg->f_max;
456 
457 	if (clock < mmc->cfg->f_min)
458 		clock = mmc->cfg->f_min;
459 
460 	mmc->clock = clock;
461 
462 	mmc_set_ios(mmc);
463 }
464 
465 static void mmc_set_bus_width(struct mmc *mmc, uint width)
466 {
467 	mmc->bus_width = width;
468 
469 	mmc_set_ios(mmc);
470 }
471 
472 static void mmc_set_timing(struct mmc *mmc, uint timing)
473 {
474 	mmc->timing = timing;
475 	mmc_set_ios(mmc);
476 }
477 
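/*
 * Reset the card to the idle state with CMD0 (GO_IDLE_STATE), with short
 * delays before and after the command to satisfy card timing requirements.
 */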
478 static int mmc_go_idle(struct mmc *mmc)
479 {
480 	struct mmc_cmd cmd;
481 	int err;
482 
483 	udelay(1000);
484 
485 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
486 	cmd.cmdarg = 0;
487 	cmd.resp_type = MMC_RSP_NONE;
488 
489 	err = mmc_send_cmd(mmc, &cmd, NULL);
490 
491 	if (err)
492 		return err;
493 
494 	udelay(2000);
495 
496 	return 0;
497 }
498 
499 #ifndef CONFIG_MMC_USE_PRE_CONFIG
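/*
 * Negotiate SD operating conditions: loop on CMD55 + ACMD41, advertising
 * the host's 3.3V voltage window (plus HCS for SD v2 cards), until the
 * card signals power-up complete or about one second elapses. The OCR and
 * high-capacity flag are then recorded and the RCA is cleared.
 */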
500 static int sd_send_op_cond(struct mmc *mmc)
501 {
502 	int timeout = 1000;
503 	int err;
504 	struct mmc_cmd cmd;
505 
506 	while (1) {
507 		cmd.cmdidx = MMC_CMD_APP_CMD;
508 		cmd.resp_type = MMC_RSP_R1;
509 		cmd.cmdarg = 0;
510 
511 		err = mmc_send_cmd(mmc, &cmd, NULL);
512 
513 		if (err)
514 			return err;
515 
516 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
517 		cmd.resp_type = MMC_RSP_R3;
518 
519 		/*
520 		 * Most cards do not answer if some reserved bits
521 		 * in the OCR are set. However, some controllers
522 		 * can set bit 7 (reserved for low voltages), but
523 		 * how to manage low-voltage SD cards is not yet
524 		 * specified.
525 		 */
526 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
527 			(mmc->cfg->voltages & 0xff8000);
528 
529 		if (mmc->version == SD_VERSION_2)
530 			cmd.cmdarg |= OCR_HCS;
531 
532 		err = mmc_send_cmd(mmc, &cmd, NULL);
533 
534 		if (err)
535 			return err;
536 
537 		if (cmd.response[0] & OCR_BUSY)
538 			break;
539 
540 		if (timeout-- <= 0)
541 			return -EOPNOTSUPP;
542 
543 		udelay(1000);
544 	}
545 
546 	if (mmc->version != SD_VERSION_2)
547 		mmc->version = SD_VERSION_1_0;
548 
549 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
550 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
551 		cmd.resp_type = MMC_RSP_R3;
552 		cmd.cmdarg = 0;
553 
554 		err = mmc_send_cmd(mmc, &cmd, NULL);
555 
556 		if (err)
557 			return err;
558 	}
559 
560 	mmc->ocr = cmd.response[0];
561 
562 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
563 	mmc->rca = 0;
564 
565 	return 0;
566 }
567 #endif
568 
569 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
570 {
571 	struct mmc_cmd cmd;
572 	int err;
573 
574 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
575 	cmd.resp_type = MMC_RSP_R3;
576 	cmd.cmdarg = 0;
577 	if (use_arg && !mmc_host_is_spi(mmc))
578 		cmd.cmdarg = OCR_HCS |
579 			(mmc->cfg->voltages &
580 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
581 			(mmc->ocr & OCR_ACCESS_MODE);
582 
583 	err = mmc_send_cmd(mmc, &cmd, NULL);
584 	if (err)
585 		return err;
586 	mmc->ocr = cmd.response[0];
587 	return 0;
588 }
589 
590 #ifndef CONFIG_MMC_USE_PRE_CONFIG
591 static int mmc_send_op_cond(struct mmc *mmc)
592 {
593 	int err, i;
594 
595 	/* Some cards seem to need this */
596 	mmc_go_idle(mmc);
597 
598 	/* Ask the card about its capabilities */
599 	for (i = 0; i < 2; i++) {
600 		err = mmc_send_op_cond_iter(mmc, i != 0);
601 		if (err)
602 			return err;
603 
604 		/* exit if not busy (flag seems to be inverted) */
605 		if (mmc->ocr & OCR_BUSY)
606 			break;
607 	}
608 	mmc->op_cond_pending = 1;
609 	return 0;
610 }
611 #endif
612 static int mmc_complete_op_cond(struct mmc *mmc)
613 {
614 	struct mmc_cmd cmd;
615 	int timeout = 1000;
616 	uint start;
617 	int err;
618 
619 	mmc->op_cond_pending = 0;
620 	if (!(mmc->ocr & OCR_BUSY)) {
621 		/* Some cards seem to need this */
622 		mmc_go_idle(mmc);
623 
624 		start = get_timer(0);
625 		while (1) {
626 			err = mmc_send_op_cond_iter(mmc, 1);
627 			if (err)
628 				return err;
629 			if (mmc->ocr & OCR_BUSY)
630 				break;
631 			if (get_timer(start) > timeout)
632 				return -EOPNOTSUPP;
633 			udelay(100);
634 		}
635 	}
636 
637 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
638 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
639 		cmd.resp_type = MMC_RSP_R3;
640 		cmd.cmdarg = 0;
641 
642 		err = mmc_send_cmd(mmc, &cmd, NULL);
643 
644 		if (err)
645 			return err;
646 
647 		mmc->ocr = cmd.response[0];
648 	}
649 
650 	mmc->version = MMC_VERSION_UNKNOWN;
651 
652 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
653 	mmc->rca = 1;
654 
655 	return 0;
656 }
657 
658 
659 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
660 {
661 	struct mmc_cmd cmd;
662 	struct mmc_data data;
663 	int err;
664 
665 #ifdef CONFIG_MMC_USE_PRE_CONFIG
666 	static int initialized;
667 	if (initialized) {
668 		memcpy(ext_csd, mmc_ext_csd, 512);
669 		return 0;
670 	}
671 
672 	initialized = 1;
673 #endif
674 	/* Get the Card Status Register */
675 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
676 	cmd.resp_type = MMC_RSP_R1;
677 	cmd.cmdarg = 0;
678 
679 	data.dest = (char *)ext_csd;
680 	data.blocks = 1;
681 	data.blocksize = MMC_MAX_BLOCK_LEN;
682 	data.flags = MMC_DATA_READ;
683 
684 	err = mmc_send_cmd(mmc, &cmd, &data);
685 	memcpy(mmc_ext_csd, ext_csd, 512);
686 #if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD)
687 	char *mmc_ecsd_base = NULL;
688 	ulong mmc_ecsd;
689 
690 	mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0);
691 	mmc_ecsd_base = (char *)mmc_ecsd;
692 	if (mmc_ecsd_base) {
693 		memcpy(mmc_ecsd_base, ext_csd, 512);
694 		*(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa;
695 	}
696 #endif
697 	return err;
698 }
699 
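/*
 * Wait for the card to leave the programming state after a switch. Either
 * the host's card_busy callback is polled or CMD13 is issued repeatedly;
 * a switch error yields -EBADMSG and -ETIMEDOUT is returned after roughly
 * one second. Without busy signalling or status polling, the full timeout
 * is simply waited out.
 */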
700 static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
701 {
702 	struct mmc_cmd cmd;
703 	u8 busy = true;
704 	uint start;
705 	int ret;
706 	int timeout = 1000;
707 
708 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
709 	cmd.resp_type = MMC_RSP_R1;
710 	cmd.cmdarg = mmc->rca << 16;
711 
712 	start = get_timer(0);
713 
714 	if (!send_status && !mmc_can_card_busy(mmc)) {
715 		mdelay(timeout);
716 		return 0;
717 	}
718 
719 	do {
720 		if (!send_status) {
721 			busy = mmc_card_busy(mmc);
722 		} else {
723 			ret = mmc_send_cmd(mmc, &cmd, NULL);
724 
725 			if (ret)
726 				return ret;
727 
728 			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
729 				return -EBADMSG;
730 			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
731 				MMC_STATE_PRG;
732 		}
733 
734 		if (get_timer(start) > timeout && busy)
735 			return -ETIMEDOUT;
736 	} while (busy);
737 
738 	return 0;
739 }
740 
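/*
 * Write a single EXT_CSD byte with CMD6 (index set to value), retrying the
 * command up to three times, then wait for the card to finish the switch
 * via mmc_poll_for_busy().
 */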
741 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
742 			u8 send_status)
743 {
744 	struct mmc_cmd cmd;
745 	int retries = 3;
746 	int ret;
747 
748 	cmd.cmdidx = MMC_CMD_SWITCH;
749 	cmd.resp_type = MMC_RSP_R1b;
750 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
751 				 (index << 16) |
752 				 (value << 8);
753 
754 	do {
755 		ret = mmc_send_cmd(mmc, &cmd, NULL);
756 
757 		if (!ret)
758 			return mmc_poll_for_busy(mmc, send_status);
759 	} while (--retries > 0 && ret);
760 
761 	return ret;
762 }
763 
764 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
765 {
766 	return __mmc_switch(mmc, set, index, value, true);
767 }
768 
769 static int mmc_select_bus_width(struct mmc *mmc)
770 {
771 	u32 ext_csd_bits[] = {
772 		EXT_CSD_BUS_WIDTH_8,
773 		EXT_CSD_BUS_WIDTH_4,
774 	};
775 	u32 bus_widths[] = {
776 		MMC_BUS_WIDTH_8BIT,
777 		MMC_BUS_WIDTH_4BIT,
778 	};
779 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
780 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
781 	u32 idx, bus_width = 0;
782 	int err = 0;
783 
784 	if (mmc->version < MMC_VERSION_4 ||
785 	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
786 		return 0;
787 
788 	err = mmc_send_ext_csd(mmc, ext_csd);
789 
790 	if (err)
791 		return err;
792 
793 	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;
794 
795 	/*
796 	 * Unlike SD, MMC cards don't have a configuration register reporting
797 	 * the supported bus width, so either a bus test command must be run
798 	 * to identify it, or the ext_csd values read at the current bus width
799 	 * must be compared with the ext_csd values read earlier in 1-bit mode.
800 	 */
801 	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
802 		/*
803 		 * If the host is capable of 8-bit transfer, switch the
804 		 * device to 8-bit transfer mode first. If the mmc switch
805 		 * command returns an error, fall back to 4-bit transfer
806 		 * mode. On success, set the corresponding bus width on
807 		 * the host.
808 		 */
809 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
810 				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
811 		if (err)
812 			continue;
813 
814 		bus_width = bus_widths[idx];
815 		mmc_set_bus_width(mmc, bus_width);
816 
817 		err = mmc_send_ext_csd(mmc, test_csd);
818 
819 		if (err)
820 			continue;
821 
822 		/* Only compare read only fields */
823 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
824 			test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
825 		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
826 			test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
827 		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
828 			(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
829 			test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
830 		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
831 			&test_csd[EXT_CSD_SEC_CNT], 4)) {
832 			err = bus_width;
833 			break;
834 		} else {
835 			err = -EBADMSG;
836 		}
837 	}
838 
839 	return err;
840 }
841 
842 #ifndef CONFIG_MMC_SIMPLE
843 static const u8 tuning_blk_pattern_4bit[] = {
844 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
845 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
846 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
847 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
848 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
849 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
850 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
851 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
852 };
853 
854 static const u8 tuning_blk_pattern_8bit[] = {
855 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
856 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
857 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
858 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
859 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
860 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
861 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
862 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
863 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
864 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
865 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
866 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
867 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
868 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
869 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
870 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
871 };
872 
873 int mmc_send_tuning(struct mmc *mmc, u32 opcode)
874 {
875 	struct mmc_cmd cmd;
876 	struct mmc_data data;
877 	const u8 *tuning_block_pattern;
878 	int size, err = 0;
879 	u8 *data_buf;
880 
881 	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
882 		tuning_block_pattern = tuning_blk_pattern_8bit;
883 		size = sizeof(tuning_blk_pattern_8bit);
884 	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
885 		tuning_block_pattern = tuning_blk_pattern_4bit;
886 		size = sizeof(tuning_blk_pattern_4bit);
887 	} else {
888 		return -EINVAL;
889 	}
890 
891 	data_buf = calloc(1, size);
892 	if (!data_buf)
893 		return -ENOMEM;
894 
895 	cmd.cmdidx = opcode;
896 	cmd.resp_type = MMC_RSP_R1;
897 	cmd.cmdarg = 0;
898 
899 	data.dest = (char *)data_buf;
900 	data.blocksize = size;
901 	data.blocks = 1;
902 	data.flags = MMC_DATA_READ;
903 
904 	err = mmc_send_cmd(mmc, &cmd, &data);
905 	if (err)
906 		goto out;
907 
908 	if (memcmp(data_buf, tuning_block_pattern, size))
909 		err = -EIO;
910 out:
911 	free(data_buf);
912 	return err;
913 }
914 
915 static int mmc_execute_tuning(struct mmc *mmc)
916 {
917 #ifdef CONFIG_DM_MMC
918 	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
919 #endif
920 	u32 opcode;
921 
922 	if (IS_SD(mmc))
923 		opcode = MMC_SEND_TUNING_BLOCK;
924 	else
925 		opcode = MMC_SEND_TUNING_BLOCK_HS200;
926 
927 #ifndef CONFIG_DM_MMC
928 	if (mmc->cfg->ops->execute_tuning) {
929 		return mmc->cfg->ops->execute_tuning(mmc, opcode);
930 #else
931 	if (ops->execute_tuning) {
932 		return ops->execute_tuning(mmc->dev, opcode);
933 #endif
934 	} else {
935 		debug("Tuning feature required for HS200 mode.\n");
936 		return -EIO;
937 	}
938 }
939 
940 static int mmc_hs200_tuning(struct mmc *mmc)
941 {
942 	return mmc_execute_tuning(mmc);
943 }
944 
945 #else
946 int mmc_send_tuning(struct mmc *mmc, u32 opcode) { return 0; }
947 int mmc_execute_tuning(struct mmc *mmc) { return 0; }
948 static int mmc_hs200_tuning(struct mmc *mmc) { return 0; }
949 #endif
950 
951 static int mmc_select_hs(struct mmc *mmc)
952 {
953 	int ret;
954 
955 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
956 			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
957 
958 	if (!ret)
959 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
960 
961 	return ret;
962 }
963 
964 static int mmc_select_hs_ddr(struct mmc *mmc)
965 {
966 	u32 ext_csd_bits;
967 	int err = 0;
968 
969 	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
970 		return 0;
971 
972 	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
973 			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
974 
975 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
976 			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
977 	if (err)
978 		return err;
979 
980 	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
981 
982 	return 0;
983 }
984 
985 #ifndef CONFIG_MMC_SIMPLE
986 static int mmc_select_hs200(struct mmc *mmc)
987 {
988 	int ret;
989 
990 	/*
991 	 * Set the bus width(4 or 8) with host's support and
992 	 * switch to HS200 mode if bus width is set successfully.
993 	 */
994 	ret = mmc_select_bus_width(mmc);
995 
996 	if (ret > 0) {
997 		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
998 				   EXT_CSD_HS_TIMING,
999 				   EXT_CSD_TIMING_HS200, false);
1000 
1001 		if (ret)
1002 			return ret;
1003 
1004 		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
1005 	}
1006 
1007 	return ret;
1008 }
1009 
1010 static int mmc_switch_to_hs400(struct mmc *mmc)
1011 {
1012 	u8 val, fixed_drv_type, card_drv_type, drive_strength;
1013 
1014 	fixed_drv_type = mmc->cfg->fixed_drv_type;
1015 	card_drv_type = mmc->raw_driver_strength | mmc_driver_type_mask(0);
1016 	drive_strength = (card_drv_type & mmc_driver_type_mask(fixed_drv_type))
1017 				 ? fixed_drv_type : 0;
1018 	val = EXT_CSD_TIMING_HS400 | drive_strength << EXT_CSD_DRV_STR_SHIFT;
1019 
1020 	return __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, false);
1021 }
1022 
1023 static int mmc_select_hs400(struct mmc *mmc)
1024 {
1025 	int ret;
1026 
1027 	/* Switch card to HS mode */
1028 	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1029 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
1030 	if (ret)
1031 		return ret;
1032 
1033 	/* Set host controller to HS timing */
1034 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
1035 
1036 	/* Reduce frequency to HS frequency */
1037 	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);
1038 
1039 	ret = mmc_send_status(mmc, 1000);
1040 	if (ret)
1041 		return ret;
1042 
1043 	/* Switch card to DDR */
1044 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1045 			 EXT_CSD_BUS_WIDTH,
1046 			 EXT_CSD_DDR_BUS_WIDTH_8);
1047 	if (ret)
1048 		return ret;
1049 
1050 	/* Switch card to HS400 */
1051 	ret = mmc_switch_to_hs400(mmc);
1052 	if (ret)
1053 		return ret;
1054 
1055 	/* Set host controller to HS400 timing and frequency */
1056 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);
1057 
1058 	return ret;
1059 }
1060 
1061 static int mmc_select_hs400es(struct mmc *mmc)
1062 {
1063 	int err;
1064 
1065 	/* Switch card to HS mode */
1066 	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1067 			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
1068 	if (err)
1069 		return err;
1070 
1071 	/* Set host controller to HS timing */
1072 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
1073 
1074 	err = mmc_send_status(mmc, 1000);
1075 	if (err)
1076 		return err;
1077 
1078 	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);
1079 
1080 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1081 			 EXT_CSD_DDR_BUS_WIDTH_8 |
1082 			 EXT_CSD_BUS_WIDTH_STROBE);
1083 	if (err) {
1084 		printf("switch to bus width for hs400 failed\n");
1085 		return err;
1086 	}
1087 
1088 	/* Switch card to HS400 */
1089 	err = mmc_switch_to_hs400(mmc);
1090 	if (err)
1091 		return err;
1092 
1093 	/* Set host controller to HS400 timing and frequency */
1094 	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400ES);
1095 
1096 	return mmc_set_enhanced_strobe(mmc);
1097 }
1098 #else
1099 static int mmc_select_hs200(struct mmc *mmc) { return 0; }
1100 static int mmc_select_hs400(struct mmc *mmc) { return 0; }
1101 static int mmc_select_hs400es(struct mmc *mmc) { return 0; }
1102 #endif
1103 
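/*
 * Intersect the device type bits reported in EXT_CSD with the host
 * capabilities to build the set of timing modes (HS26/HS52, DDR52,
 * HS200, HS400, HS400ES) that can actually be used.
 */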
1104 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
1105 {
1106 	u8 card_type;
1107 	u32 host_caps, avail_type = 0;
1108 
1109 	card_type = ext_csd[EXT_CSD_CARD_TYPE];
1110 	host_caps = mmc->cfg->host_caps;
1111 
1112 	if ((host_caps & MMC_MODE_HS) &&
1113 	    (card_type & EXT_CSD_CARD_TYPE_26))
1114 		avail_type |= EXT_CSD_CARD_TYPE_26;
1115 
1116 	if ((host_caps & MMC_MODE_HS) &&
1117 	    (card_type & EXT_CSD_CARD_TYPE_52))
1118 		avail_type |= EXT_CSD_CARD_TYPE_52;
1119 
1120 	/*
1121 	 * For the moment, u-boot doesn't support signal voltage
1122 	 * switch, therefore we assume that the host supports ddr52
1123 	 * at 1.8V or 3.3V I/O (1.2V I/O not supported; hs200 and
1124 	 * hs400 are the same).
1125 	 */
1126 	if ((host_caps & MMC_MODE_DDR_52MHz) &&
1127 	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
1128 		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
1129 
1130 	if ((host_caps & MMC_MODE_HS200) &&
1131 	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
1132 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
1133 
1134 	/*
1135 	 * If the host can support HS400, it can also
1136 	 * support HS200.
1137 	 */
1138 	if ((host_caps & MMC_MODE_HS400) &&
1139 	    (host_caps & MMC_MODE_8BIT) &&
1140 	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
1141 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
1142 				EXT_CSD_CARD_TYPE_HS400_1_8V;
1143 
1144 	if ((host_caps & MMC_MODE_HS400ES) &&
1145 	    (host_caps & MMC_MODE_8BIT) &&
1146 	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
1147 	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
1148 		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
1149 				EXT_CSD_CARD_TYPE_HS400_1_8V |
1150 				EXT_CSD_CARD_TYPE_HS400ES;
1151 
1152 	return avail_type;
1153 }
1154 
1155 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
1156 {
1157 	int clock = 0;
1158 
1159 	if (mmc_card_hs(mmc))
1160 		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
1161 			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
1162 	else if (mmc_card_hs200(mmc) ||
1163 		 mmc_card_hs400(mmc) ||
1164 		 mmc_card_hs400es(mmc))
1165 		clock = MMC_HS200_MAX_DTR;
1166 
1167 	mmc_set_clock(mmc, clock);
1168 }
1169 
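/*
 * For eMMC v4+ devices: read EXT_CSD, work out which timing modes both
 * card and host support, and switch to the best available one (HS400ES,
 * HS200 followed by tuning and optionally HS400, HS, or DDR52), selecting
 * the widest working bus width and the matching clock.
 */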
1170 static int mmc_change_freq(struct mmc *mmc)
1171 {
1172 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1173 	u32 avail_type;
1174 	int err;
1175 
1176 	mmc->card_caps = 0;
1177 
1178 	if (mmc_host_is_spi(mmc))
1179 		return 0;
1180 
1181 	/* Only version 4 supports high-speed */
1182 	if (mmc->version < MMC_VERSION_4)
1183 		return 0;
1184 
1185 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
1186 
1187 	err = mmc_send_ext_csd(mmc, ext_csd);
1188 
1189 	if (err)
1190 		return err;
1191 
1192 	avail_type = mmc_select_card_type(mmc, ext_csd);
1193 
1194 	if (avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
1195 		err = mmc_select_bus_width(mmc);
1196 		if (err > 0 && mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
1197 			err = mmc_select_hs400es(mmc);
1198 			mmc_set_bus_speed(mmc, avail_type);
1199 			if (!err)
1200 				return err;
1201 		}
1202 	}
1203 
1204 	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
1205 		err = mmc_select_hs200(mmc);
1206 	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
1207 		err = mmc_select_hs(mmc);
1208 	else
1209 		err = -EINVAL;
1210 
1211 	if (err)
1212 		return err;
1213 
1214 	mmc_set_bus_speed(mmc, avail_type);
1215 
1216 	if (mmc_card_hs200(mmc)) {
1217 		err = mmc_hs200_tuning(mmc);
1218 		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1219 		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
1220 			err = mmc_select_hs400(mmc);
1221 			mmc_set_bus_speed(mmc, avail_type);
1222 		}
1223 	} else if (!mmc_card_hs400es(mmc)) {
1224 		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
1225 		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
1226 			err = mmc_select_hs_ddr(mmc);
1227 	}
1228 
1229 	return err;
1230 }
1231 
1232 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1233 {
1234 	switch (part_num) {
1235 	case 0:
1236 		mmc->capacity = mmc->capacity_user;
1237 		break;
1238 	case 1:
1239 	case 2:
1240 		mmc->capacity = mmc->capacity_boot;
1241 		break;
1242 	case 3:
1243 		mmc->capacity = mmc->capacity_rpmb;
1244 		break;
1245 	case 4:
1246 	case 5:
1247 	case 6:
1248 	case 7:
1249 		mmc->capacity = mmc->capacity_gp[part_num - 4];
1250 		break;
1251 	default:
1252 		return -1;
1253 	}
1254 
1255 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1256 
1257 	return 0;
1258 }
1259 
1260 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1261 {
1262 	int ret;
1263 
1264 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1265 			 (mmc->part_config & ~PART_ACCESS_MASK)
1266 			 | (part_num & PART_ACCESS_MASK));
1267 
1268 	/*
1269 	 * Set the capacity if the switch succeeded or was intended
1270 	 * to return to representing the raw device.
1271 	 */
1272 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1273 		ret = mmc_set_capacity(mmc, part_num);
1274 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1275 	}
1276 
1277 	return ret;
1278 }
1279 
1280 int mmc_hwpart_config(struct mmc *mmc,
1281 		      const struct mmc_hwpart_conf *conf,
1282 		      enum mmc_hwpart_conf_mode mode)
1283 {
1284 	u8 part_attrs = 0;
1285 	u32 enh_size_mult;
1286 	u32 enh_start_addr;
1287 	u32 gp_size_mult[4];
1288 	u32 max_enh_size_mult;
1289 	u32 tot_enh_size_mult = 0;
1290 	u8 wr_rel_set;
1291 	int i, pidx, err;
1292 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1293 
1294 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1295 		return -EINVAL;
1296 
1297 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1298 		printf("eMMC >= 4.4 required for enhanced user data area\n");
1299 		return -EMEDIUMTYPE;
1300 	}
1301 
1302 	if (!(mmc->part_support & PART_SUPPORT)) {
1303 		printf("Card does not support partitioning\n");
1304 		return -EMEDIUMTYPE;
1305 	}
1306 
1307 	if (!mmc->hc_wp_grp_size) {
1308 		printf("Card does not define HC WP group size\n");
1309 		return -EMEDIUMTYPE;
1310 	}
1311 
1312 	/* check partition alignment and total enhanced size */
1313 	if (conf->user.enh_size) {
1314 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1315 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1316 			printf("User data enhanced area not HC WP group "
1317 			       "size aligned\n");
1318 			return -EINVAL;
1319 		}
1320 		part_attrs |= EXT_CSD_ENH_USR;
1321 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1322 		if (mmc->high_capacity) {
1323 			enh_start_addr = conf->user.enh_start;
1324 		} else {
1325 			enh_start_addr = (conf->user.enh_start << 9);
1326 		}
1327 	} else {
1328 		enh_size_mult = 0;
1329 		enh_start_addr = 0;
1330 	}
1331 	tot_enh_size_mult += enh_size_mult;
1332 
1333 	for (pidx = 0; pidx < 4; pidx++) {
1334 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1335 			printf("GP%i partition not HC WP group size "
1336 			       "aligned\n", pidx+1);
1337 			return -EINVAL;
1338 		}
1339 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1340 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1341 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1342 			tot_enh_size_mult += gp_size_mult[pidx];
1343 		}
1344 	}
1345 
1346 	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1347 		printf("Card does not support enhanced attribute\n");
1348 		return -EMEDIUMTYPE;
1349 	}
1350 
1351 	err = mmc_send_ext_csd(mmc, ext_csd);
1352 	if (err)
1353 		return err;
1354 
1355 	max_enh_size_mult =
1356 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1357 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1358 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1359 	if (tot_enh_size_mult > max_enh_size_mult) {
1360 		printf("Total enhanced size exceeds maximum (%u > %u)\n",
1361 		       tot_enh_size_mult, max_enh_size_mult);
1362 		return -EMEDIUMTYPE;
1363 	}
1364 
1365 	/* The default value of EXT_CSD_WR_REL_SET is device
1366 	 * dependent; the values can only be changed if the
1367 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1368 	 * changed only once and before partitioning is completed. */
1369 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1370 	if (conf->user.wr_rel_change) {
1371 		if (conf->user.wr_rel_set)
1372 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1373 		else
1374 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1375 	}
1376 	for (pidx = 0; pidx < 4; pidx++) {
1377 		if (conf->gp_part[pidx].wr_rel_change) {
1378 			if (conf->gp_part[pidx].wr_rel_set)
1379 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1380 			else
1381 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1382 		}
1383 	}
1384 
1385 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1386 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1387 		puts("Card does not support host controlled partition write "
1388 		     "reliability settings\n");
1389 		return -EMEDIUMTYPE;
1390 	}
1391 
1392 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1393 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1394 		printf("Card already partitioned\n");
1395 		return -EPERM;
1396 	}
1397 
1398 	if (mode == MMC_HWPART_CONF_CHECK)
1399 		return 0;
1400 
1401 	/* Partitioning requires high-capacity size definitions */
1402 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1403 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1404 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1405 
1406 		if (err)
1407 			return err;
1408 
1409 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1410 
1411 		/* update erase group size to be high-capacity */
1412 		mmc->erase_grp_size =
1413 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1414 
1415 	}
1416 
1417 	/* all OK, write the configuration */
1418 	for (i = 0; i < 4; i++) {
1419 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1420 				 EXT_CSD_ENH_START_ADDR+i,
1421 				 (enh_start_addr >> (i*8)) & 0xFF);
1422 		if (err)
1423 			return err;
1424 	}
1425 	for (i = 0; i < 3; i++) {
1426 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1427 				 EXT_CSD_ENH_SIZE_MULT+i,
1428 				 (enh_size_mult >> (i*8)) & 0xFF);
1429 		if (err)
1430 			return err;
1431 	}
1432 	for (pidx = 0; pidx < 4; pidx++) {
1433 		for (i = 0; i < 3; i++) {
1434 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1435 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1436 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1437 			if (err)
1438 				return err;
1439 		}
1440 	}
1441 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1442 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1443 	if (err)
1444 		return err;
1445 
1446 	if (mode == MMC_HWPART_CONF_SET)
1447 		return 0;
1448 
1449 	/* The WR_REL_SET is a write-once register but shall be
1450 	 * written before setting PART_SETTING_COMPLETED. As it is
1451 	 * write-once we can only write it when completing the
1452 	 * partitioning. */
1453 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1454 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1455 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1456 		if (err)
1457 			return err;
1458 	}
1459 
1460 	/* Setting PART_SETTING_COMPLETED confirms the partition
1461 	 * configuration but it only becomes effective after power
1462 	 * cycle, so we do not adjust the partition related settings
1463 	 * in the mmc struct. */
1464 
1465 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1466 			 EXT_CSD_PARTITION_SETTING,
1467 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1468 	if (err)
1469 		return err;
1470 
1471 	return 0;
1472 }
1473 
1474 #if !CONFIG_IS_ENABLED(DM_MMC)
1475 int mmc_getcd(struct mmc *mmc)
1476 {
1477 	int cd;
1478 
1479 	cd = board_mmc_getcd(mmc);
1480 
1481 	if (cd < 0) {
1482 		if (mmc->cfg->ops->getcd)
1483 			cd = mmc->cfg->ops->getcd(mmc);
1484 		else
1485 			cd = 1;
1486 	}
1487 
1488 	return cd;
1489 }
1490 #endif
1491 
1492 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1493 {
1494 	struct mmc_cmd cmd;
1495 	struct mmc_data data;
1496 
1497 	/* Switch the frequency */
1498 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1499 	cmd.resp_type = MMC_RSP_R1;
1500 	cmd.cmdarg = (mode << 31) | 0xffffff;
1501 	cmd.cmdarg &= ~(0xf << (group * 4));
1502 	cmd.cmdarg |= value << (group * 4);
1503 
1504 	data.dest = (char *)resp;
1505 	data.blocksize = 64;
1506 	data.blocks = 1;
1507 	data.flags = MMC_DATA_READ;
1508 
1509 	return mmc_send_cmd(mmc, &cmd, &data);
1510 }
1511 
1512 
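/*
 * Read the SCR (ACMD51) to determine the SD version and 4-bit support,
 * then use CMD6 (SWITCH_FUNC) to query high-speed capability and enable
 * it when both card and host allow it.
 */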
1513 static int sd_change_freq(struct mmc *mmc)
1514 {
1515 	int err;
1516 	struct mmc_cmd cmd;
1517 	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
1518 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1519 	struct mmc_data data;
1520 	int timeout;
1521 
1522 	mmc->card_caps = 0;
1523 
1524 	if (mmc_host_is_spi(mmc))
1525 		return 0;
1526 
1527 	/* Read the SCR to find out if this card supports higher speeds */
1528 	cmd.cmdidx = MMC_CMD_APP_CMD;
1529 	cmd.resp_type = MMC_RSP_R1;
1530 	cmd.cmdarg = mmc->rca << 16;
1531 
1532 	err = mmc_send_cmd(mmc, &cmd, NULL);
1533 
1534 	if (err)
1535 		return err;
1536 
1537 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1538 	cmd.resp_type = MMC_RSP_R1;
1539 	cmd.cmdarg = 0;
1540 
1541 	timeout = 3;
1542 
1543 retry_scr:
1544 	data.dest = (char *)scr;
1545 	data.blocksize = 8;
1546 	data.blocks = 1;
1547 	data.flags = MMC_DATA_READ;
1548 
1549 	err = mmc_send_cmd(mmc, &cmd, &data);
1550 
1551 	if (err) {
1552 		if (timeout--)
1553 			goto retry_scr;
1554 
1555 		return err;
1556 	}
1557 
1558 	mmc->scr[0] = __be32_to_cpu(scr[0]);
1559 	mmc->scr[1] = __be32_to_cpu(scr[1]);
1560 
1561 	switch ((mmc->scr[0] >> 24) & 0xf) {
1562 	case 0:
1563 		mmc->version = SD_VERSION_1_0;
1564 		break;
1565 	case 1:
1566 		mmc->version = SD_VERSION_1_10;
1567 		break;
1568 	case 2:
1569 		mmc->version = SD_VERSION_2;
1570 		if ((mmc->scr[0] >> 15) & 0x1)
1571 			mmc->version = SD_VERSION_3;
1572 		break;
1573 	default:
1574 		mmc->version = SD_VERSION_1_0;
1575 		break;
1576 	}
1577 
1578 	if (mmc->scr[0] & SD_DATA_4BIT)
1579 		mmc->card_caps |= MMC_MODE_4BIT;
1580 
1581 	/* Version 1.0 doesn't support switching */
1582 	if (mmc->version == SD_VERSION_1_0)
1583 		return 0;
1584 
1585 	timeout = 4;
1586 	while (timeout--) {
1587 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1588 				(u8 *)switch_status);
1589 
1590 		if (err)
1591 			return err;
1592 
1593 		/* The high-speed function is busy.  Try again */
1594 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1595 			break;
1596 	}
1597 
1598 	/* If high-speed isn't supported, we return */
1599 	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
1600 		return 0;
1601 
1602 	/*
1603 	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
1604 	 * HIGHSPEED mode even if the card supports SD_HIGHSPEED.
1605 	 * This avoids further problems when the card and the host run
1606 	 * in different modes.
1607 	 */
1608 	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
1609 		(mmc->cfg->host_caps & MMC_MODE_HS)))
1610 		return 0;
1611 
1612 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
1613 
1614 	if (err)
1615 		return err;
1616 
1617 	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
1618 		mmc->card_caps |= MMC_MODE_HS;
1619 
1620 	return 0;
1621 }
1622 
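/*
 * Read the 64-byte SD Status register with ACMD13 and extract the
 * allocation unit (AU) size and the erase timeout/offset used for erase
 * time estimates.
 */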
1623 static int sd_read_ssr(struct mmc *mmc)
1624 {
1625 	int err, i;
1626 	struct mmc_cmd cmd;
1627 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1628 	struct mmc_data data;
1629 	int timeout = 3;
1630 	unsigned int au, eo, et, es;
1631 
1632 	cmd.cmdidx = MMC_CMD_APP_CMD;
1633 	cmd.resp_type = MMC_RSP_R1;
1634 	cmd.cmdarg = mmc->rca << 16;
1635 
1636 	err = mmc_send_cmd(mmc, &cmd, NULL);
1637 	if (err)
1638 		return err;
1639 
1640 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1641 	cmd.resp_type = MMC_RSP_R1;
1642 	cmd.cmdarg = 0;
1643 
1644 retry_ssr:
1645 	data.dest = (char *)ssr;
1646 	data.blocksize = 64;
1647 	data.blocks = 1;
1648 	data.flags = MMC_DATA_READ;
1649 
1650 	err = mmc_send_cmd(mmc, &cmd, &data);
1651 	if (err) {
1652 		if (timeout--)
1653 			goto retry_ssr;
1654 
1655 		return err;
1656 	}
1657 
1658 	for (i = 0; i < 16; i++)
1659 		ssr[i] = be32_to_cpu(ssr[i]);
1660 
1661 	au = (ssr[2] >> 12) & 0xF;
1662 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1663 		mmc->ssr.au = sd_au_size[au];
1664 		es = (ssr[3] >> 24) & 0xFF;
1665 		es |= (ssr[2] & 0xFF) << 8;
1666 		et = (ssr[3] >> 18) & 0x3F;
1667 		if (es && et) {
1668 			eo = (ssr[3] >> 16) & 0x3;
1669 			mmc->ssr.erase_timeout = (et * 1000) / es;
1670 			mmc->ssr.erase_offset = eo * 1000;
1671 		}
1672 	} else {
1673 		debug("Invalid Allocation Unit Size.\n");
1674 	}
1675 
1676 	return 0;
1677 }
1678 
1679 /* frequency bases */
1680 /* divided by 10 to be nice to platforms without floating point */
1681 static const int fbase[] = {
1682 	10000,
1683 	100000,
1684 	1000000,
1685 	10000000,
1686 };
1687 
1688 /* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1689  * to platforms without floating point.
1690  */
1691 static const u8 multipliers[] = {
1692 	0,	/* reserved */
1693 	10,
1694 	12,
1695 	13,
1696 	15,
1697 	20,
1698 	25,
1699 	30,
1700 	35,
1701 	40,
1702 	45,
1703 	50,
1704 	55,
1705 	60,
1706 	70,
1707 	80,
1708 };
1709 
1710 #if !CONFIG_IS_ENABLED(DM_MMC)
1711 static void mmc_set_ios(struct mmc *mmc)
1712 {
1713 	if (mmc->cfg->ops->set_ios)
1714 		mmc->cfg->ops->set_ios(mmc);
1715 }
1716 
1717 static bool mmc_card_busy(struct mmc *mmc)
1718 {
1719 	if (!mmc->cfg->ops->card_busy)
1720 		return -ENOSYS;
1721 
1722 	return mmc->cfg->ops->card_busy(mmc);
1723 }
1724 
1725 static bool mmc_can_card_busy(struct mmc *mmc)
1726 {
1727 	return !!mmc->cfg->ops->card_busy;
1728 }
1729 #endif
1730 
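/*
 * Complete card enumeration after the OCR handshake: read the CID, set or
 * fetch the RCA (both skipped with CONFIG_MMC_USE_PRE_CONFIG), read the
 * CSD, select the card, parse EXT_CSD for eMMC (capacity, partitions,
 * erase group size), switch to the fastest supported bus mode and width,
 * and fill in the block device descriptor.
 */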
1731 static int mmc_startup(struct mmc *mmc)
1732 {
1733 	int err, i;
1734 	uint mult, freq, tran_speed;
1735 	u64 cmult, csize, capacity;
1736 	struct mmc_cmd cmd;
1737 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1738 	bool has_parts = false;
1739 	bool part_completed;
1740 	struct blk_desc *bdesc;
1741 
1742 #ifdef CONFIG_MMC_SPI_CRC_ON
1743 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1744 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1745 		cmd.resp_type = MMC_RSP_R1;
1746 		cmd.cmdarg = 1;
1747 		err = mmc_send_cmd(mmc, &cmd, NULL);
1748 
1749 		if (err)
1750 			return err;
1751 	}
1752 #endif
1753 #ifndef CONFIG_MMC_USE_PRE_CONFIG
1754 	/* Put the Card in Identify Mode */
1755 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1756 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1757 	cmd.resp_type = MMC_RSP_R2;
1758 	cmd.cmdarg = 0;
1759 
1760 	err = mmc_send_cmd(mmc, &cmd, NULL);
1761 
1762 	if (err)
1763 		return err;
1764 
1765 	memcpy(mmc->cid, cmd.response, 16);
1766 
1767 	/*
1768 	 * For MMC cards, set the Relative Address.
1769 	 * For SD cards, get the Relative Address.
1770 	 * This also puts the cards into Standby State
1771 	 */
1772 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1773 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1774 		cmd.cmdarg = mmc->rca << 16;
1775 		cmd.resp_type = MMC_RSP_R6;
1776 
1777 		err = mmc_send_cmd(mmc, &cmd, NULL);
1778 
1779 		if (err)
1780 			return err;
1781 
1782 		if (IS_SD(mmc))
1783 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1784 	}
1785 #endif
1786 	/* Get the Card-Specific Data */
1787 	cmd.cmdidx = MMC_CMD_SEND_CSD;
1788 	cmd.resp_type = MMC_RSP_R2;
1789 	cmd.cmdarg = mmc->rca << 16;
1790 
1791 	err = mmc_send_cmd(mmc, &cmd, NULL);
1792 
1793 	if (err)
1794 		return err;
1795 
1796 	mmc->csd[0] = cmd.response[0];
1797 	mmc->csd[1] = cmd.response[1];
1798 	mmc->csd[2] = cmd.response[2];
1799 	mmc->csd[3] = cmd.response[3];
1800 
1801 	if (mmc->version == MMC_VERSION_UNKNOWN) {
1802 		int version = (cmd.response[0] >> 26) & 0xf;
1803 
1804 		switch (version) {
1805 		case 0:
1806 			mmc->version = MMC_VERSION_1_2;
1807 			break;
1808 		case 1:
1809 			mmc->version = MMC_VERSION_1_4;
1810 			break;
1811 		case 2:
1812 			mmc->version = MMC_VERSION_2_2;
1813 			break;
1814 		case 3:
1815 			mmc->version = MMC_VERSION_3;
1816 			break;
1817 		case 4:
1818 			mmc->version = MMC_VERSION_4;
1819 			break;
1820 		default:
1821 			mmc->version = MMC_VERSION_1_2;
1822 			break;
1823 		}
1824 	}
1825 
1826 	/* divide frequency by 10, since the mults are 10x bigger */
1827 	freq = fbase[(cmd.response[0] & 0x7)];
1828 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1829 
1830 	tran_speed = freq * mult;
1831 
1832 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1833 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1834 
1835 	if (IS_SD(mmc))
1836 		mmc->write_bl_len = mmc->read_bl_len;
1837 	else
1838 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1839 
1840 	if (mmc->high_capacity) {
1841 		csize = (mmc->csd[1] & 0x3f) << 16
1842 			| (mmc->csd[2] & 0xffff0000) >> 16;
1843 		cmult = 8;
1844 	} else {
1845 		csize = (mmc->csd[1] & 0x3ff) << 2
1846 			| (mmc->csd[2] & 0xc0000000) >> 30;
1847 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
1848 	}
1849 
1850 	mmc->capacity_user = (csize + 1) << (cmult + 2);
1851 	mmc->capacity_user *= mmc->read_bl_len;
1852 	mmc->capacity_boot = 0;
1853 	mmc->capacity_rpmb = 0;
1854 	for (i = 0; i < 4; i++)
1855 		mmc->capacity_gp[i] = 0;
1856 
1857 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1858 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1859 
1860 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1861 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1862 
1863 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1864 		cmd.cmdidx = MMC_CMD_SET_DSR;
1865 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1866 		cmd.resp_type = MMC_RSP_NONE;
1867 		if (mmc_send_cmd(mmc, &cmd, NULL))
1868 			printf("MMC: SET_DSR failed\n");
1869 	}
1870 
1871 	/* Select the card, and put it into Transfer Mode */
1872 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1873 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
1874 		cmd.resp_type = MMC_RSP_R1;
1875 		cmd.cmdarg = mmc->rca << 16;
1876 		err = mmc_send_cmd(mmc, &cmd, NULL);
1877 
1878 		if (err)
1879 			return err;
1880 	}
1881 
1882 	/*
1883 	 * For SD, its erase group is always one sector
1884 	 */
1885 	mmc->erase_grp_size = 1;
1886 	mmc->part_config = MMCPART_NOAVAILABLE;
1887 	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
1888 		/* select high speed to reduce initialization time */
1889 		mmc_select_hs(mmc);
1890 		mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);
1891 
1892 		/* check  ext_csd version and capacity */
1893 		err = mmc_send_ext_csd(mmc, ext_csd);
1894 		if (err)
1895 			return err;
1896 		if (ext_csd[EXT_CSD_REV] >= 2) {
1897 			/*
1898 			 * According to the JEDEC Standard, the value of
1899 			 * ext_csd's capacity is valid if the value is more
1900 			 * than 2GB
1901 			 */
1902 			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1903 					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1904 					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1905 					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1906 			capacity *= MMC_MAX_BLOCK_LEN;
1907 			if ((capacity >> 20) > 2 * 1024)
1908 				mmc->capacity_user = capacity;
1909 		}
1910 
1911 		switch (ext_csd[EXT_CSD_REV]) {
1912 		case 1:
1913 			mmc->version = MMC_VERSION_4_1;
1914 			break;
1915 		case 2:
1916 			mmc->version = MMC_VERSION_4_2;
1917 			break;
1918 		case 3:
1919 			mmc->version = MMC_VERSION_4_3;
1920 			break;
1921 		case 5:
1922 			mmc->version = MMC_VERSION_4_41;
1923 			break;
1924 		case 6:
1925 			mmc->version = MMC_VERSION_4_5;
1926 			break;
1927 		case 7:
1928 			mmc->version = MMC_VERSION_5_0;
1929 			break;
1930 		case 8:
1931 			mmc->version = MMC_VERSION_5_1;
1932 			break;
1933 		}
1934 
1935 		/* The partition data may be non-zero but it is only
1936 		 * effective if PARTITION_SETTING_COMPLETED is set in
1937 		 * EXT_CSD, so ignore any data if this bit is not set,
1938 		 * except for enabling the high-capacity group size
1939 		 * definition (see below). */
1940 		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1941 				    EXT_CSD_PARTITION_SETTING_COMPLETED);
1942 
1943 		/* store the partition info of emmc */
1944 		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1945 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1946 		    ext_csd[EXT_CSD_BOOT_MULT])
1947 			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1948 		if (part_completed &&
1949 		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1950 			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1951 		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
1952 			mmc->esr.mmc_can_trim = 1;
1953 
1954 		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1955 
1956 		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1957 
1958 		for (i = 0; i < 4; i++) {
1959 			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1960 			uint mult = (ext_csd[idx + 2] << 16) +
1961 				(ext_csd[idx + 1] << 8) + ext_csd[idx];
1962 			if (mult)
1963 				has_parts = true;
1964 			if (!part_completed)
1965 				continue;
1966 			mmc->capacity_gp[i] = mult;
1967 			mmc->capacity_gp[i] *=
1968 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1969 			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1970 			mmc->capacity_gp[i] <<= 19;
1971 		}
1972 
1973 		if (part_completed) {
1974 			mmc->enh_user_size =
1975 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
1976 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
1977 				ext_csd[EXT_CSD_ENH_SIZE_MULT];
1978 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1979 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1980 			mmc->enh_user_size <<= 19;
1981 			mmc->enh_user_start =
1982 				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
1983 				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
1984 				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
1985 				ext_csd[EXT_CSD_ENH_START_ADDR];
1986 			if (mmc->high_capacity)
1987 				mmc->enh_user_start <<= 9;
1988 		}
1989 
1990 		/*
1991 		 * Host needs to enable ERASE_GRP_DEF bit if device is
1992 		 * partitioned. This bit will be lost every time after a reset
1993 		 * or power off. This will affect erase size.
1994 		 */
1995 		if (part_completed)
1996 			has_parts = true;
1997 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1998 		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1999 			has_parts = true;
2000 		if (has_parts) {
2001 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2002 				EXT_CSD_ERASE_GROUP_DEF, 1);
2003 
2004 			if (err)
2005 				return err;
2006 			else
2007 				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2008 		}
2009 
2010 		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2011 			/* Read out group size from ext_csd */
2012 			mmc->erase_grp_size =
2013 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2014 			/*
2015 			 * if high capacity and partition setting completed
2016 			 * SEC_COUNT is valid even if it is smaller than 2 GiB
2017 			 * JEDEC Standard JESD84-B45, 6.2.4
2018 			 */
2019 			if (mmc->high_capacity && part_completed) {
2020 				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2021 					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2022 					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2023 					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2024 				capacity *= MMC_MAX_BLOCK_LEN;
2025 				mmc->capacity_user = capacity;
2026 			}
2027 		} else {
2028 			/* Calculate the group size from the csd value. */
2029 			int erase_gsz, erase_gmul;
2030 			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2031 			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2032 			mmc->erase_grp_size = (erase_gsz + 1)
2033 				* (erase_gmul + 1);
2034 		}
2035 
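		/*
		 * Write-protect group size in 512-byte blocks: HC_WP_GRP_SIZE
		 * erase groups of HC_ERASE_GRP_SIZE * 512 KiB (1024 blocks)
		 * each.
		 */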
2036 		mmc->hc_wp_grp_size = 1024
2037 			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2038 			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2039 
2040 		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2041 
2042 		mmc->raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
2043 	}
2044 
2045 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2046 	if (err)
2047 		return err;
2048 
2049 	if (IS_SD(mmc))
2050 		err = sd_change_freq(mmc);
2051 	else
2052 		err = mmc_change_freq(mmc);
2053 
2054 	if (err)
2055 		return err;
2056 
2057 	/* Restrict card's capabilities by what the host can do */
2058 	mmc->card_caps &= mmc->cfg->host_caps;
2059 
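	/*
	 * For SD cards the bus width is changed with ACMD6: CMD55 (APP_CMD,
	 * RCA in bits 31:16) followed by SD_CMD_APP_SET_BUS_WIDTH, whose
	 * argument 2 selects 4-bit mode (0 would keep 1-bit mode).
	 */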
2060 	if (IS_SD(mmc)) {
2061 		if (mmc->card_caps & MMC_MODE_4BIT) {
2062 			cmd.cmdidx = MMC_CMD_APP_CMD;
2063 			cmd.resp_type = MMC_RSP_R1;
2064 			cmd.cmdarg = mmc->rca << 16;
2065 
2066 			err = mmc_send_cmd(mmc, &cmd, NULL);
2067 			if (err)
2068 				return err;
2069 
2070 			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
2071 			cmd.resp_type = MMC_RSP_R1;
2072 			cmd.cmdarg = 2;
2073 			err = mmc_send_cmd(mmc, &cmd, NULL);
2074 			if (err)
2075 				return err;
2076 
2077 			mmc_set_bus_width(mmc, 4);
2078 		}
2079 
2080 		err = sd_read_ssr(mmc);
2081 		if (err)
2082 			return err;
2083 
2084 		if (mmc->card_caps & MMC_MODE_HS)
2085 			tran_speed = MMC_HIGH_52_MAX_DTR;
2086 		else
2087 			tran_speed = MMC_HIGH_26_MAX_DTR;
2088 
2089 		mmc_set_clock(mmc, tran_speed);
2090 	}
2091 
2092 	/* Fix the block length for DDR mode */
2093 	if (mmc_card_ddr(mmc)) {
2094 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2095 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2096 	}
2097 
2098 	/* fill in device description */
2099 	bdesc = mmc_get_blk_desc(mmc);
2100 	bdesc->lun = 0;
2101 	bdesc->hwpart = 0;
2102 	bdesc->type = 0;
2103 	bdesc->blksz = mmc->read_bl_len;
2104 	bdesc->log2blksz = LOG2(bdesc->blksz);
2105 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
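	/*
	 * The strings below decode a few CID fields, assuming the eMMC CID
	 * layout (the SD layout differs slightly): manufacturer ID in bits
	 * 127:120, product name in bits 103:56, product revision
	 * (major.minor) in bits 55:48 and serial number in bits 47:16.
	 */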
2106 #if !defined(CONFIG_SPL_BUILD) || \
2107 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2108 		!defined(CONFIG_USE_TINY_PRINTF))
2109 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2110 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2111 		(mmc->cid[3] >> 16) & 0xffff);
2112 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2113 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2114 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2115 		(mmc->cid[2] >> 24) & 0xff);
2116 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2117 		(mmc->cid[2] >> 16) & 0xf);
2118 #else
2119 	bdesc->vendor[0] = 0;
2120 	bdesc->product[0] = 0;
2121 	bdesc->revision[0] = 0;
2122 #endif
2123 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2124 	part_init(bdesc);
2125 #endif
2126 
2127 	return 0;
2128 }
2129 
2130 #ifndef CONFIG_MMC_USE_PRE_CONFIG
2131 static int mmc_send_if_cond(struct mmc *mmc)
2132 {
2133 	struct mmc_cmd cmd;
2134 	int err;
2135 
2136 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2137 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2138 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2139 	cmd.resp_type = MMC_RSP_R7;
2140 
2141 	err = mmc_send_cmd(mmc, &cmd, NULL);
2142 
2143 	if (err)
2144 		return err;
2145 
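	/*
	 * The R7 response echoes the check pattern (0xAA) in bits 7:0 and the
	 * accepted voltage range in bits 11:8; a wrong echo (or a timeout on
	 * the command itself) means the card predates SD 2.0.
	 */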
2146 	if ((cmd.response[0] & 0xff) != 0xaa)
2147 		return -EOPNOTSUPP;
2148 	else
2149 		mmc->version = SD_VERSION_2;
2150 
2151 	return 0;
2152 }
2153 #endif
2154 
2155 #if !CONFIG_IS_ENABLED(DM_MMC)
2156 /* board-specific MMC power initializations. */
2157 __weak void board_mmc_power_init(void)
2158 {
2159 }
2160 #endif
2161 
2162 #ifndef CONFIG_MMC_USE_PRE_CONFIG
2163 static int mmc_power_init(struct mmc *mmc)
2164 {
2165 #if CONFIG_IS_ENABLED(DM_MMC)
2166 #if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
2167 	struct udevice *vmmc_supply;
2168 	int ret;
2169 
2170 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2171 					  &vmmc_supply);
2172 	if (ret) {
2173 		debug("%s: No vmmc supply\n", mmc->dev->name);
2174 		return 0;
2175 	}
2176 
2177 	ret = regulator_set_enable(vmmc_supply, true);
2178 	if (ret) {
2179 		puts("Error enabling VMMC supply\n");
2180 		return ret;
2181 	}
2182 #endif
2183 #else /* !CONFIG_DM_MMC */
2184 	/*
2185 	 * Driver model should use a regulator, as above, rather than calling
2186 	 * out to board code.
2187 	 */
2188 	board_mmc_power_init();
2189 #endif
2190 	return 0;
2191 }
2192 #endif
2193 #ifdef CONFIG_MMC_USE_PRE_CONFIG
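/*
 * CMD7 (SELECT/DESELECT_CARD): a non-zero RCA in bits 31:16 selects that
 * card, while an argument of 0 deselects the currently selected card and
 * returns it to the stand-by state.
 */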
2194 static int mmc_select_card(struct mmc *mmc, int n)
2195 {
2196 	struct mmc_cmd cmd;
2197 	int err = 0;
2198 
2199 	memset(&cmd, 0, sizeof(struct mmc_cmd));
2200 	if (!mmc_host_is_spi(mmc)) { /* CMD7 is not supported in SPI mode */
2201 		mmc->rca = n;
2202 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2203 		cmd.resp_type = MMC_RSP_R1;
2204 		cmd.cmdarg = mmc->rca << 16;
2205 		err = mmc_send_cmd(mmc, &cmd, NULL);
2206 	}
2207 
2208 	return err;
2209 }
2210 
2211 int mmc_start_init(struct mmc *mmc)
2212 {
2213 	/*
2214 	 * We reuse the MMC configuration set up by the bootrom,
2215 	 * so there is no need to reset the eMMC device.
2216 	 */
2217 	mmc_set_bus_width(mmc, 8);
2218 	mmc_set_clock(mmc, 1);
2219 	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
2220 	/* Send CMD7 to return the card to the stand-by state */
2221 	mmc_select_card(mmc, 0);
2222 	mmc->version = MMC_VERSION_UNKNOWN;
2223 	mmc->high_capacity = 1;
2224 	/*
2225 	 * The Rockchip bootrom sets the RCA to 2, so match it here;
2226 	 * otherwise fall back to the default value of 1.
2227 	 */
2228 #ifdef CONFIG_ARCH_ROCKCHIP
2229 	mmc->rca = 2;
2230 #else
2231 	mmc->rca = 1;
2232 #endif
2233 	return 0;
2234 }
2235 #else
2236 int mmc_start_init(struct mmc *mmc)
2237 {
2238 	bool no_card;
2239 	int err;
2240 
2241 	/* we pretend there's no card when init is NULL */
2242 	no_card = mmc_getcd(mmc) == 0;
2243 #if !CONFIG_IS_ENABLED(DM_MMC)
2244 	no_card = no_card || (mmc->cfg->ops->init == NULL);
2245 #endif
2246 	if (no_card) {
2247 		mmc->has_init = 0;
2248 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2249 		printf("MMC: no card present\n");
2250 #endif
2251 		return -ENOMEDIUM;
2252 	}
2253 
2254 	if (mmc->has_init)
2255 		return 0;
2256 
2257 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2258 	mmc_adapter_card_type_ident();
2259 #endif
2260 	err = mmc_power_init(mmc);
2261 	if (err)
2262 		return err;
2263 
2264 #if CONFIG_IS_ENABLED(DM_MMC)
2265 	/* The device has already been probed ready for use */
2266 #else
2267 	/* made sure it's not NULL earlier */
2268 	err = mmc->cfg->ops->init(mmc);
2269 	if (err)
2270 		return err;
2271 #endif
2272 	mmc_set_bus_width(mmc, 1);
2273 	mmc_set_clock(mmc, 1);
2274 	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
2275 
2276 	/* Reset the Card */
2277 	err = mmc_go_idle(mmc);
2278 
2279 	if (err)
2280 		return err;
2281 
2282 	/* The internal partition is reset to the user partition (0) on every CMD0 */
2283 	mmc_get_blk_desc(mmc)->hwpart = 0;
2284 
2285 	/* Test for SD version 2 */
2286 	err = mmc_send_if_cond(mmc);
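	/*
	 * A failure from SEND_IF_COND is not fatal here: pre-2.0 SD cards and
	 * MMC devices simply do not answer CMD8, so the error is ignored and
	 * the operating-conditions queries below decide what the card is.
	 */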
2287 
2288 	/* Now try to get the SD card's operating condition */
2289 	err = sd_send_op_cond(mmc);
2290 
2291 	/* If the command timed out, we check for an MMC card */
2292 	if (err == -ETIMEDOUT) {
2293 		err = mmc_send_op_cond(mmc);
2294 
2295 		if (err) {
2296 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2297 			printf("Card did not respond to voltage select!\n");
2298 #endif
2299 			return -EOPNOTSUPP;
2300 		}
2301 	}
2302 
2303 	if (!err)
2304 		mmc->init_in_progress = 1;
2305 
2306 	return err;
2307 }
2308 #endif
2309 
2310 static int mmc_complete_init(struct mmc *mmc)
2311 {
2312 	int err = 0;
2313 
2314 	mmc->init_in_progress = 0;
2315 	if (mmc->op_cond_pending)
2316 		err = mmc_complete_op_cond(mmc);
2317 
2318 	if (!err)
2319 		err = mmc_startup(mmc);
2320 	if (err)
2321 		mmc->has_init = 0;
2322 	else
2323 		mmc->has_init = 1;
2324 	return err;
2325 }
2326 
2327 int mmc_init(struct mmc *mmc)
2328 {
2329 	int err = 0;
2330 	__maybe_unused unsigned start;
2331 #if CONFIG_IS_ENABLED(DM_MMC)
2332 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2333 
2334 	upriv->mmc = mmc;
2335 #endif
2336 	if (mmc->has_init)
2337 		return 0;
2338 
2339 	start = get_timer(0);
2340 
2341 	if (!mmc->init_in_progress)
2342 		err = mmc_start_init(mmc);
2343 
2344 	if (!err)
2345 		err = mmc_complete_init(mmc);
2346 	if (err)
2347 		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2348 
2349 	return err;
2350 }
2351 
2352 int mmc_set_dsr(struct mmc *mmc, u16 val)
2353 {
2354 	mmc->dsr = val;
2355 	return 0;
2356 }
2357 
2358 /* CPU-specific MMC initializations */
2359 __weak int cpu_mmc_init(bd_t *bis)
2360 {
2361 	return -1;
2362 }
2363 
2364 /* board-specific MMC initializations. */
2365 __weak int board_mmc_init(bd_t *bis)
2366 {
2367 	return -1;
2368 }
2369 
2370 void mmc_set_preinit(struct mmc *mmc, int preinit)
2371 {
2372 	mmc->preinit = preinit;
2373 }
2374 
2375 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
2376 static int mmc_probe(bd_t *bis)
2377 {
2378 	return 0;
2379 }
2380 #elif CONFIG_IS_ENABLED(DM_MMC)
2381 static int mmc_probe(bd_t *bis)
2382 {
2383 	int ret, i;
2384 	struct uclass *uc;
2385 	struct udevice *dev;
2386 
2387 	ret = uclass_get(UCLASS_MMC, &uc);
2388 	if (ret)
2389 		return ret;
2390 
2391 	/*
2392 	 * Try to add the devices in sequence order. With driver model we
2393 	 * should really allow holes, but the current MMC list does not,
2394 	 * so requesting 0, 1, 3 yields 0, 1, 2.
2395 	 */
2396 	for (i = 0; ; i++) {
2397 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2398 		if (ret == -ENODEV)
2399 			break;
2400 	}
2401 	uclass_foreach_dev(dev, uc) {
2402 		ret = device_probe(dev);
2403 		if (ret)
2404 			printf("%s - probe failed: %d\n", dev->name, ret);
2405 	}
2406 
2407 	return 0;
2408 }
2409 #else
2410 static int mmc_probe(bd_t *bis)
2411 {
2412 	if (board_mmc_init(bis) < 0)
2413 		cpu_mmc_init(bis);
2414 
2415 	return 0;
2416 }
2417 #endif
2418 
2419 int mmc_initialize(bd_t *bis)
2420 {
2421 	static int initialized = 0;
2422 	int ret;
2423 	if (initialized)	/* Avoid initializing mmc multiple times */
2424 		return 0;
2425 	initialized = 1;
2426 
2427 #if !CONFIG_IS_ENABLED(BLK)
2428 #if !CONFIG_IS_ENABLED(MMC_TINY)
2429 	mmc_list_init();
2430 #endif
2431 #endif
2432 	ret = mmc_probe(bis);
2433 	if (ret)
2434 		return ret;
2435 
2436 #ifndef CONFIG_SPL_BUILD
2437 	print_mmc_devices(',');
2438 #endif
2439 
2440 	mmc_do_preinit();
2441 	return 0;
2442 }
2443 
2444 #ifdef CONFIG_CMD_BKOPS_ENABLE
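/*
 * Reads the EXT_CSD, checks bit 0 of BKOPS_SUPPORT and, if background
 * operations are supported but not yet enabled, sets bit 0 of BKOPS_EN
 * via CMD6 (mmc_switch) so the host may trigger manual background
 * operations later.
 */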
2445 int mmc_set_bkops_enable(struct mmc *mmc)
2446 {
2447 	int err;
2448 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2449 
2450 	err = mmc_send_ext_csd(mmc, ext_csd);
2451 	if (err) {
2452 		puts("Could not get ext_csd register values\n");
2453 		return err;
2454 	}
2455 
2456 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2457 		puts("Background operations not supported on device\n");
2458 		return -EMEDIUMTYPE;
2459 	}
2460 
2461 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2462 		puts("Background operations already enabled\n");
2463 		return 0;
2464 	}
2465 
2466 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2467 	if (err) {
2468 		puts("Failed to enable manual background operations\n");
2469 		return err;
2470 	}
2471 
2472 	puts("Enabled manual background operations\n");
2473 
2474 	return 0;
2475 }
2476 #endif
2477