1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
/*
 * SD allocation-unit (AU) size table, expressed in 512-byte sectors,
 * indexed by the 4-bit AU_SIZE field of the SD status register.
 * Index 0 means "not defined".
 */
static const unsigned int sd_au_size[] = {
	0, SZ_16K / 512, SZ_32K / 512,
	SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
	SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
	SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
	SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
};
32
/* Cached copy of the most recently read EXT_CSD register (512 bytes). */
static char mmc_ext_csd[512];
34
35 #if CONFIG_IS_ENABLED(MMC_TINY)
36 static struct mmc mmc_static;
find_mmc_device(int dev_num)37 struct mmc *find_mmc_device(int dev_num)
38 {
39 return &mmc_static;
40 }
41
/*
 * Start deferred initialisation of the single MMC_TINY device if its
 * preinit flag is set. With CONFIG_FSL_ESDHC_ADAPTER_IDENT the flag is
 * forced on so adapter identification can run early.
 */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}
51
mmc_get_blk_desc(struct mmc * mmc)52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
53 {
54 return &mmc->block_dev;
55 }
56 #endif
57
58 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Board hook for write-protect detection. The weak default returns -1
 * ("unknown") so mmc_getwp() falls back to the controller driver.
 */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
63
mmc_getwp(struct mmc * mmc)64 int mmc_getwp(struct mmc *mmc)
65 {
66 int wp;
67
68 wp = board_mmc_getwp(mmc);
69
70 if (wp < 0) {
71 if (mmc->cfg->ops->getwp)
72 wp = mmc->cfg->ops->getwp(mmc);
73 else
74 wp = 0;
75 }
76
77 return wp;
78 }
79
/*
 * Board hook for card-detect. The weak default returns -1 ("unknown")
 * so callers fall back to the controller driver's getcd op.
 */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
84 #endif
85
86 #ifdef CONFIG_MMC_TRACE
mmmc_trace_before_send(struct mmc * mmc,struct mmc_cmd * cmd)87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
88 {
89 printf("CMD_SEND:%d\n", cmd->cmdidx);
90 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
91 }
92
/*
 * Trace the outcome of a command: either the transport error code, or
 * the response words formatted according to the response type. For R2
 * (CID/CSD) the 16 response bytes are also hex-dumped MSB-first.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				/* walk each response word from its MSB down */
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
144
mmc_trace_state(struct mmc * mmc,struct mmc_cmd * cmd)145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
146 {
147 int status;
148
149 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
150 printf("CURR STATE:%d\n", status);
151 }
152 #endif
153
154 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_send_cmd(struct mmc * mmc,struct mmc_cmd * cmd,struct mmc_data * data)155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
156 {
157 int ret;
158
159 mmmc_trace_before_send(mmc, cmd);
160 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
161 mmmc_trace_after_send(mmc, cmd, ret);
162
163 return ret;
164 }
165 #endif
166
mmc_send_status(struct mmc * mmc,int timeout)167 int mmc_send_status(struct mmc *mmc, int timeout)
168 {
169 struct mmc_cmd cmd;
170 int err, retries = 5;
171
172 cmd.cmdidx = MMC_CMD_SEND_STATUS;
173 cmd.resp_type = MMC_RSP_R1;
174 if (!mmc_host_is_spi(mmc))
175 cmd.cmdarg = mmc->rca << 16;
176
177 while (1) {
178 err = mmc_send_cmd(mmc, &cmd, NULL);
179 if (!err) {
180 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
181 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
182 MMC_STATE_PRG)
183 break;
184 else if (cmd.response[0] & MMC_STATUS_MASK) {
185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
186 printf("Status Error: 0x%08X\n",
187 cmd.response[0]);
188 #endif
189 return -ECOMM;
190 }
191 } else if (--retries < 0)
192 return err;
193
194 if (timeout-- <= 0)
195 break;
196
197 udelay(1000);
198 }
199
200 mmc_trace_state(mmc, &cmd);
201 if (timeout <= 0) {
202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
203 printf("Timeout waiting card ready\n");
204 #endif
205 return -ETIMEDOUT;
206 }
207
208 return 0;
209 }
210
mmc_set_blocklen(struct mmc * mmc,int len)211 int mmc_set_blocklen(struct mmc *mmc, int len)
212 {
213 struct mmc_cmd cmd;
214
215 if (mmc_card_ddr(mmc))
216 return 0;
217
218 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
219 cmd.resp_type = MMC_RSP_R1;
220 cmd.cmdarg = len;
221
222 return mmc_send_cmd(mmc, &cmd, NULL);
223 }
224
/*
 * Issue CMD23 (SET_BLOCK_COUNT) ahead of a multi-block transfer.
 *
 * @blkcnt:       number of blocks (only the low 16 bits are used)
 * @is_rel_write: set the reliable-write flag (bit 31)
 *
 * Returns 0 on success or the mmc_send_cmd() error code.
 */
int mmc_set_blockcount(struct mmc *mmc, unsigned int blkcnt, bool is_rel_write)
{
	struct mmc_cmd cmd = {0};

	cmd.cmdidx = MMC_CMD_SET_BLOCK_COUNT;
	cmd.cmdarg = blkcnt & 0x0000FFFF;
	if (is_rel_write)
		cmd.cmdarg |= 1U << 31;	/* fix: 1 << 31 shifts into the sign bit (UB) */
	cmd.resp_type = MMC_RSP_R1;

	return mmc_send_cmd(mmc, &cmd, NULL);
}
237
/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18.
 * Multi-block reads are terminated with CMD12 (STOP_TRANSMISSION).
 * Returns the number of blocks read, or 0 on any failure.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int multi = (blkcnt > 1);

	cmd.cmdidx = multi ? MMC_CMD_READ_MULTIPLE_BLOCK :
			     MMC_CMD_READ_SINGLE_BLOCK;
	/* high-capacity cards address by block, others by byte offset */
	cmd.cmdarg = mmc->high_capacity ? start : start * mmc->read_bl_len;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (multi) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
278
279 #ifdef CONFIG_SPL_BLK_READ_PREPARE
/*
 * Queue (but do not complete) a block read via the host driver's
 * prepare path. For multi-block reads a CMD23 pre-defines the count,
 * so no STOP_TRANSMISSION is needed afterwards.
 * Returns @blkcnt on success, 0 on failure.
 */
static int mmc_read_blocks_prepare(struct mmc *mmc, void *dst, lbaint_t start,
				   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1) {
		/* Fix: a failed CMD23 would corrupt the open-ended read */
		if (mmc_set_blockcount(mmc, blkcnt, false))
			return 0;
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	} else {
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
	}

	/* high-capacity cards address by block, others by byte offset */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd_prepare(mmc, &cmd, &data))
		return 0;

	return blkcnt;
}
310 #endif
311
312 #ifdef CONFIG_SPL_BLK_READ_PREPARE
#if CONFIG_IS_ENABLED(BLK)
/*
 * Block-read entry point for the "prepare" (asynchronous) path: select
 * the hardware partition, range-check the request, and queue the read.
 * On a read failure the card is fully re-initialised and the read is
 * retried up to seven times. Returns blocks read, or 0 on failure.
 */
ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
			void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int timeout = 0;	/* counts re-init attempts, not time */
	int err;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);

	if (!mmc)
		return 0;

	/* MMC_TINY has no blk uclass, so switch the hw partition directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
		debug("%s: Failed to read blocks\n", __func__);
re_init_retry:
		timeout++;
		/*
		 * Try re-init seven times.
		 */
		if (timeout > 7) {
			printf("Re-init retry timeout\n");
			return 0;
		}

		/* force a full re-initialisation of the card */
		mmc->has_init = 0;
		if (mmc_init(mmc))
			return 0;

		if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
			printf("%s: Re-init mmc_read_blocks_prepare error\n",
			       __func__);
			goto re_init_retry;
		}
	}

	return blkcnt;
}
381 #endif
382
#if CONFIG_IS_ENABLED(BLK)
/*
 * Block-device read entry point: select the hardware partition,
 * range-check the request, and read in chunks of at most cfg->b_max
 * blocks. Each failed chunk triggers a full card re-init and retry
 * (up to seven times). Returns blocks read, or 0 on failure.
 */
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

#ifdef CONFIG_SPL_BLK_READ_PREPARE
	/* divert to the asynchronous "prepare" path when requested */
	if (block_dev->op_flag == BLK_PRE_RW)
#if CONFIG_IS_ENABLED(BLK)
		return mmc_bread_prepare(dev, start, blkcnt, dst);
#else
		return mmc_bread_prepare(block_dev, start, blkcnt, dst);
#endif
#endif
	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* MMC_TINY has no blk uclass, so switch the hw partition directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			int timeout = 0;	/* counts re-init attempts */
re_init_retry:
			timeout++;
			/*
			 * Try re-init seven times.
			 */
			if (timeout > 7) {
				printf("Re-init retry timeout\n");
				return 0;
			}

			/* force a full re-initialisation of the card */
			mmc->has_init = 0;
			if (mmc_init(mmc))
				return 0;

			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
				printf("%s: Re-init mmc_read_blocks error\n",
				       __func__);
				goto re_init_retry;
			}
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
466
/*
 * Clamp @clock into the host's [f_min, f_max] range, store it and
 * push the new setting to the controller.
 */
void mmc_set_clock(struct mmc *mmc, uint clock)
{
	const struct mmc_config *cfg = mmc->cfg;

	if (clock > cfg->f_max)
		clock = cfg->f_max;
	if (clock < cfg->f_min)
		clock = cfg->f_min;

	mmc->clock = clock;
	mmc_set_ios(mmc);
}
479
/* Record the new bus width and push it to the controller. */
static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}
486
/* Record the new timing mode (MMC_TIMING_*) and push it to the controller. */
static void mmc_set_timing(struct mmc *mmc, uint timing)
{
	mmc->timing = timing;
	mmc_set_ios(mmc);
}
492
mmc_go_idle(struct mmc * mmc)493 static int mmc_go_idle(struct mmc *mmc)
494 {
495 struct mmc_cmd cmd;
496 int err;
497
498 udelay(1000);
499
500 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
501 cmd.cmdarg = 0;
502 cmd.resp_type = MMC_RSP_NONE;
503
504 err = mmc_send_cmd(mmc, &cmd, NULL);
505
506 if (err)
507 return err;
508
509 udelay(2000);
510
511 return 0;
512 }
513
514 #ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * Negotiate the operating conditions of an SD card: loop sending
 * ACMD41 (APP_SEND_OP_COND) until the card clears its busy bit, then
 * record the OCR, capacity class (HCS) and version. For SPI hosts the
 * OCR is read separately with CMD58.
 * Returns 0 on success, -EOPNOTSUPP after ~1s of busy polling, or a
 * transport error.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD prefix: every iteration must resend CMD55 */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		/* only SD 2.0+ cards may be asked for high capacity */
		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
582 #endif
583
mmc_send_op_cond_iter(struct mmc * mmc,int use_arg)584 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
585 {
586 struct mmc_cmd cmd;
587 int err;
588
589 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
590 cmd.resp_type = MMC_RSP_R3;
591 cmd.cmdarg = 0;
592 if (use_arg && !mmc_host_is_spi(mmc))
593 cmd.cmdarg = OCR_HCS |
594 (mmc->cfg->voltages &
595 (mmc->ocr & OCR_VOLTAGE_MASK)) |
596 (mmc->ocr & OCR_ACCESS_MODE);
597
598 err = mmc_send_cmd(mmc, &cmd, NULL);
599 if (err)
600 return err;
601 mmc->ocr = cmd.response[0];
602 return 0;
603 }
604
605 #ifndef CONFIG_MMC_USE_PRE_CONFIG
mmc_send_op_cond(struct mmc * mmc)606 static int mmc_send_op_cond(struct mmc *mmc)
607 {
608 int err, i;
609
610 /* Some cards seem to need this */
611 mmc_go_idle(mmc);
612
613 /* Asking to the card its capabilities */
614 for (i = 0; i < 2; i++) {
615 err = mmc_send_op_cond_iter(mmc, i != 0);
616 if (err)
617 return err;
618
619 /* exit if not busy (flag seems to be inverted) */
620 if (mmc->ocr & OCR_BUSY)
621 break;
622 }
623 mmc->op_cond_pending = 1;
624 return 0;
625 }
626 #endif
/*
 * Finish the eMMC OCR negotiation started by mmc_send_op_cond(): if
 * the card is still busy, re-idle it and poll CMD1 for up to 1s. For
 * SPI hosts the OCR is then read with CMD58. On success the version
 * is reset to "unknown" (filled in later from CSD/EXT_CSD), the
 * high-capacity flag is derived from the OCR and the RCA is set to 1.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;	/* milliseconds, measured via get_timer() */
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
672
673
/*
 * Read the 512-byte EXT_CSD register with CMD8 into @ext_csd and cache
 * a copy in mmc_ext_csd. With CONFIG_MMC_USE_PRE_CONFIG, subsequent
 * calls are served from that cache; in SPL the data may additionally
 * be exported to a fixed memory address ("mmc-ecsd" DT property) with
 * a 0x55aa55aa marker — presumably for a later boot stage to pick up
 * (NOTE(review): consumer not visible here, confirm against the BROM/
 * next-stage code).
 */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

#ifdef CONFIG_MMC_USE_PRE_CONFIG
	static int initialized;
	if (initialized) {
		/* serve the cached copy; the card is not re-read */
		memcpy(ext_csd, mmc_ext_csd, 512);
		return 0;
	}

	initialized = 1;
#endif
	/* Get the Card Status Register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	/* cached even on error; caller's err return still signals failure */
	memcpy(mmc_ext_csd, ext_csd, 512);
#if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD)
	char *mmc_ecsd_base = NULL;
	ulong mmc_ecsd;

	mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0);
	mmc_ecsd_base = (char *)mmc_ecsd;
	if (mmc_ecsd_base) {
		memcpy(mmc_ecsd_base, ext_csd, 512);
		*(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa;
	}
#endif
	return err;
}
714
/*
 * Wait (up to 1s) for the card to leave the busy/programming state
 * after a CMD6 switch. When @send_status is set, poll CMD13 and check
 * for switch errors; otherwise use the host's busy-line detection, or
 * — if the host cannot detect busy at all — just sleep the full
 * timeout and assume success.
 * Returns 0 when idle, -EBADMSG on a switch error, -ETIMEDOUT, or a
 * CMD13 transport error.
 */
static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;	/* milliseconds */

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	/* no way to observe busy: wait the worst case and hope */
	if (!send_status && !mmc_can_card_busy(mmc)) {
		mdelay(timeout);
		return 0;
	}

	do {
		if (!send_status) {
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
				MMC_STATE_PRG;
		}

		/* only time out while the card is still busy */
		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}
755
/*
 * Write one EXT_CSD byte via CMD6 (SWITCH) and wait for the card to
 * finish, retrying the command up to three times on transport errors.
 * @send_status selects CMD13 polling vs. busy-line detection while
 * waiting. The @set argument is accepted for API symmetry but unused
 * (write-byte mode only).
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			u8 send_status)
{
	struct mmc_cmd cmd;
	int attempt;
	int ret = -ECOMM;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		     (index << 16) |
		     (value << 8);

	for (attempt = 0; attempt < 3; attempt++) {
		ret = mmc_send_cmd(mmc, &cmd, NULL);
		if (!ret)
			return mmc_poll_for_busy(mmc, send_status);
	}

	return ret;
}
778
/* EXT_CSD byte write via CMD6, always polling CMD13 for completion. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
783
/*
 * Find the widest working bus width for an eMMC (v4+): try 8-bit then
 * 4-bit, switching card and host each time and comparing read-only
 * EXT_CSD fields against the reference copy to verify the data path.
 * Returns the selected MMC_BUS_WIDTH_* (positive), 0 when nothing to
 * do, or a negative error.
 */
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	/* reference EXT_CSD, read at the current (narrow) bus width */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		/* re-read EXT_CSD at the new width to verify the bus */
		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
		     test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
		     test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
		    (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
		     test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			    &test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}
856
857 #ifndef CONFIG_MMC_SIMPLE
/*
 * Standard JEDEC/SD tuning block patterns returned by the card during
 * CMD19/CMD21; the host compares the received data against these to
 * find a working sample phase. 64 bytes for 4-bit, 128 for 8-bit bus.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
887
/*
 * Execute one tuning command (@opcode, CMD19/CMD21): read the tuning
 * block and compare it against the expected pattern for the current
 * bus width. On a transfer error the transmission is stopped before
 * returning.
 * Returns 0 on match, -EIO on mismatch, -EINVAL for a 1-bit bus,
 * -ENOMEM, or the transfer error.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *pattern;
	u8 *buf;
	int len, err = 0;

	switch (mmc->bus_width) {
	case MMC_BUS_WIDTH_8BIT:
		pattern = tuning_blk_pattern_8bit;
		len = sizeof(tuning_blk_pattern_8bit);
		break;
	case MMC_BUS_WIDTH_4BIT:
		pattern = tuning_blk_pattern_4bit;
		len = sizeof(tuning_blk_pattern_4bit);
		break;
	default:
		return -EINVAL;
	}

	buf = calloc(1, len);
	if (!buf)
		return -ENOMEM;

	cmd.cmdidx = opcode;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)buf;
	data.blocksize = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* abort the stuck transfer before giving up */
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		mmc_send_cmd(mmc, &cmd, NULL);
	} else if (memcmp(buf, pattern, len)) {
		err = -EIO;
	}

	free(buf);
	return err;
}
933
/*
 * Run the host driver's tuning callback with the opcode appropriate
 * for the card type (CMD19 for SD, CMD21 for eMMC HS200).
 * NOTE: the if/else below is deliberately split across the
 * CONFIG_DM_MMC preprocessor branches — the braces only balance after
 * preprocessing. Do not "fix" the indentation/structure.
 */
static int mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}
958
/* Run the tuning sequence required before HS200 timing can be used. */
static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}
963
964 #else
/* CONFIG_MMC_SIMPLE builds: tuning is unavailable, report success. */
int mmc_send_tuning(struct mmc *mmc, u32 opcode) { return 0; }
int mmc_execute_tuning(struct mmc *mmc) { return 0; }
static int mmc_hs200_tuning(struct mmc *mmc) { return 0; }
968 #endif
969
970 static int mmc_select_hs(struct mmc *mmc)
971 {
972 int ret;
973
974 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
975 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);
976
977 if (!ret)
978 mmc_set_timing(mmc, MMC_TIMING_MMC_HS);
979
980 return ret;
981 }
982
983 static int mmc_select_hs_ddr(struct mmc *mmc)
984 {
985 u32 ext_csd_bits;
986 int err = 0;
987
988 if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
989 return 0;
990
991 ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
992 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
993
994 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
995 EXT_CSD_BUS_WIDTH, ext_csd_bits);
996 if (err)
997 return err;
998
999 mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);
1000
1001 return 0;
1002 }
1003
1004 #ifndef CONFIG_MMC_SIMPLE
1005 static int mmc_select_hs200(struct mmc *mmc)
1006 {
1007 int ret;
1008
1009 /*
1010 * Set the bus width(4 or 8) with host's support and
1011 * switch to HS200 mode if bus width is set successfully.
1012 */
1013 ret = mmc_select_bus_width(mmc);
1014
1015 if (ret > 0) {
1016 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1017 EXT_CSD_HS_TIMING,
1018 EXT_CSD_TIMING_HS200, false);
1019
1020 if (ret)
1021 return ret;
1022
1023 mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
1024 }
1025
1026 return ret;
1027 }
1028
1029 static int mmc_switch_to_hs400(struct mmc *mmc)
1030 {
1031 u8 val, fixed_drv_type, card_drv_type, drive_strength;
1032
1033 fixed_drv_type = mmc->cfg->fixed_drv_type;
1034 card_drv_type = mmc->raw_driver_strength | mmc_driver_type_mask(0);
1035 drive_strength = (card_drv_type & mmc_driver_type_mask(fixed_drv_type))
1036 ? fixed_drv_type : 0;
1037 val = EXT_CSD_TIMING_HS400 | drive_strength << EXT_CSD_DRV_STR_SHIFT;
1038
1039 return __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, false);
1040 }
1041
/*
 * Switch an HS200-tuned card to HS400 following the mandated sequence:
 * drop to HS timing and frequency, switch the bus to 8-bit DDR, write
 * the HS400 timing, then raise the host to HS400. The order of these
 * steps is dictated by the eMMC spec — do not reorder.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int ret;

	/* Switch card to HS mode */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (ret)
		return ret;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	ret = mmc_send_status(mmc, 1000);
	if (ret)
		return ret;

	/* Switch card to DDR */
	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8);
	if (ret)
		return ret;

	/* Switch card to HS400 */
	ret = mmc_switch_to_hs400(mmc);
	if (ret)
		return ret;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);

	return ret;
}
1079
/*
 * Switch the card to HS400 with Enhanced Strobe: like mmc_select_hs400
 * but no tuning is needed — the bus-width switch additionally sets the
 * strobe bit, and the host is told to enable enhanced strobe at the
 * end. Step order is spec-mandated.
 */
static int mmc_select_hs400es(struct mmc *mmc)
{
	int err;

	/* Switch card to HS mode */
	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (err)
		return err;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	err = mmc_send_status(mmc, 1000);
	if (err)
		return err;

	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	/* 8-bit DDR bus plus the enhanced-strobe enable bit */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8 |
			 EXT_CSD_BUS_WIDTH_STROBE);
	if (err) {
		printf("switch to bus width for hs400 failed\n");
		return err;
	}

	/* Switch card to HS400 */
	err = mmc_switch_to_hs400(mmc);
	if (err)
		return err;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400ES);

	return mmc_set_enhanced_strobe(mmc);
}
1117 #else
/* CONFIG_MMC_SIMPLE builds: the fast modes are compiled out; no-ops. */
static int mmc_select_hs200(struct mmc *mmc) { return 0; }
static int mmc_select_hs400(struct mmc *mmc) { return 0; }
static int mmc_select_hs400es(struct mmc *mmc) { return 0; }
1121 #endif
1122
/*
 * Intersect the card's EXT_CSD CARD_TYPE capabilities with the host's
 * caps and return the resulting EXT_CSD_CARD_TYPE_* bit mask of modes
 * that both sides can use.
 */
static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, u-boot doesn't support signal voltage
	 * switch, therefor we assume that host support ddr52
	 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and
	 * hs400 are the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V;

	/* HS400ES additionally needs the card's STROBE_SUPPORT flag */
	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V |
			      EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}
1173
1174 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
1175 {
1176 int clock = 0;
1177
1178 if (mmc_card_hs(mmc))
1179 clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
1180 MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
1181 else if (mmc_card_hs200(mmc) ||
1182 mmc_card_hs400(mmc) ||
1183 mmc_card_hs400es(mmc))
1184 clock = MMC_HS200_MAX_DTR;
1185
1186 mmc_set_clock(mmc, clock);
1187 }
1188
/*
 * Select the best bus mode for an eMMC (v4+): read EXT_CSD, compute
 * the modes both sides support, then try HS400ES first, falling back
 * through HS200(+HS400) and plain HS. Also negotiates the bus width
 * and final clock. SPI hosts and pre-v4 cards are left untouched.
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

	if (avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
		err = mmc_select_bus_width(mmc);
		if (err > 0 && mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400es(mmc);
			mmc_set_bus_speed(mmc, avail_type);
			/* success: HS400ES is fully set up, done */
			if (!err)
				return err;
			/* on failure, fall through and try HS200/HS below */
		}
	}

	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	if (mmc_card_hs200(mmc)) {
		/*
		 * NOTE(review): a tuning failure is overwritten by the
		 * HS400 switch result below — confirm this is intended.
		 */
		err = mmc_hs200_tuning(mmc);
		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400(mmc);
			mmc_set_bus_speed(mmc, avail_type);
		}
	} else if (!mmc_card_hs400es(mmc)) {
		/* plain HS: widen the bus, then try DDR52 if available */
		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
			err = mmc_select_hs_ddr(mmc);
	}

	return err;
}
1250
1251 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1252 {
1253 switch (part_num) {
1254 case 0:
1255 mmc->capacity = mmc->capacity_user;
1256 break;
1257 case 1:
1258 case 2:
1259 mmc->capacity = mmc->capacity_boot;
1260 break;
1261 case 3:
1262 mmc->capacity = mmc->capacity_rpmb;
1263 break;
1264 case 4:
1265 case 5:
1266 case 6:
1267 case 7:
1268 mmc->capacity = mmc->capacity_gp[part_num - 4];
1269 break;
1270 default:
1271 return -1;
1272 }
1273
1274 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1275
1276 return 0;
1277 }
1278
1279 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1280 {
1281 int ret;
1282
1283 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
1284 (mmc->part_config & ~PART_ACCESS_MASK)
1285 | (part_num & PART_ACCESS_MASK));
1286
1287 /*
1288 * Set the capacity if the switch succeeded or was intended
1289 * to return to representing the raw device.
1290 */
1291 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1292 ret = mmc_set_capacity(mmc, part_num);
1293 mmc_get_blk_desc(mmc)->hwpart = part_num;
1294 }
1295
1296 return ret;
1297 }
1298
/*
 * Configure the eMMC hardware partitions (enhanced user data area and
 * the four general-purpose partitions) described by @conf.
 *
 * @mmc:  card to partition
 * @conf: requested partition geometry and write-reliability settings
 * @mode: MMC_HWPART_CONF_CHECK only validates the request,
 *        MMC_HWPART_CONF_SET writes the sizes/attributes,
 *        MMC_HWPART_CONF_COMPLETE additionally sets
 *        PARTITION_SETTING_COMPLETED, which makes the layout permanent
 *        after the next power cycle.
 *
 * Returns 0 on success or a negative error code.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards store the start in bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	/* Validation-only mode stops here, before any register writes */
	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1492
1493 #if !CONFIG_IS_ENABLED(DM_MMC)
1494 int mmc_getcd(struct mmc *mmc)
1495 {
1496 int cd;
1497
1498 cd = board_mmc_getcd(mmc);
1499
1500 if (cd < 0) {
1501 if (mmc->cfg->ops->getcd)
1502 cd = mmc->cfg->ops->getcd(mmc);
1503 else
1504 cd = 1;
1505 }
1506
1507 return cd;
1508 }
1509 #endif
1510
1511 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1512 {
1513 struct mmc_cmd cmd;
1514 struct mmc_data data;
1515
1516 /* Switch the frequency */
1517 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1518 cmd.resp_type = MMC_RSP_R1;
1519 cmd.cmdarg = (mode << 31) | 0xffffff;
1520 cmd.cmdarg &= ~(0xf << (group * 4));
1521 cmd.cmdarg |= value << (group * 4);
1522
1523 data.dest = (char *)resp;
1524 data.blocksize = 64;
1525 data.blocks = 1;
1526 data.flags = MMC_DATA_READ;
1527
1528 return mmc_send_cmd(mmc, &cmd, &data);
1529 }
1530
1531
/*
 * Read the SD card's SCR and, when both the card and the host support
 * it, switch the card to high-speed (50 MHz) mode via CMD6.
 *
 * Fills in mmc->version, mmc->scr[] and mmc->card_caps.
 * Returns 0 on success or a negative error code.
 */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* The SCR is transferred big-endian */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card support SD_HIGHSPPED.
	 * This can avoid furthur problem when the card runs in different
	 * mode between the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* Function group 1 result nibble 0x1 confirms the switch */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
1641
/*
 * Read the 512-bit SD Status register (ACMD13) and extract the
 * allocation unit size and erase timing parameters into mmc->ssr.
 *
 * Returns 0 on success (including when the AU size is invalid, which
 * is only logged), or a negative error code on command failure.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* The SD status is transferred big-endian */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* stored in milliseconds */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1697
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
/* Indexed by the CSD TRAN_SPEED frequency-unit field (0-3) */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1706
/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by the CSD TRAN_SPEED time-value field (0x0-0xF).
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1728
1729 #if !CONFIG_IS_ENABLED(DM_MMC)
1730 static void mmc_set_ios(struct mmc *mmc)
1731 {
1732 if (mmc->cfg->ops->set_ios)
1733 mmc->cfg->ops->set_ios(mmc);
1734 }
1735
1736 static bool mmc_card_busy(struct mmc *mmc)
1737 {
1738 if (!mmc->cfg->ops->card_busy)
1739 return -ENOSYS;
1740
1741 return mmc->cfg->ops->card_busy(mmc);
1742 }
1743
1744 static bool mmc_can_card_busy(struct mmc *)
1745 {
1746 return !!mmc->cfg->ops->card_busy;
1747 }
1748 #endif
1749
/*
 * Bring an identified card fully up: read CID/CSD, compute capacities,
 * select the card, parse EXT_CSD (eMMC v4+), switch to the best
 * supported bus width/speed and fill in the block device descriptor.
 *
 * Returns 0 on success or a negative error code.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq, tran_speed;
	u64 cmult, csize, capacity;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	bool has_parts = false;
	bool part_completed;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}
#endif
#ifndef CONFIG_MMC_USE_PRE_CONFIG
	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relatvie Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}
#endif
	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* Derive the MMC spec version from the CSD SPEC_VERS field */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* Compute the user area capacity from C_SIZE/C_SIZE_MULT */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR if implemented and a value was configured */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* select high speed to reduce initialization time */
		mmc_select_hs(mmc);
		mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

		/* check ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
			mmc->esr.mmc_can_trim = 1;

		/* boot and RPMB partition sizes come in 128 KiB units */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		if (part_completed) {
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					   (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					   (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					   (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

		mmc->raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	/* Negotiate the best bus speed the card and host both support */
	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		/* ACMD6 switches an SD card to a 4-bit bus */
		if (mmc->card_caps & MMC_MODE_4BIT) {
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = MMC_HIGH_52_MAX_DTR;
		else
			tran_speed = MMC_HIGH_26_MAX_DTR;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}
2148
2149 #ifndef CONFIG_MMC_USE_PRE_CONFIG
2150 static int mmc_send_if_cond(struct mmc *mmc)
2151 {
2152 struct mmc_cmd cmd;
2153 int err;
2154
2155 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2156 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2157 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2158 cmd.resp_type = MMC_RSP_R7;
2159
2160 err = mmc_send_cmd(mmc, &cmd, NULL);
2161
2162 if (err)
2163 return err;
2164
2165 if ((cmd.response[0] & 0xff) != 0xaa)
2166 return -EOPNOTSUPP;
2167 else
2168 mmc->version = SD_VERSION_2;
2169
2170 return 0;
2171 }
2172 #endif
2173
2174 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
	/* Default no-op; boards override this to switch card power rails */
}
2179 #endif
2180
2181 #ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * Enable the card's power supply. With driver model a "vmmc-supply"
 * regulator is used when present; a missing regulator is not an error.
 * Without driver model the legacy board hook is called instead.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* no regulator described for this host: nothing to do */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2211 #endif
2212 #ifdef CONFIG_MMC_USE_PRE_CONFIG
2213 static int mmc_select_card(struct mmc *mmc, int n)
2214 {
2215 struct mmc_cmd cmd;
2216 int err = 0;
2217
2218 memset(&cmd, 0, sizeof(struct mmc_cmd));
2219 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2220 mmc->rca = n;
2221 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2222 cmd.resp_type = MMC_RSP_R1;
2223 cmd.cmdarg = mmc->rca << 16;
2224 err = mmc_send_cmd(mmc, &cmd, NULL);
2225 }
2226
2227 return err;
2228 }
2229
/*
 * Lightweight init path (CONFIG_MMC_USE_PRE_CONFIG): reuse the bus
 * configuration left behind by the bootrom instead of fully resetting
 * and re-enumerating the eMMC device.
 */
int mmc_start_init(struct mmc *mmc)
{
	int bus_width = 1;
	/*
	 * We use the MMC config set by the bootrom.
	 * So it is no need to reset the eMMC device.
	 */
	if (mmc->cfg->host_caps & MMC_MODE_8BIT)
		bus_width = 8;
	else if (mmc->cfg->host_caps & MMC_MODE_4BIT)
		bus_width = 4;
	mmc_set_bus_width(mmc, bus_width);

	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
	/* Send cmd7 to return stand-by state*/
	mmc_select_card(mmc, 0);
	mmc->version = MMC_VERSION_UNKNOWN;
	mmc->high_capacity = 1;
	/*
	 * The RCA is set to 2 by rockchip bootrom, use the default
	 * value here.
	 */
#ifdef CONFIG_ARCH_ROCKCHIP
	mmc->rca = 2;
#else
	mmc->rca = 1;
#endif
	return 0;
}
2260 #else
/*
 * First phase of card initialization: detect card presence, power up
 * the host, reset the card and start OCR negotiation (SD first, then
 * MMC on timeout). mmc_complete_init() finishes the job.
 *
 * Returns 0 on success, -ENOMEDIUM when no card is present, or a
 * negative error code.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* start identification at 1-bit bus width, minimum clock, legacy timing */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	/* the CMD8 result is deliberately discarded: failure just means a
	 * pre-2.0 card, which the next command sorts out */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2333 #endif
2334
2335 static int mmc_complete_init(struct mmc *mmc)
2336 {
2337 int err = 0;
2338
2339 mmc->init_in_progress = 0;
2340 if (mmc->op_cond_pending)
2341 err = mmc_complete_op_cond(mmc);
2342
2343 if (!err)
2344 err = mmc_startup(mmc);
2345 if (err)
2346 mmc->has_init = 0;
2347 else
2348 mmc->has_init = 1;
2349 return err;
2350 }
2351
2352 int mmc_init(struct mmc *mmc)
2353 {
2354 int err = 0;
2355 __maybe_unused unsigned start;
2356 #if CONFIG_IS_ENABLED(DM_MMC)
2357 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2358
2359 upriv->mmc = mmc;
2360 #endif
2361 if (mmc->has_init)
2362 return 0;
2363
2364 start = get_timer(0);
2365
2366 if (!mmc->init_in_progress)
2367 err = mmc_start_init(mmc);
2368
2369 if (!err)
2370 err = mmc_complete_init(mmc);
2371 if (err)
2372 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2373
2374 return err;
2375 }
2376
/* Record the DSR value to be programmed into the card during startup */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2382
2383 /* CPU-specific MMC initializations */
2384 __weak int cpu_mmc_init(bd_t *bis)
2385 {
2386 return -1;
2387 }
2388
/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	/* Default: no board-level controllers registered; boards override */
	return -1;
}
2394
/* Mark the device for early initialization by mmc_do_preinit() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2399
2400 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
static int mmc_probe(bd_t *bis)
{
	/* With DM_MMC in SPL, devices are probed on first use instead */
	return 0;
}
2405 #elif CONFIG_IS_ENABLED(DM_MMC)
/*
 * Enumerate and probe all MMC devices known to driver model.
 * Individual probe failures are reported but do not abort the scan.
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
2434 #else
2435 static int mmc_probe(bd_t *bis)
2436 {
2437 if (board_mmc_init(bis) < 0)
2438 cpu_mmc_init(bis);
2439
2440 return 0;
2441 }
2442 #endif
2443
2444 int mmc_initialize(bd_t *bis)
2445 {
2446 static int initialized = 0;
2447 int ret;
2448 if (initialized) /* Avoid initializing mmc multiple times */
2449 return 0;
2450 initialized = 1;
2451
2452 #if !CONFIG_IS_ENABLED(BLK)
2453 #if !CONFIG_IS_ENABLED(MMC_TINY)
2454 mmc_list_init();
2455 #endif
2456 #endif
2457 ret = mmc_probe(bis);
2458 if (ret)
2459 return ret;
2460
2461 #ifndef CONFIG_SPL_BUILD
2462 print_mmc_devices(',');
2463 #endif
2464
2465 mmc_do_preinit();
2466 return 0;
2467 }
2468
2469 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device.
 * BKOPS_EN is one-time programmable: once set it stays enabled, so a
 * card that already has it on is reported and left untouched.
 *
 * Returns 0 on success (or if already enabled), -EMEDIUMTYPE when the
 * card lacks BKOPS support, or a negative error code.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
2501 #endif
2502