1 /* 2 * Copyright 2008, Freescale Semiconductor, Inc 3 * Andy Fleming 4 * 5 * Based vaguely on the Linux code 6 * 7 * SPDX-License-Identifier: GPL-2.0+ 8 */ 9 10 #include <config.h> 11 #include <common.h> 12 #include <command.h> 13 #include <dm.h> 14 #include <dm/device-internal.h> 15 #include <errno.h> 16 #include <mmc.h> 17 #include <part.h> 18 #include <power/regulator.h> 19 #include <malloc.h> 20 #include <memalign.h> 21 #include <linux/list.h> 22 #include <div64.h> 23 #include "mmc_private.h" 24 25 static const unsigned int sd_au_size[] = { 26 0, SZ_16K / 512, SZ_32K / 512, 27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, 31 }; 32 33 #if CONFIG_IS_ENABLED(MMC_TINY) 34 static struct mmc mmc_static; 35 struct mmc *find_mmc_device(int dev_num) 36 { 37 return &mmc_static; 38 } 39 40 void mmc_do_preinit(void) 41 { 42 struct mmc *m = &mmc_static; 43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 44 mmc_set_preinit(m, 1); 45 #endif 46 if (m->preinit) 47 mmc_start_init(m); 48 } 49 50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc) 51 { 52 return &mmc->block_dev; 53 } 54 #endif 55 56 #if !CONFIG_IS_ENABLED(DM_MMC) 57 __weak int board_mmc_getwp(struct mmc *mmc) 58 { 59 return -1; 60 } 61 62 int mmc_getwp(struct mmc *mmc) 63 { 64 int wp; 65 66 wp = board_mmc_getwp(mmc); 67 68 if (wp < 0) { 69 if (mmc->cfg->ops->getwp) 70 wp = mmc->cfg->ops->getwp(mmc); 71 else 72 wp = 0; 73 } 74 75 return wp; 76 } 77 78 __weak int board_mmc_getcd(struct mmc *mmc) 79 { 80 return -1; 81 } 82 #endif 83 84 #ifdef CONFIG_MMC_TRACE 85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd) 86 { 87 printf("CMD_SEND:%d\n", cmd->cmdidx); 88 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg); 89 } 90 91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret) 92 { 93 int i; 94 u8 *ptr; 95 96 if (ret) { 97 
printf("\t\tRET\t\t\t %d\n", ret); 98 } else { 99 switch (cmd->resp_type) { 100 case MMC_RSP_NONE: 101 printf("\t\tMMC_RSP_NONE\n"); 102 break; 103 case MMC_RSP_R1: 104 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 105 cmd->response[0]); 106 break; 107 case MMC_RSP_R1b: 108 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 109 cmd->response[0]); 110 break; 111 case MMC_RSP_R2: 112 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 113 cmd->response[0]); 114 printf("\t\t \t\t 0x%08X \n", 115 cmd->response[1]); 116 printf("\t\t \t\t 0x%08X \n", 117 cmd->response[2]); 118 printf("\t\t \t\t 0x%08X \n", 119 cmd->response[3]); 120 printf("\n"); 121 printf("\t\t\t\t\tDUMPING DATA\n"); 122 for (i = 0; i < 4; i++) { 123 int j; 124 printf("\t\t\t\t\t%03d - ", i*4); 125 ptr = (u8 *)&cmd->response[i]; 126 ptr += 3; 127 for (j = 0; j < 4; j++) 128 printf("%02X ", *ptr--); 129 printf("\n"); 130 } 131 break; 132 case MMC_RSP_R3: 133 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 134 cmd->response[0]); 135 break; 136 default: 137 printf("\t\tERROR MMC rsp not supported\n"); 138 break; 139 } 140 } 141 } 142 143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 144 { 145 int status; 146 147 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 148 printf("CURR STATE:%d\n", status); 149 } 150 #endif 151 152 #if !CONFIG_IS_ENABLED(DM_MMC) 153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 154 { 155 int ret; 156 157 mmmc_trace_before_send(mmc, cmd); 158 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 159 mmmc_trace_after_send(mmc, cmd, ret); 160 161 return ret; 162 } 163 #endif 164 165 int mmc_send_status(struct mmc *mmc, int timeout) 166 { 167 struct mmc_cmd cmd; 168 int err, retries = 5; 169 170 cmd.cmdidx = MMC_CMD_SEND_STATUS; 171 cmd.resp_type = MMC_RSP_R1; 172 if (!mmc_host_is_spi(mmc)) 173 cmd.cmdarg = mmc->rca << 16; 174 175 while (1) { 176 err = mmc_send_cmd(mmc, &cmd, NULL); 177 if (!err) { 178 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) && 179 
(cmd.response[0] & MMC_STATUS_CURR_STATE) != 180 MMC_STATE_PRG) 181 break; 182 else if (cmd.response[0] & MMC_STATUS_MASK) { 183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 184 printf("Status Error: 0x%08X\n", 185 cmd.response[0]); 186 #endif 187 return -ECOMM; 188 } 189 } else if (--retries < 0) 190 return err; 191 192 if (timeout-- <= 0) 193 break; 194 195 udelay(1000); 196 } 197 198 mmc_trace_state(mmc, &cmd); 199 if (timeout <= 0) { 200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 201 printf("Timeout waiting card ready\n"); 202 #endif 203 return -ETIMEDOUT; 204 } 205 206 return 0; 207 } 208 209 int mmc_set_blocklen(struct mmc *mmc, int len) 210 { 211 struct mmc_cmd cmd; 212 213 if (mmc_card_ddr(mmc)) 214 return 0; 215 216 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN; 217 cmd.resp_type = MMC_RSP_R1; 218 cmd.cmdarg = len; 219 220 return mmc_send_cmd(mmc, &cmd, NULL); 221 } 222 223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start, 224 lbaint_t blkcnt) 225 { 226 struct mmc_cmd cmd; 227 struct mmc_data data; 228 229 if (blkcnt > 1) 230 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 231 else 232 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 233 234 if (mmc->high_capacity) 235 cmd.cmdarg = start; 236 else 237 cmd.cmdarg = start * mmc->read_bl_len; 238 239 cmd.resp_type = MMC_RSP_R1; 240 241 data.dest = dst; 242 data.blocks = blkcnt; 243 data.blocksize = mmc->read_bl_len; 244 data.flags = MMC_DATA_READ; 245 246 if (mmc_send_cmd(mmc, &cmd, &data)) 247 return 0; 248 249 if (blkcnt > 1) { 250 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 251 cmd.cmdarg = 0; 252 cmd.resp_type = MMC_RSP_R1b; 253 if (mmc_send_cmd(mmc, &cmd, NULL)) { 254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 255 printf("mmc fail to send stop cmd\n"); 256 #endif 257 return 0; 258 } 259 } 260 261 return blkcnt; 262 } 263 264 #if CONFIG_IS_ENABLED(BLK) 265 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void 
*dst) 266 #else 267 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt, 268 void *dst) 269 #endif 270 { 271 #if CONFIG_IS_ENABLED(BLK) 272 struct blk_desc *block_dev = dev_get_uclass_platdata(dev); 273 #endif 274 int dev_num = block_dev->devnum; 275 int err; 276 lbaint_t cur, blocks_todo = blkcnt; 277 278 if (blkcnt == 0) 279 return 0; 280 281 struct mmc *mmc = find_mmc_device(dev_num); 282 if (!mmc) 283 return 0; 284 285 if (CONFIG_IS_ENABLED(MMC_TINY)) 286 err = mmc_switch_part(mmc, block_dev->hwpart); 287 else 288 err = blk_dselect_hwpart(block_dev, block_dev->hwpart); 289 290 if (err < 0) 291 return 0; 292 293 if ((start + blkcnt) > block_dev->lba) { 294 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 295 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n", 296 start + blkcnt, block_dev->lba); 297 #endif 298 return 0; 299 } 300 301 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) { 302 debug("%s: Failed to set blocklen\n", __func__); 303 return 0; 304 } 305 306 do { 307 cur = (blocks_todo > mmc->cfg->b_max) ? 308 mmc->cfg->b_max : blocks_todo; 309 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 310 debug("%s: Failed to read blocks\n", __func__); 311 int timeout = 0; 312 re_init_retry: 313 timeout++; 314 /* 315 * Try re-init seven times. 
316 */ 317 if (timeout > 7) { 318 printf("Re-init retry timeout\n"); 319 return 0; 320 } 321 322 mmc->has_init = 0; 323 if (mmc_init(mmc)) 324 return 0; 325 326 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 327 printf("%s: Re-init mmc_read_blocks error\n", 328 __func__); 329 goto re_init_retry; 330 } 331 } 332 blocks_todo -= cur; 333 start += cur; 334 dst += cur * mmc->read_bl_len; 335 } while (blocks_todo > 0); 336 337 return blkcnt; 338 } 339 340 void mmc_set_clock(struct mmc *mmc, uint clock) 341 { 342 if (clock > mmc->cfg->f_max) 343 clock = mmc->cfg->f_max; 344 345 if (clock < mmc->cfg->f_min) 346 clock = mmc->cfg->f_min; 347 348 mmc->clock = clock; 349 350 mmc_set_ios(mmc); 351 } 352 353 static void mmc_set_bus_width(struct mmc *mmc, uint width) 354 { 355 mmc->bus_width = width; 356 357 mmc_set_ios(mmc); 358 } 359 360 static void mmc_set_timing(struct mmc *mmc, uint timing) 361 { 362 mmc->timing = timing; 363 mmc_set_ios(mmc); 364 } 365 366 static int mmc_go_idle(struct mmc *mmc) 367 { 368 struct mmc_cmd cmd; 369 int err; 370 371 udelay(1000); 372 373 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE; 374 cmd.cmdarg = 0; 375 cmd.resp_type = MMC_RSP_NONE; 376 377 err = mmc_send_cmd(mmc, &cmd, NULL); 378 379 if (err) 380 return err; 381 382 udelay(2000); 383 384 return 0; 385 } 386 387 static int sd_send_op_cond(struct mmc *mmc) 388 { 389 int timeout = 1000; 390 int err; 391 struct mmc_cmd cmd; 392 393 while (1) { 394 cmd.cmdidx = MMC_CMD_APP_CMD; 395 cmd.resp_type = MMC_RSP_R1; 396 cmd.cmdarg = 0; 397 398 err = mmc_send_cmd(mmc, &cmd, NULL); 399 400 if (err) 401 return err; 402 403 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND; 404 cmd.resp_type = MMC_RSP_R3; 405 406 /* 407 * Most cards do not answer if some reserved bits 408 * in the ocr are set. However, Some controller 409 * can set bit 7 (reserved for low voltages), but 410 * how to manage low voltages SD card is not yet 411 * specified. 412 */ 413 cmd.cmdarg = mmc_host_is_spi(mmc) ? 
0 : 414 (mmc->cfg->voltages & 0xff8000); 415 416 if (mmc->version == SD_VERSION_2) 417 cmd.cmdarg |= OCR_HCS; 418 419 err = mmc_send_cmd(mmc, &cmd, NULL); 420 421 if (err) 422 return err; 423 424 if (cmd.response[0] & OCR_BUSY) 425 break; 426 427 if (timeout-- <= 0) 428 return -EOPNOTSUPP; 429 430 udelay(1000); 431 } 432 433 if (mmc->version != SD_VERSION_2) 434 mmc->version = SD_VERSION_1_0; 435 436 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 437 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 438 cmd.resp_type = MMC_RSP_R3; 439 cmd.cmdarg = 0; 440 441 err = mmc_send_cmd(mmc, &cmd, NULL); 442 443 if (err) 444 return err; 445 } 446 447 mmc->ocr = cmd.response[0]; 448 449 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 450 mmc->rca = 0; 451 452 return 0; 453 } 454 455 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg) 456 { 457 struct mmc_cmd cmd; 458 int err; 459 460 cmd.cmdidx = MMC_CMD_SEND_OP_COND; 461 cmd.resp_type = MMC_RSP_R3; 462 cmd.cmdarg = 0; 463 if (use_arg && !mmc_host_is_spi(mmc)) 464 cmd.cmdarg = OCR_HCS | 465 (mmc->cfg->voltages & 466 (mmc->ocr & OCR_VOLTAGE_MASK)) | 467 (mmc->ocr & OCR_ACCESS_MODE); 468 469 err = mmc_send_cmd(mmc, &cmd, NULL); 470 if (err) 471 return err; 472 mmc->ocr = cmd.response[0]; 473 return 0; 474 } 475 476 static int mmc_send_op_cond(struct mmc *mmc) 477 { 478 int err, i; 479 480 /* Some cards seem to need this */ 481 mmc_go_idle(mmc); 482 483 /* Asking to the card its capabilities */ 484 for (i = 0; i < 2; i++) { 485 err = mmc_send_op_cond_iter(mmc, i != 0); 486 if (err) 487 return err; 488 489 /* exit if not busy (flag seems to be inverted) */ 490 if (mmc->ocr & OCR_BUSY) 491 break; 492 } 493 mmc->op_cond_pending = 1; 494 return 0; 495 } 496 497 static int mmc_complete_op_cond(struct mmc *mmc) 498 { 499 struct mmc_cmd cmd; 500 int timeout = 1000; 501 uint start; 502 int err; 503 504 mmc->op_cond_pending = 0; 505 if (!(mmc->ocr & OCR_BUSY)) { 506 /* Some cards seem to need this */ 507 mmc_go_idle(mmc); 508 509 
start = get_timer(0); 510 while (1) { 511 err = mmc_send_op_cond_iter(mmc, 1); 512 if (err) 513 return err; 514 if (mmc->ocr & OCR_BUSY) 515 break; 516 if (get_timer(start) > timeout) 517 return -EOPNOTSUPP; 518 udelay(100); 519 } 520 } 521 522 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 523 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 524 cmd.resp_type = MMC_RSP_R3; 525 cmd.cmdarg = 0; 526 527 err = mmc_send_cmd(mmc, &cmd, NULL); 528 529 if (err) 530 return err; 531 532 mmc->ocr = cmd.response[0]; 533 } 534 535 mmc->version = MMC_VERSION_UNKNOWN; 536 537 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 538 mmc->rca = 1; 539 540 return 0; 541 } 542 543 544 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd) 545 { 546 struct mmc_cmd cmd; 547 struct mmc_data data; 548 int err; 549 550 /* Get the Card Status Register */ 551 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD; 552 cmd.resp_type = MMC_RSP_R1; 553 cmd.cmdarg = 0; 554 555 data.dest = (char *)ext_csd; 556 data.blocks = 1; 557 data.blocksize = MMC_MAX_BLOCK_LEN; 558 data.flags = MMC_DATA_READ; 559 560 err = mmc_send_cmd(mmc, &cmd, &data); 561 562 return err; 563 } 564 565 static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status) 566 { 567 struct mmc_cmd cmd; 568 u8 busy = true; 569 uint start; 570 int ret; 571 int timeout = 1000; 572 573 cmd.cmdidx = MMC_CMD_SEND_STATUS; 574 cmd.resp_type = MMC_RSP_R1; 575 cmd.cmdarg = mmc->rca << 16; 576 577 start = get_timer(0); 578 579 if (!send_status && !mmc_can_card_busy(mmc)) { 580 mdelay(timeout); 581 return 0; 582 } 583 584 do { 585 if (!send_status) { 586 busy = mmc_card_busy(mmc); 587 } else { 588 ret = mmc_send_cmd(mmc, &cmd, NULL); 589 590 if (ret) 591 return ret; 592 593 if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR) 594 return -EBADMSG; 595 busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) == 596 MMC_STATE_PRG; 597 } 598 599 if (get_timer(start) > timeout && busy) 600 return -ETIMEDOUT; 601 } while (busy); 602 603 return 0; 604 } 605 606 static int 
__mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value, 607 u8 send_status) 608 { 609 struct mmc_cmd cmd; 610 int retries = 3; 611 int ret; 612 613 cmd.cmdidx = MMC_CMD_SWITCH; 614 cmd.resp_type = MMC_RSP_R1b; 615 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 616 (index << 16) | 617 (value << 8); 618 619 do { 620 ret = mmc_send_cmd(mmc, &cmd, NULL); 621 622 if (!ret) 623 return mmc_poll_for_busy(mmc, send_status); 624 } while (--retries > 0 && ret); 625 626 return ret; 627 } 628 629 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value) 630 { 631 return __mmc_switch(mmc, set, index, value, true); 632 } 633 634 static int mmc_select_bus_width(struct mmc *mmc) 635 { 636 u32 ext_csd_bits[] = { 637 EXT_CSD_BUS_WIDTH_8, 638 EXT_CSD_BUS_WIDTH_4, 639 }; 640 u32 bus_widths[] = { 641 MMC_BUS_WIDTH_8BIT, 642 MMC_BUS_WIDTH_4BIT, 643 }; 644 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 645 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN); 646 u32 idx, bus_width = 0; 647 int err = 0; 648 649 if (mmc->version < MMC_VERSION_4 || 650 !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT))) 651 return 0; 652 653 err = mmc_send_ext_csd(mmc, ext_csd); 654 655 if (err) 656 return err; 657 658 idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1; 659 660 /* 661 * Unlike SD, MMC cards dont have a configuration register to notify 662 * supported bus width. So bus test command should be run to identify 663 * the supported bus width or compare the ext csd values of current 664 * bus width and ext csd values of 1 bit mode read earlier. 665 */ 666 for (; idx < ARRAY_SIZE(bus_widths); idx++) { 667 /* 668 * Host is capable of 8bit transfer, then switch 669 * the device to work in 8bit transfer mode. If the 670 * mmc switch command returns error then switch to 671 * 4bit transfer mode. On success set the corresponding 672 * bus width on the host. 
673 */ 674 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 675 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]); 676 if (err) 677 continue; 678 679 bus_width = bus_widths[idx]; 680 mmc_set_bus_width(mmc, bus_width); 681 682 err = mmc_send_ext_csd(mmc, test_csd); 683 684 if (err) 685 continue; 686 687 /* Only compare read only fields */ 688 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] == 689 test_csd[EXT_CSD_PARTITIONING_SUPPORT]) && 690 (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] == 691 test_csd[EXT_CSD_HC_WP_GRP_SIZE]) && 692 (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) && 693 (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] == 694 test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && 695 !memcmp(&ext_csd[EXT_CSD_SEC_CNT], 696 &test_csd[EXT_CSD_SEC_CNT], 4)) { 697 err = bus_width; 698 break; 699 } else { 700 err = -EBADMSG; 701 } 702 } 703 704 return err; 705 } 706 707 static const u8 tuning_blk_pattern_4bit[] = { 708 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, 709 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, 710 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, 711 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, 712 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, 713 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, 714 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, 715 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, 716 }; 717 718 static const u8 tuning_blk_pattern_8bit[] = { 719 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 720 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, 721 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, 722 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, 723 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, 724 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, 725 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, 726 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, 727 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 728 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 729 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 730 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 731 0xff, 0xff, 
0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 732 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 733 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 734 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 735 }; 736 737 int mmc_send_tuning(struct mmc *mmc, u32 opcode) 738 { 739 struct mmc_cmd cmd; 740 struct mmc_data data; 741 const u8 *tuning_block_pattern; 742 int size, err = 0; 743 u8 *data_buf; 744 745 if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 746 tuning_block_pattern = tuning_blk_pattern_8bit; 747 size = sizeof(tuning_blk_pattern_8bit); 748 } else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) { 749 tuning_block_pattern = tuning_blk_pattern_4bit; 750 size = sizeof(tuning_blk_pattern_4bit); 751 } else { 752 return -EINVAL; 753 } 754 755 data_buf = calloc(1, size); 756 if (!data_buf) 757 return -ENOMEM; 758 759 cmd.cmdidx = opcode; 760 cmd.resp_type = MMC_RSP_R1; 761 cmd.cmdarg = 0; 762 763 data.dest = (char *)data_buf; 764 data.blocksize = size; 765 data.blocks = 1; 766 data.flags = MMC_DATA_READ; 767 768 err = mmc_send_cmd(mmc, &cmd, &data); 769 if (err) 770 goto out; 771 772 if (memcmp(data_buf, tuning_block_pattern, size)) 773 err = -EIO; 774 out: 775 free(data_buf); 776 return err; 777 } 778 779 static int mmc_execute_tuning(struct mmc *mmc) 780 { 781 #ifdef CONFIG_DM_MMC 782 struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev); 783 #endif 784 u32 opcode; 785 786 if (IS_SD(mmc)) 787 opcode = MMC_SEND_TUNING_BLOCK; 788 else 789 opcode = MMC_SEND_TUNING_BLOCK_HS200; 790 791 #ifndef CONFIG_DM_MMC 792 if (mmc->cfg->ops->execute_tuning) { 793 return mmc->cfg->ops->execute_tuning(mmc, opcode); 794 #else 795 if (ops->execute_tuning) { 796 return ops->execute_tuning(mmc->dev, opcode); 797 #endif 798 } else { 799 debug("Tuning feature required for HS200 mode.\n"); 800 return -EIO; 801 } 802 } 803 804 static int mmc_hs200_tuning(struct mmc *mmc) 805 { 806 return mmc_execute_tuning(mmc); 807 } 808 809 static int mmc_select_hs(struct mmc *mmc) 810 { 811 int ret; 812 813 ret = 
mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 814 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS); 815 816 if (!ret) 817 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 818 819 return ret; 820 } 821 822 static int mmc_select_hs_ddr(struct mmc *mmc) 823 { 824 u32 ext_csd_bits; 825 int err = 0; 826 827 if (mmc->bus_width == MMC_BUS_WIDTH_1BIT) 828 return 0; 829 830 ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ? 831 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; 832 833 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 834 EXT_CSD_BUS_WIDTH, ext_csd_bits); 835 if (err) 836 return err; 837 838 mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52); 839 840 return 0; 841 } 842 843 static int mmc_select_hs200(struct mmc *mmc) 844 { 845 int ret; 846 847 /* 848 * Set the bus width(4 or 8) with host's support and 849 * switch to HS200 mode if bus width is set successfully. 850 */ 851 ret = mmc_select_bus_width(mmc); 852 853 if (ret > 0) { 854 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 855 EXT_CSD_HS_TIMING, 856 EXT_CSD_TIMING_HS200, false); 857 858 if (ret) 859 return ret; 860 861 mmc_set_timing(mmc, MMC_TIMING_MMC_HS200); 862 } 863 864 return ret; 865 } 866 867 static int mmc_select_hs400(struct mmc *mmc) 868 { 869 int ret; 870 871 /* Switch card to HS mode */ 872 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 873 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false); 874 if (ret) 875 return ret; 876 877 /* Set host controller to HS timing */ 878 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 879 880 /* Reduce frequency to HS frequency */ 881 mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR); 882 883 ret = mmc_send_status(mmc, 1000); 884 if (ret) 885 return ret; 886 887 /* Switch card to DDR */ 888 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 889 EXT_CSD_BUS_WIDTH, 890 EXT_CSD_DDR_BUS_WIDTH_8); 891 if (ret) 892 return ret; 893 894 /* Switch card to HS400 */ 895 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 896 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false); 897 if (ret) 898 return ret; 899 900 /* Set host controller 
to HS400 timing and frequency */ 901 mmc_set_timing(mmc, MMC_TIMING_MMC_HS400); 902 903 return ret; 904 } 905 906 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd) 907 { 908 u8 card_type; 909 u32 host_caps, avail_type = 0; 910 911 card_type = ext_csd[EXT_CSD_CARD_TYPE]; 912 host_caps = mmc->cfg->host_caps; 913 914 if ((host_caps & MMC_MODE_HS) && 915 (card_type & EXT_CSD_CARD_TYPE_26)) 916 avail_type |= EXT_CSD_CARD_TYPE_26; 917 918 if ((host_caps & MMC_MODE_HS) && 919 (card_type & EXT_CSD_CARD_TYPE_52)) 920 avail_type |= EXT_CSD_CARD_TYPE_52; 921 922 /* 923 * For the moment, u-boot doesn't support signal voltage 924 * switch, therefor we assume that host support ddr52 925 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and 926 * hs400 are the same). 927 */ 928 if ((host_caps & MMC_MODE_DDR_52MHz) && 929 (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)) 930 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; 931 932 if ((host_caps & MMC_MODE_HS200) && 933 (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V)) 934 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; 935 936 /* 937 * If host can support HS400, it means that host can also 938 * support HS200. 939 */ 940 if ((host_caps & MMC_MODE_HS400) && 941 (host_caps & MMC_MODE_8BIT) && 942 (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 943 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 944 EXT_CSD_CARD_TYPE_HS400_1_8V; 945 946 if ((host_caps & MMC_MODE_HS400ES) && 947 (host_caps & MMC_MODE_8BIT) && 948 ext_csd[EXT_CSD_STROBE_SUPPORT] && 949 (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 950 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 951 EXT_CSD_CARD_TYPE_HS400_1_8V | 952 EXT_CSD_CARD_TYPE_HS400ES; 953 954 return avail_type; 955 } 956 957 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type) 958 { 959 int clock = 0; 960 961 if (mmc_card_hs(mmc)) 962 clock = (avail_type & EXT_CSD_CARD_TYPE_52) ? 
963 MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR; 964 else if (mmc_card_hs200(mmc) || 965 mmc_card_hs400(mmc) || 966 mmc_card_hs400es(mmc)) 967 clock = MMC_HS200_MAX_DTR; 968 969 mmc_set_clock(mmc, clock); 970 } 971 972 static int mmc_change_freq(struct mmc *mmc) 973 { 974 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 975 u32 avail_type; 976 int err; 977 978 mmc->card_caps = 0; 979 980 if (mmc_host_is_spi(mmc)) 981 return 0; 982 983 /* Only version 4 supports high-speed */ 984 if (mmc->version < MMC_VERSION_4) 985 return 0; 986 987 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT; 988 989 err = mmc_send_ext_csd(mmc, ext_csd); 990 991 if (err) 992 return err; 993 994 avail_type = mmc_select_card_type(mmc, ext_csd); 995 996 if (avail_type & EXT_CSD_CARD_TYPE_HS200) 997 err = mmc_select_hs200(mmc); 998 else if (avail_type & EXT_CSD_CARD_TYPE_HS) 999 err = mmc_select_hs(mmc); 1000 else 1001 err = -EINVAL; 1002 1003 if (err) 1004 return err; 1005 1006 mmc_set_bus_speed(mmc, avail_type); 1007 1008 if (mmc_card_hs200(mmc)) { 1009 err = mmc_hs200_tuning(mmc); 1010 if (avail_type & EXT_CSD_CARD_TYPE_HS400 && 1011 mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 1012 err = mmc_select_hs400(mmc); 1013 mmc_set_bus_speed(mmc, avail_type); 1014 } 1015 } else if (!mmc_card_hs400es(mmc)) { 1016 err = mmc_select_bus_width(mmc) > 0 ? 
0 : err; 1017 if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52) 1018 err = mmc_select_hs_ddr(mmc); 1019 } 1020 1021 return err; 1022 } 1023 1024 static int mmc_set_capacity(struct mmc *mmc, int part_num) 1025 { 1026 switch (part_num) { 1027 case 0: 1028 mmc->capacity = mmc->capacity_user; 1029 break; 1030 case 1: 1031 case 2: 1032 mmc->capacity = mmc->capacity_boot; 1033 break; 1034 case 3: 1035 mmc->capacity = mmc->capacity_rpmb; 1036 break; 1037 case 4: 1038 case 5: 1039 case 6: 1040 case 7: 1041 mmc->capacity = mmc->capacity_gp[part_num - 4]; 1042 break; 1043 default: 1044 return -1; 1045 } 1046 1047 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len); 1048 1049 return 0; 1050 } 1051 1052 int mmc_switch_part(struct mmc *mmc, unsigned int part_num) 1053 { 1054 int ret; 1055 1056 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF, 1057 (mmc->part_config & ~PART_ACCESS_MASK) 1058 | (part_num & PART_ACCESS_MASK)); 1059 1060 /* 1061 * Set the capacity if the switch succeeded or was intended 1062 * to return to representing the raw device. 
1063 */ 1064 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) { 1065 ret = mmc_set_capacity(mmc, part_num); 1066 mmc_get_blk_desc(mmc)->hwpart = part_num; 1067 } 1068 1069 return ret; 1070 } 1071 1072 int mmc_hwpart_config(struct mmc *mmc, 1073 const struct mmc_hwpart_conf *conf, 1074 enum mmc_hwpart_conf_mode mode) 1075 { 1076 u8 part_attrs = 0; 1077 u32 enh_size_mult; 1078 u32 enh_start_addr; 1079 u32 gp_size_mult[4]; 1080 u32 max_enh_size_mult; 1081 u32 tot_enh_size_mult = 0; 1082 u8 wr_rel_set; 1083 int i, pidx, err; 1084 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1085 1086 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE) 1087 return -EINVAL; 1088 1089 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) { 1090 printf("eMMC >= 4.4 required for enhanced user data area\n"); 1091 return -EMEDIUMTYPE; 1092 } 1093 1094 if (!(mmc->part_support & PART_SUPPORT)) { 1095 printf("Card does not support partitioning\n"); 1096 return -EMEDIUMTYPE; 1097 } 1098 1099 if (!mmc->hc_wp_grp_size) { 1100 printf("Card does not define HC WP group size\n"); 1101 return -EMEDIUMTYPE; 1102 } 1103 1104 /* check partition alignment and total enhanced size */ 1105 if (conf->user.enh_size) { 1106 if (conf->user.enh_size % mmc->hc_wp_grp_size || 1107 conf->user.enh_start % mmc->hc_wp_grp_size) { 1108 printf("User data enhanced area not HC WP group " 1109 "size aligned\n"); 1110 return -EINVAL; 1111 } 1112 part_attrs |= EXT_CSD_ENH_USR; 1113 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size; 1114 if (mmc->high_capacity) { 1115 enh_start_addr = conf->user.enh_start; 1116 } else { 1117 enh_start_addr = (conf->user.enh_start << 9); 1118 } 1119 } else { 1120 enh_size_mult = 0; 1121 enh_start_addr = 0; 1122 } 1123 tot_enh_size_mult += enh_size_mult; 1124 1125 for (pidx = 0; pidx < 4; pidx++) { 1126 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) { 1127 printf("GP%i partition not HC WP group size " 1128 "aligned\n", pidx+1); 1129 return -EINVAL; 
1130 } 1131 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size; 1132 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) { 1133 part_attrs |= EXT_CSD_ENH_GP(pidx); 1134 tot_enh_size_mult += gp_size_mult[pidx]; 1135 } 1136 } 1137 1138 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) { 1139 printf("Card does not support enhanced attribute\n"); 1140 return -EMEDIUMTYPE; 1141 } 1142 1143 err = mmc_send_ext_csd(mmc, ext_csd); 1144 if (err) 1145 return err; 1146 1147 max_enh_size_mult = 1148 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) + 1149 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) + 1150 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT]; 1151 if (tot_enh_size_mult > max_enh_size_mult) { 1152 printf("Total enhanced size exceeds maximum (%u > %u)\n", 1153 tot_enh_size_mult, max_enh_size_mult); 1154 return -EMEDIUMTYPE; 1155 } 1156 1157 /* The default value of EXT_CSD_WR_REL_SET is device 1158 * dependent, the values can only be changed if the 1159 * EXT_CSD_HS_CTRL_REL bit is set. The values can be 1160 * changed only once and before partitioning is completed. 
*/ 1161 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 1162 if (conf->user.wr_rel_change) { 1163 if (conf->user.wr_rel_set) 1164 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR; 1165 else 1166 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR; 1167 } 1168 for (pidx = 0; pidx < 4; pidx++) { 1169 if (conf->gp_part[pidx].wr_rel_change) { 1170 if (conf->gp_part[pidx].wr_rel_set) 1171 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx); 1172 else 1173 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx); 1174 } 1175 } 1176 1177 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] && 1178 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) { 1179 puts("Card does not support host controlled partition write " 1180 "reliability settings\n"); 1181 return -EMEDIUMTYPE; 1182 } 1183 1184 if (ext_csd[EXT_CSD_PARTITION_SETTING] & 1185 EXT_CSD_PARTITION_SETTING_COMPLETED) { 1186 printf("Card already partitioned\n"); 1187 return -EPERM; 1188 } 1189 1190 if (mode == MMC_HWPART_CONF_CHECK) 1191 return 0; 1192 1193 /* Partitioning requires high-capacity size definitions */ 1194 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) { 1195 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1196 EXT_CSD_ERASE_GROUP_DEF, 1); 1197 1198 if (err) 1199 return err; 1200 1201 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1202 1203 /* update erase group size to be high-capacity */ 1204 mmc->erase_grp_size = 1205 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1206 1207 } 1208 1209 /* all OK, write the configuration */ 1210 for (i = 0; i < 4; i++) { 1211 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1212 EXT_CSD_ENH_START_ADDR+i, 1213 (enh_start_addr >> (i*8)) & 0xFF); 1214 if (err) 1215 return err; 1216 } 1217 for (i = 0; i < 3; i++) { 1218 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1219 EXT_CSD_ENH_SIZE_MULT+i, 1220 (enh_size_mult >> (i*8)) & 0xFF); 1221 if (err) 1222 return err; 1223 } 1224 for (pidx = 0; pidx < 4; pidx++) { 1225 for (i = 0; i < 3; i++) { 1226 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1227 EXT_CSD_GP_SIZE_MULT+pidx*3+i, 1228 
(gp_size_mult[pidx] >> (i*8)) & 0xFF); 1229 if (err) 1230 return err; 1231 } 1232 } 1233 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1234 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs); 1235 if (err) 1236 return err; 1237 1238 if (mode == MMC_HWPART_CONF_SET) 1239 return 0; 1240 1241 /* The WR_REL_SET is a write-once register but shall be 1242 * written before setting PART_SETTING_COMPLETED. As it is 1243 * write-once we can only write it when completing the 1244 * partitioning. */ 1245 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) { 1246 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1247 EXT_CSD_WR_REL_SET, wr_rel_set); 1248 if (err) 1249 return err; 1250 } 1251 1252 /* Setting PART_SETTING_COMPLETED confirms the partition 1253 * configuration but it only becomes effective after power 1254 * cycle, so we do not adjust the partition related settings 1255 * in the mmc struct. */ 1256 1257 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1258 EXT_CSD_PARTITION_SETTING, 1259 EXT_CSD_PARTITION_SETTING_COMPLETED); 1260 if (err) 1261 return err; 1262 1263 return 0; 1264 } 1265 1266 #if !CONFIG_IS_ENABLED(DM_MMC) 1267 int mmc_getcd(struct mmc *mmc) 1268 { 1269 int cd; 1270 1271 cd = board_mmc_getcd(mmc); 1272 1273 if (cd < 0) { 1274 if (mmc->cfg->ops->getcd) 1275 cd = mmc->cfg->ops->getcd(mmc); 1276 else 1277 cd = 1; 1278 } 1279 1280 return cd; 1281 } 1282 #endif 1283 1284 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp) 1285 { 1286 struct mmc_cmd cmd; 1287 struct mmc_data data; 1288 1289 /* Switch the frequency */ 1290 cmd.cmdidx = SD_CMD_SWITCH_FUNC; 1291 cmd.resp_type = MMC_RSP_R1; 1292 cmd.cmdarg = (mode << 31) | 0xffffff; 1293 cmd.cmdarg &= ~(0xf << (group * 4)); 1294 cmd.cmdarg |= value << (group * 4); 1295 1296 data.dest = (char *)resp; 1297 data.blocksize = 64; 1298 data.blocks = 1; 1299 data.flags = MMC_DATA_READ; 1300 1301 return mmc_send_cmd(mmc, &cmd, &data); 1302 } 1303 1304 1305 static int sd_change_freq(struct mmc *mmc) 1306 { 1307 
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	/* SPI mode has no bus-width/high-speed switching to do */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		/* retry the SCR read up to 3 times before giving up */
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR is big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field selects the physical-layer spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit promotes the card to 3.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed group is not busy */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card support SD_HIGHSPPED.
	 * This can avoid furthur problem when the card runs in different
	 * mode between the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* Function group 1 nibble == 1 confirms the switch took effect */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}

/*
 * Read the SD Status (ACMD13) register and extract the allocation unit
 * size plus erase timing parameters into mmc->ssr.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* retry up to 3 times before reporting the error */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* SSR words arrive big-endian; convert in place */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE field indexes the sd_au_size[] table */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
mmc->ssr.erase_timeout = (et * 1000) / es; 1462 mmc->ssr.erase_offset = eo * 1000; 1463 } 1464 } else { 1465 debug("Invalid Allocation Unit Size.\n"); 1466 } 1467 1468 return 0; 1469 } 1470 1471 /* frequency bases */ 1472 /* divided by 10 to be nice to platforms without floating point */ 1473 static const int fbase[] = { 1474 10000, 1475 100000, 1476 1000000, 1477 10000000, 1478 }; 1479 1480 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1481 * to platforms without floating point. 1482 */ 1483 static const u8 multipliers[] = { 1484 0, /* reserved */ 1485 10, 1486 12, 1487 13, 1488 15, 1489 20, 1490 25, 1491 30, 1492 35, 1493 40, 1494 45, 1495 50, 1496 55, 1497 60, 1498 70, 1499 80, 1500 }; 1501 1502 #if !CONFIG_IS_ENABLED(DM_MMC) 1503 static void mmc_set_ios(struct mmc *mmc) 1504 { 1505 if (mmc->cfg->ops->set_ios) 1506 mmc->cfg->ops->set_ios(mmc); 1507 } 1508 1509 static bool mmc_card_busy(struct mmc *mmc) 1510 { 1511 if (!mmc->cfg->ops->card_busy) 1512 return -ENOSYS; 1513 1514 return mmc->cfg->ops->card_busy(mmc); 1515 } 1516 1517 static bool mmc_can_card_busy(struct mmc *) 1518 { 1519 return !!mmc->cfg->ops->card_busy; 1520 } 1521 #endif 1522 1523 static int mmc_startup(struct mmc *mmc) 1524 { 1525 int err, i; 1526 uint mult, freq, tran_speed; 1527 u64 cmult, csize, capacity; 1528 struct mmc_cmd cmd; 1529 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1530 bool has_parts = false; 1531 bool part_completed; 1532 struct blk_desc *bdesc; 1533 1534 #ifdef CONFIG_MMC_SPI_CRC_ON 1535 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1536 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1537 cmd.resp_type = MMC_RSP_R1; 1538 cmd.cmdarg = 1; 1539 err = mmc_send_cmd(mmc, &cmd, NULL); 1540 1541 if (err) 1542 return err; 1543 } 1544 #endif 1545 1546 /* Put the Card in Identify Mode */ 1547 cmd.cmdidx = mmc_host_is_spi(mmc) ? 
			MMC_CMD_SEND_CID : MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relatvie Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* SD assigns its own RCA; MMC keeps the one we sent */
		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* CSD_STRUCTURE/SPEC_VERS field gives a first version estimate */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* CSD capacity fields; layout differs for high-capacity cards */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	/* clamp block lengths to what the stack can transfer */
	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR if the card implements it and one was configured */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		/* EXT_CSD_REV pins down the exact eMMC spec version */
		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
		/* NOTE(review): 'esr' looks suspicious here — the SD status
		 * struct is named 'ssr' elsewhere in this file; verify the
		 * 'esr.mmc_can_trim' field exists in struct mmc */
		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
			mmc->esr.mmc_can_trim = 1;

		/* boot/RPMB sizes are in 128 KiB units (<< 17) */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			/* 24-bit little-endian GP partition size multiplier */
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		if (part_completed) {
			/* enhanced user area size/start, in HC group units */
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			/* high-capacity start address is in 512-byte sectors */
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		if (mmc->card_caps & MMC_MODE_4BIT) {
			/* ACMD6 with arg 2 selects 4-bit bus width */
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = 50000000;
		else
			tran_speed = 25000000;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

/*
 * Send SD CMD8 (SEND_IF_COND) to probe for a version-2.00+ SD card.
 * A matching 0xaa echo marks the card as SD 2.0; -EOPNOTSUPP otherwise.
 */
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations.
 */
__weak void board_mmc_power_init(void)
{
}
#endif

/*
 * Enable the card's power supply: via the vmmc-supply regulator under
 * driver model, or via the legacy board hook otherwise.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* No regulator described: not an error, just skip */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * First phase of card initialization: power, controller init, CMD0 reset
 * and operating-condition negotiation (SD then MMC). Sets
 * init_in_progress so mmc_complete_init() can finish the job.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* start at the slowest, narrowest settings for identification */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

/*
 * Second phase of initialization: finish OCR negotiation if pending,
 * then run the full startup sequence. Updates has_init accordingly.
 */
static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

/*
 * Full (blocking) card initialization; no-op when already initialized.
 * Logs the elapsed time on failure.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

/* Record the DSR value to be programmed during startup */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* board-specific MMC initializations.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

/* Mark a device for early (pre-command) initialization */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with DM: devices are probed on demand, nothing to do here */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* DM: bind devices in sequence order, then probe every MMC device */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			/* report but keep probing the remaining devices */
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy: board hook first, CPU fallback if the board declines */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

/*
 * One-time MMC subsystem setup: list init, device probe, device listing
 * and preinit of devices that requested it. Safe to call repeatedly.
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device via
 * EXT_CSD, after checking support and current enable state.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	/* BKOPS_EN is a write-once switch in EXT_CSD */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif