/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <command.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};

static char mmc_ext_csd[512];

#if CONFIG_IS_ENABLED(MMC_TINY)
static struct mmc mmc_static;
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif

#ifdef CONFIG_MMC_TRACE
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif

int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			     MMC_STATE_PRG)
				break;
			else if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				printf("Status Error: 0x%08X\n",
				       cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}

int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;

	if (mmc_card_ddr(mmc))
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	return mmc_send_cmd(mmc, &cmd, NULL);
}

static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}

#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			int timeout = 0;
re_init_retry:
			timeout++;
			/*
			 * Try re-init seven times.
			 */
			if (timeout > 7) {
				printf("Re-init retry timeout\n");
				return 0;
			}

			mmc->has_init = 0;
			if (mmc_init(mmc))
				return 0;

			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
				printf("%s: Re-init mmc_read_blocks error\n",
				       __func__);
				goto re_init_retry;
			}
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
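/*
 * Usage sketch (hypothetical caller, non-BLK build): read the first
 * block of the currently selected hw partition of device 0 into "buf",
 * a caller-provided buffer of at least 512 bytes:
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (mmc && !mmc_init(mmc))
 *		mmc_bread(mmc_get_blk_desc(mmc), 0, 1, buf);
 *
 * With CONFIG_BLK enabled the first argument is the MMC block udevice
 * instead, and callers normally go through the generic block layer.
 */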
void mmc_set_clock(struct mmc *mmc, uint clock)
{
	if (clock > mmc->cfg->f_max)
		clock = mmc->cfg->f_max;

	if (clock < mmc->cfg->f_min)
		clock = mmc->cfg->f_min;

	mmc->clock = clock;

	mmc_set_ios(mmc);
}

static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}

static void mmc_set_timing(struct mmc *mmc, uint timing)
{
	mmc->timing = timing;
	mmc_set_ios(mmc);
}

static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

#ifndef CONFIG_MMC_USE_PRE_CONFIG
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, some controllers
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low-voltage SD cards is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
#endif

static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

#ifndef CONFIG_MMC_USE_PRE_CONFIG
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Ask the card for its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}
#endif

static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}

static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	static int initialized;
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	if (initialized) {
		memcpy(ext_csd, mmc_ext_csd, 512);
		return 0;
	}

	initialized = 1;

	/* Read the Extended CSD register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	memcpy(mmc_ext_csd, ext_csd, 512);
#if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD)
	char *mmc_ecsd_base = NULL;
	ulong mmc_ecsd;

	mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0);
	mmc_ecsd_base = (char *)mmc_ecsd;
	if (mmc_ecsd_base) {
		memcpy(mmc_ecsd_base, ext_csd, 512);
		*(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa;
	}
#endif
	return err;
}
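/*
 * Usage sketch: reading a single EXT_CSD field, e.g. the revision byte
 * that mmc_startup() below uses to derive the eMMC version:
 *
 *	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
 *
 *	if (!mmc_send_ext_csd(mmc, ext_csd))
 *		debug("EXT_CSD_REV %d\n", ext_csd[EXT_CSD_REV]);
 *
 * Note that the first call reads from the card and caches the result
 * in mmc_ext_csd[]; later calls return the cached copy.
 */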
static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	if (!send_status && !mmc_can_card_busy(mmc)) {
		mdelay(timeout);
		return 0;
	}

	do {
		if (!send_status) {
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
				MMC_STATE_PRG;
		}

		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}

static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			u8 send_status)
{
	struct mmc_cmd cmd;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		     (index << 16) |
		     (value << 8);

	do {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		if (!ret)
			return mmc_poll_for_busy(mmc, send_status);
	} while (--retries > 0 && ret);

	return ret;
}

int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
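/*
 * Usage sketch: mmc_switch() issues a single write-byte CMD6 and then
 * waits for the card to leave the programming state. For example, the
 * high-speed timing switch done by mmc_select_hs() below is simply:
 *
 *	mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *		   EXT_CSD_TIMING_HS);
 */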
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards don't have a configuration register that
	 * reports the supported bus width. So either the bus test command
	 * has to be run to identify the supported bus width, or the
	 * ext csd values of the current bus width have to be compared
	 * with the ext csd values read earlier in 1 bit mode.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * If the host is capable of 8bit transfer, switch the
		 * device to work in 8bit transfer mode. If the mmc
		 * switch command returns an error, fall back to 4bit
		 * transfer mode. On success, set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
		     test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
		     test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
		    (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
		     test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			    &test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int mmc_send_tuning(struct mmc *mmc, u32 opcode)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	data_buf = calloc(1, size);
	if (!data_buf)
		return -ENOMEM;

	cmd.cmdidx = opcode;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)data_buf;
	data.blocksize = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		goto out;

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;
out:
	free(data_buf);
	return err;
}
static int mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}

static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}

static int mmc_select_hs(struct mmc *mmc)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);

	if (!ret)
		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	return ret;
}

static int mmc_select_hs_ddr(struct mmc *mmc)
{
	u32 ext_csd_bits;
	int err = 0;

	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
		return 0;

	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
	if (err)
		return err;

	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);

	return 0;
}

static int mmc_select_hs200(struct mmc *mmc)
{
	int ret;

	/*
	 * Set the bus width (4 or 8 bit) according to the host's support
	 * and switch to HS200 mode if the bus width was set successfully.
	 */
	ret = mmc_select_bus_width(mmc);

	if (ret > 0) {
		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING,
				   EXT_CSD_TIMING_HS200, false);

		if (ret)
			return ret;

		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
	}

	return ret;
}

static int mmc_select_hs400(struct mmc *mmc)
{
	int ret;

	/* Switch card to HS mode */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (ret)
		return ret;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	ret = mmc_send_status(mmc, 1000);
	if (ret)
		return ret;

	/* Switch card to DDR */
	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8);
	if (ret)
		return ret;

	/* Switch card to HS400 */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
	if (ret)
		return ret;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);

	return ret;
}

static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, U-Boot doesn't support signal voltage
	 * switching, therefore we assume that the host supports ddr52
	 * at 1.8v or 3.3v I/O (1.2v I/O is not supported; hs200 and
	 * hs400 are treated the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V;

	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V |
			      EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}

static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
{
	int clock = 0;

	if (mmc_card_hs(mmc))
		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
	else if (mmc_card_hs200(mmc) ||
		 mmc_card_hs400(mmc) ||
		 mmc_card_hs400es(mmc))
		clock = MMC_HS200_MAX_DTR;

	mmc_set_clock(mmc, clock);
}

static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	if (mmc_card_hs200(mmc)) {
		err = mmc_hs200_tuning(mmc);
		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400(mmc);
			mmc_set_bus_speed(mmc, avail_type);
		}
	} else if (!mmc_card_hs400es(mmc)) {
		err = mmc_select_bus_width(mmc) > 0 ? 0 : err;
		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
			err = mmc_select_hs_ddr(mmc);
	}

	return err;
}

static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}
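/*
 * The part_num values above follow the EXT_CSD PARTITION_ACCESS
 * encoding: 0 is the user area, 1 and 2 are the boot partitions,
 * 3 is RPMB and 4-7 are the general purpose partitions GP1-GP4.
 */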
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
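/*
 * Usage sketch (hypothetical caller): selecting the first boot
 * partition for subsequent block accesses, then switching back to the
 * user area:
 *
 *	if (!mmc_switch_part(mmc, 1)) {
 *		... access boot partition 1 ...
 *		mmc_switch_part(mmc, 0);
 *	}
 */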
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
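/*
 * Note on the mode argument: MMC_HWPART_CONF_CHECK only validates the
 * requested layout against the card, MMC_HWPART_CONF_SET additionally
 * writes the size/attribute registers, and MMC_HWPART_CONF_COMPLETE
 * also writes WR_REL_SET and PARTITION_SETTING_COMPLETED, which makes
 * the one-time partitioning permanent after the next power cycle.
 */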
#if !CONFIG_IS_ENABLED(DM_MMC)
int mmc_getcd(struct mmc *mmc)
{
	int cd;

	cd = board_mmc_getcd(mmc);

	if (cd < 0) {
		if (mmc->cfg->ops->getcd)
			cd = mmc->cfg->ops->getcd(mmc);
		else
			cd = 1;
	}

	return cd;
}
#endif

static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}

static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch the card
	 * to HIGHSPEED mode even if the card supports SD_HIGHSPEED.
	 * This avoids further problems when the card and the host run in
	 * different modes.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
	      (mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}

static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}

/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};

#if !CONFIG_IS_ENABLED(DM_MMC)
static void mmc_set_ios(struct mmc *mmc)
{
	if (mmc->cfg->ops->set_ios)
		mmc->cfg->ops->set_ios(mmc);
}

static bool mmc_card_busy(struct mmc *mmc)
{
	if (!mmc->cfg->ops->card_busy)
		return -ENOSYS;

	return mmc->cfg->ops->card_busy(mmc);
}

static bool mmc_can_card_busy(struct mmc *mmc)
{
	return !!mmc->cfg->ops->card_busy;
}
#endif
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq, tran_speed;
	u64 cmult, csize, capacity;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	bool has_parts = false;
	bool part_completed;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}
#endif
#ifndef CONFIG_MMC_USE_PRE_CONFIG
	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}
#endif
	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}
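	/*
	 * The raw capacity computed below follows the CSD formula
	 * (C_SIZE + 1) << (C_SIZE_MULT + 2) blocks of read_bl_len bytes.
	 * For high-capacity cards cmult is fixed at 8, so with a 512 byte
	 * block length this works out to (csize + 1) * 512 KiB.
	 */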
	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
			mmc->esr.mmc_can_trim = 1;

		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		if (part_completed) {
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		if (mmc->card_caps & MMC_MODE_4BIT) {
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = 50000000;
		else
			tran_speed = 25000000;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
	(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
	!defined(CONFIG_USE_TINY_PRINTF))
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

#ifndef CONFIG_MMC_USE_PRE_CONFIG
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
}
#endif

#ifndef CONFIG_MMC_USE_PRE_CONFIG
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
#endif

#ifdef CONFIG_MMC_USE_PRE_CONFIG
static int mmc_select_card(struct mmc *mmc, int n)
{
	struct mmc_cmd cmd;
	int err = 0;

	memset(&cmd, 0, sizeof(struct mmc_cmd));
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		mmc->rca = n;
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);
	}

	return err;
}

int mmc_start_init(struct mmc *mmc)
{
	/*
	 * We use the MMC config set by the bootrom, so there is no need
	 * to reset the eMMC device.
	 */
	mmc_set_bus_width(mmc, 8);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
	/* Send CMD7 to return to the stand-by state */
	mmc_select_card(mmc, 0);
	mmc->version = MMC_VERSION_UNKNOWN;
	mmc->high_capacity = 1;
	/*
	 * The RCA is set to 2 by the Rockchip bootrom, so use that value
	 * here; otherwise fall back to the default RCA of 1.
	 */
#ifdef CONFIG_ARCH_ROCKCHIP
	mmc->rca = 2;
#else
	mmc->rca = 1;
#endif
	return 0;
}
#else
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition is reset to the user partition (0) at every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
#endif

static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
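/*
 * Usage sketch (hypothetical caller): bringing up a registered device
 * before accessing it, e.g. from command or SPL code:
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (!mmc || mmc_init(mmc))
 *		return -ENODEV;
 *
 * mmc_init() returns immediately once mmc->has_init is set, so it is
 * safe to call it again before each access.
 */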
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#ifdef CONFIG_CMD_BKOPS_ENABLE
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif