/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <command.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

/*
 * SD allocation-unit (AU) sizes in 512-byte sectors, indexed by the
 * AU_SIZE field of the SD status register.
 */
static const unsigned int sd_au_size[] = {
	0, SZ_16K / 512, SZ_32K / 512,
	SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
	SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
	SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
	SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
};

/* Cache of the first EXT_CSD read; see mmc_send_ext_csd() below */
static char mmc_ext_csd[512];

#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: exactly one static controller/card instance, no device list */
static struct mmc mmc_static;

/* dev_num is ignored: MMC_TINY supports only a single device */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Start init of the static device early if preinit was requested */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
/* Board hook: 1 = write-protected, 0 = writable, -1 = "don't know" */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Write-protect state: ask the board hook first, then the controller's
 * getwp op; defaults to "not protected" when neither can tell.
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Board hook for card detect; -1 means "not implemented, ask controller" */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif

#ifdef CONFIG_MMC_TRACE
/* Log command index and argument before it is sent to the card */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/* Log the result (error code or decoded response) of a sent command */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;
97 98 if (ret) { 99 printf("\t\tRET\t\t\t %d\n", ret); 100 } else { 101 switch (cmd->resp_type) { 102 case MMC_RSP_NONE: 103 printf("\t\tMMC_RSP_NONE\n"); 104 break; 105 case MMC_RSP_R1: 106 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 107 cmd->response[0]); 108 break; 109 case MMC_RSP_R1b: 110 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 111 cmd->response[0]); 112 break; 113 case MMC_RSP_R2: 114 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 115 cmd->response[0]); 116 printf("\t\t \t\t 0x%08X \n", 117 cmd->response[1]); 118 printf("\t\t \t\t 0x%08X \n", 119 cmd->response[2]); 120 printf("\t\t \t\t 0x%08X \n", 121 cmd->response[3]); 122 printf("\n"); 123 printf("\t\t\t\t\tDUMPING DATA\n"); 124 for (i = 0; i < 4; i++) { 125 int j; 126 printf("\t\t\t\t\t%03d - ", i*4); 127 ptr = (u8 *)&cmd->response[i]; 128 ptr += 3; 129 for (j = 0; j < 4; j++) 130 printf("%02X ", *ptr--); 131 printf("\n"); 132 } 133 break; 134 case MMC_RSP_R3: 135 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 136 cmd->response[0]); 137 break; 138 default: 139 printf("\t\tERROR MMC rsp not supported\n"); 140 break; 141 } 142 } 143 } 144 145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 146 { 147 int status; 148 149 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 150 printf("CURR STATE:%d\n", status); 151 } 152 #endif 153 154 #if !CONFIG_IS_ENABLED(DM_MMC) 155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 156 { 157 int ret; 158 159 mmmc_trace_before_send(mmc, cmd); 160 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 161 mmmc_trace_after_send(mmc, cmd, ret); 162 163 return ret; 164 } 165 #endif 166 167 int mmc_send_status(struct mmc *mmc, int timeout) 168 { 169 struct mmc_cmd cmd; 170 int err, retries = 5; 171 172 cmd.cmdidx = MMC_CMD_SEND_STATUS; 173 cmd.resp_type = MMC_RSP_R1; 174 if (!mmc_host_is_spi(mmc)) 175 cmd.cmdarg = mmc->rca << 16; 176 177 while (1) { 178 err = mmc_send_cmd(mmc, &cmd, NULL); 179 if (!err) { 180 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) 
&& 181 (cmd.response[0] & MMC_STATUS_CURR_STATE) != 182 MMC_STATE_PRG) 183 break; 184 else if (cmd.response[0] & MMC_STATUS_MASK) { 185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 186 printf("Status Error: 0x%08X\n", 187 cmd.response[0]); 188 #endif 189 return -ECOMM; 190 } 191 } else if (--retries < 0) 192 return err; 193 194 if (timeout-- <= 0) 195 break; 196 197 udelay(1000); 198 } 199 200 mmc_trace_state(mmc, &cmd); 201 if (timeout <= 0) { 202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 203 printf("Timeout waiting card ready\n"); 204 #endif 205 return -ETIMEDOUT; 206 } 207 208 return 0; 209 } 210 211 int mmc_set_blocklen(struct mmc *mmc, int len) 212 { 213 struct mmc_cmd cmd; 214 215 if (mmc_card_ddr(mmc)) 216 return 0; 217 218 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN; 219 cmd.resp_type = MMC_RSP_R1; 220 cmd.cmdarg = len; 221 222 return mmc_send_cmd(mmc, &cmd, NULL); 223 } 224 225 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start, 226 lbaint_t blkcnt) 227 { 228 struct mmc_cmd cmd; 229 struct mmc_data data; 230 231 if (blkcnt > 1) 232 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 233 else 234 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 235 236 if (mmc->high_capacity) 237 cmd.cmdarg = start; 238 else 239 cmd.cmdarg = start * mmc->read_bl_len; 240 241 cmd.resp_type = MMC_RSP_R1; 242 243 data.dest = dst; 244 data.blocks = blkcnt; 245 data.blocksize = mmc->read_bl_len; 246 data.flags = MMC_DATA_READ; 247 248 if (mmc_send_cmd(mmc, &cmd, &data)) 249 return 0; 250 251 if (blkcnt > 1) { 252 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 253 cmd.cmdarg = 0; 254 cmd.resp_type = MMC_RSP_R1b; 255 if (mmc_send_cmd(mmc, &cmd, NULL)) { 256 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 257 printf("mmc fail to send stop cmd\n"); 258 #endif 259 return 0; 260 } 261 } 262 263 return blkcnt; 264 } 265 266 #ifdef CONFIG_SPL_BLK_READ_PREPARE 267 static int mmc_read_blocks_prepare(struct mmc *mmc, void 
				   *dst, lbaint_t start,
				   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* high-capacity cards address by block, others by byte */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	/*
	 * "prepare" variant: starts the transfer via mmc_send_cmd_prepare()
	 * and does not issue CMD12 here (the transfer is completed later).
	 */
	if (mmc_send_cmd_prepare(mmc, &cmd, &data))
		return 0;

	return blkcnt;
}
#endif

#ifdef CONFIG_SPL_BLK_READ_PREPARE
/*
 * Block-read entry point for the SPL "prepare" (asynchronous start) path.
 * Validates the range, sets the block length and starts the read,
 * re-initializing the card up to seven times on failure.
 * Returns blkcnt on success, 0 on error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int timeout = 0;	/* counts re-init attempts, not time */
	int err;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);

	if (!mmc)
		return 0;

	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
		debug("%s: Failed to read blocks\n", __func__);
re_init_retry:
		timeout++;
		/*
		 * Try re-init seven times.
		 */
		if (timeout > 7) {
			printf("Re-init retry timeout\n");
			return 0;
		}

		/* force a full re-init, then retry the read */
		mmc->has_init = 0;
		if (mmc_init(mmc))
			return 0;

		if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) {
			printf("%s: Re-init mmc_read_blocks_prepare error\n",
			       __func__);
			goto re_init_retry;
		}
	}

	return blkcnt;
}
#endif

/*
 * Standard block-read entry point. Splits the request into chunks of at
 * most cfg->b_max blocks; on a failed chunk the card is re-initialized
 * and the chunk retried (up to seven times).
 * Returns blkcnt on success, 0 on error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

#ifdef CONFIG_SPL_BLK_READ_PREPARE
	/* divert to the asynchronous-start path when requested */
	if (block_dev->op_flag == BLK_PRE_RW)
#if CONFIG_IS_ENABLED(BLK)
		return mmc_bread_prepare(dev, start, blkcnt, dst);
#else
		return mmc_bread_prepare(block_dev, start, blkcnt, dst);
#endif
#endif
	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			int timeout = 0;	/* re-init attempt counter */
re_init_retry:
			timeout++;
			/*
			 * Try re-init seven times.
			 */
			if (timeout > 7) {
				printf("Re-init retry timeout\n");
				return 0;
			}

			mmc->has_init = 0;
			if (mmc_init(mmc))
				return 0;

			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
				printf("%s: Re-init mmc_read_blocks error\n",
				       __func__);
				goto re_init_retry;
			}
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}

/* Clamp @clock into [f_min, f_max] and program it via the ios callback */
void mmc_set_clock(struct mmc *mmc, uint clock)
{
	if (clock > mmc->cfg->f_max)
		clock = mmc->cfg->f_max;

	if (clock < mmc->cfg->f_min)
		clock = mmc->cfg->f_min;

	mmc->clock = clock;

	mmc_set_ios(mmc);
}

/* Record the new bus width and push it to the controller */
static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}

/* Record the new timing mode and push it to the controller */
static void mmc_set_timing(struct mmc *mmc, uint timing)
{
	mmc->timing = timing;
	mmc_set_ios(mmc);
}

/* CMD0: reset the card to idle state, with settle delays around it */
static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

#ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * SD power-up: loop ACMD41 until the card reports ready (OCR busy bit
 * set) or ~1s elapses. Also fixes the SD version and capacity class.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some
		 * reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	/* OCR_HCS set means SDHC/SDXC, i.e. block addressing */
	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
#endif

/*
 * One CMD1 (SEND_OP_COND) iteration. With @use_arg set (and not SPI)
 * the argument advertises our voltage window and access mode back to
 * the card. Updates mmc->ocr from the response.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

#ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * Start eMMC power-up: query capabilities with CMD1 and defer the busy
 * wait to mmc_complete_op_cond() (op_cond_pending is set).
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Asking to the card its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}
#endif

/*
 * Finish eMMC power-up started by mmc_send_op_cond(): poll CMD1 for up
 * to 1s until the card leaves busy, read the OCR (SPI hosts), then latch
 * version, capacity class and the default RCA of 1.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}


/*
 * Read the 512-byte EXT_CSD register (CMD8) into @ext_csd.
 *
 * NOTE(review): the result of the FIRST read is cached in the file-scope
 * buffer and returned verbatim for all later calls. This assumes a
 * single eMMC device and a never-changing EXT_CSD; it also means the
 * ext_csd/test_csd comparison in mmc_select_bus_width() compares the
 * cache against itself rather than re-reading the card. This looks like
 * a deliberate boot-time optimization - confirm it is intentional.
 */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	static int initialized;
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	if (initialized) {
		memcpy(ext_csd, mmc_ext_csd, 512);
		return 0;
	}

	initialized = 1;

	/* Get the Card Status Register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	memcpy(mmc_ext_csd, ext_csd, 512);
#if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD)
	/* optionally stash EXT_CSD (plus a magic marker) at a DT address */
	char *mmc_ecsd_base = NULL;
	ulong mmc_ecsd;

	mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0);
	mmc_ecsd_base = (char *)mmc_ecsd;
	if (mmc_ecsd_base) {
		memcpy(mmc_ecsd_base, ext_csd, 512);
		*(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa;
	}
#endif
	return err;
}

static int
/*
 * Wait for the card to leave the programming state after a CMD6 switch.
 * With @send_status, poll CMD13; otherwise watch the controller's busy
 * line, or simply sleep the full timeout if the host cannot report busy.
 */
mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	if (!send_status && !mmc_can_card_busy(mmc)) {
		/* no way to observe busy: wait the worst case */
		mdelay(timeout);
		return 0;
	}

	do {
		if (!send_status) {
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
				MMC_STATE_PRG;
		}

		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}

/*
 * CMD6 SWITCH: write @value to EXT_CSD byte @index, retrying the command
 * up to 3 times, then wait for the card to finish programming.
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			u8 send_status)
{
	struct mmc_cmd cmd;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(index << 16) |
		(value << 8);

	do {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		if (!ret)
			return mmc_poll_for_busy(mmc, send_status);
	} while (--retries > 0 && ret);

	return ret;
}

/* Public SWITCH wrapper that always verifies completion via CMD13 */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}

/*
 * Find the widest working bus width (try 8-bit, then 4-bit) by switching
 * the card and comparing read-only EXT_CSD fields before and after.
 * Returns the selected MMC_BUS_WIDTH_* (> 0), 0 when not applicable,
 * or a negative error.
 */
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	err =
	    mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
		     test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
		     test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
		    (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
		     test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			    &test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}

#ifndef CONFIG_MMC_SIMPLE
/* Standard 4-bit-bus tuning block pattern */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Standard 8-bit-bus tuning block pattern */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * Read one tuning block with @opcode (CMD19/CMD21) and compare it with
 * the expected pattern for the current bus width.
 * Returns 0 on match, -EIO on mismatch, -EINVAL for 1-bit buses,
 * -ENOMEM on allocation failure.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	data_buf = calloc(1, size);
	if (!data_buf)
		return -ENOMEM;

	cmd.cmdidx = opcode;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)data_buf;
	data.blocksize = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		goto out;

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;
out:
	free(data_buf);
	return err;
}

static int
/*
 * Run the controller's tuning procedure with the appropriate tuning
 * command (CMD19 for SD, CMD21 for eMMC HS200).
 * Note the deliberate brace sharing across the #ifdef branches below.
 */
mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}

static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}

#else
/* CONFIG_MMC_SIMPLE: tuning support compiled out, report success */
int mmc_send_tuning(struct mmc *mmc, u32 opcode) { return 0; }
int mmc_execute_tuning(struct mmc *mmc) { return 0; }
static int mmc_hs200_tuning(struct mmc *mmc) { return 0; }
#endif

/* Switch the card to high-speed timing (EXT_CSD HS_TIMING = HS) */
static int mmc_select_hs(struct mmc *mmc)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);

	if (!ret)
		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	return ret;
}

/* Switch an already-wide (4/8-bit) bus to DDR52 timing */
static int mmc_select_hs_ddr(struct mmc *mmc)
{
	u32 ext_csd_bits;
	int err = 0;

	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
		return 0;

	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
	if (err)
		return err;

	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);

	return 0;
}

#ifndef CONFIG_MMC_SIMPLE
static int mmc_select_hs200(struct mmc *mmc)
{
	int ret;

	/*
	 * Set the bus width(4 or 8) with host's support and
	 * switch to HS200 mode if bus width is set successfully.
	 */
	ret = mmc_select_bus_width(mmc);

	if (ret > 0) {
		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING,
				   EXT_CSD_TIMING_HS200, false);

		if (ret)
			return ret;

		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
	}

	return ret;
}

/*
 * HS400 switch sequence: drop to HS, verify with CMD13, set the 8-bit
 * DDR bus width, then switch timing to HS400.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int ret;

	/* Reduce frequency to HS frequency */
	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	/* Switch card to HS mode */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (ret)
		return ret;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	ret = mmc_send_status(mmc, 1000);
	if (ret)
		return ret;

	/* Switch card to DDR */
	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8);
	if (ret)
		return ret;

	/* Switch card to HS400 */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
	if (ret)
		return ret;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);

	return ret;
}
#else
/* CONFIG_MMC_SIMPLE: high-speed mode selection compiled out */
static int mmc_select_hs200(struct mmc *mmc) { return 0; }
static int mmc_select_hs400(struct mmc *mmc) { return 0; }
#endif

/*
 * Intersect the card's EXT_CSD CARD_TYPE with the host capabilities and
 * return the usable EXT_CSD_CARD_TYPE_* bits.
 */
static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, u-boot doesn't support signal
	 * voltage
	 * switch, therefor we assume that host support ddr52
	 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and
	 * hs400 are the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V;

	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V |
			      EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}

/* Program the bus clock matching the currently selected timing mode */
static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
{
	int clock = 0;

	if (mmc_card_hs(mmc))
		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
	else if (mmc_card_hs200(mmc) ||
		 mmc_card_hs400(mmc) ||
		 mmc_card_hs400es(mmc))
		clock = MMC_HS200_MAX_DTR;

	mmc_set_clock(mmc, clock);
}

/*
 * Negotiate the fastest mode/width the card and host both support:
 * HS200 (optionally upgrading to HS400) when available, else HS with the
 * widest working bus width and optionally DDR52.
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	if (mmc_card_hs200(mmc)) {
		/*
		 * NOTE(review): the tuning result is overwritten by the
		 * HS400 switch result when HS400 follows - confirm intended.
		 */
		err = mmc_hs200_tuning(mmc);
		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400(mmc);
			mmc_set_bus_speed(mmc, avail_type);
		}
	} else if (!mmc_card_hs400es(mmc)) {
		err = mmc_select_bus_width(mmc) > 0 ?
			0 : err;
		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
			err = mmc_select_hs_ddr(mmc);
	}

	return err;
}

/*
 * Point mmc->capacity at the capacity of hardware partition @part_num
 * (0 = user area, 1/2 = boot, 3 = RPMB, 4-7 = GP) and refresh the block
 * device's lba count. Returns -1 for an unknown partition number.
 */
static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}

/* Select hardware partition @part_num via EXT_CSD PARTITION_CONFIG */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}

/*
 * Validate and (optionally) write an eMMC hardware-partition layout:
 * enhanced user data area, GP partitions and write-reliability bits.
 * @mode selects check-only, write, or write-and-complete; completing
 * sets PARTITION_SETTING_COMPLETED, which only takes effect after a
 * power cycle and is irreversible.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed.
	 */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
(gp_size_mult[pidx] >> (i*8)) & 0xFF); 1375 if (err) 1376 return err; 1377 } 1378 } 1379 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1380 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs); 1381 if (err) 1382 return err; 1383 1384 if (mode == MMC_HWPART_CONF_SET) 1385 return 0; 1386 1387 /* The WR_REL_SET is a write-once register but shall be 1388 * written before setting PART_SETTING_COMPLETED. As it is 1389 * write-once we can only write it when completing the 1390 * partitioning. */ 1391 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) { 1392 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1393 EXT_CSD_WR_REL_SET, wr_rel_set); 1394 if (err) 1395 return err; 1396 } 1397 1398 /* Setting PART_SETTING_COMPLETED confirms the partition 1399 * configuration but it only becomes effective after power 1400 * cycle, so we do not adjust the partition related settings 1401 * in the mmc struct. */ 1402 1403 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1404 EXT_CSD_PARTITION_SETTING, 1405 EXT_CSD_PARTITION_SETTING_COMPLETED); 1406 if (err) 1407 return err; 1408 1409 return 0; 1410 } 1411 1412 #if !CONFIG_IS_ENABLED(DM_MMC) 1413 int mmc_getcd(struct mmc *mmc) 1414 { 1415 int cd; 1416 1417 cd = board_mmc_getcd(mmc); 1418 1419 if (cd < 0) { 1420 if (mmc->cfg->ops->getcd) 1421 cd = mmc->cfg->ops->getcd(mmc); 1422 else 1423 cd = 1; 1424 } 1425 1426 return cd; 1427 } 1428 #endif 1429 1430 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp) 1431 { 1432 struct mmc_cmd cmd; 1433 struct mmc_data data; 1434 1435 /* Switch the frequency */ 1436 cmd.cmdidx = SD_CMD_SWITCH_FUNC; 1437 cmd.resp_type = MMC_RSP_R1; 1438 cmd.cmdarg = (mode << 31) | 0xffffff; 1439 cmd.cmdarg &= ~(0xf << (group * 4)); 1440 cmd.cmdarg |= value << (group * 4); 1441 1442 data.dest = (char *)resp; 1443 data.blocksize = 64; 1444 data.blocks = 1; 1445 data.flags = MMC_DATA_READ; 1446 1447 return mmc_send_cmd(mmc, &cmd, &data); 1448 } 1449 1450 1451 static int sd_change_freq(struct mmc *mmc) 1452 { 1453 
int err; 1454 struct mmc_cmd cmd; 1455 ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2); 1456 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16); 1457 struct mmc_data data; 1458 int timeout; 1459 1460 mmc->card_caps = 0; 1461 1462 if (mmc_host_is_spi(mmc)) 1463 return 0; 1464 1465 /* Read the SCR to find out if this card supports higher speeds */ 1466 cmd.cmdidx = MMC_CMD_APP_CMD; 1467 cmd.resp_type = MMC_RSP_R1; 1468 cmd.cmdarg = mmc->rca << 16; 1469 1470 err = mmc_send_cmd(mmc, &cmd, NULL); 1471 1472 if (err) 1473 return err; 1474 1475 cmd.cmdidx = SD_CMD_APP_SEND_SCR; 1476 cmd.resp_type = MMC_RSP_R1; 1477 cmd.cmdarg = 0; 1478 1479 timeout = 3; 1480 1481 retry_scr: 1482 data.dest = (char *)scr; 1483 data.blocksize = 8; 1484 data.blocks = 1; 1485 data.flags = MMC_DATA_READ; 1486 1487 err = mmc_send_cmd(mmc, &cmd, &data); 1488 1489 if (err) { 1490 if (timeout--) 1491 goto retry_scr; 1492 1493 return err; 1494 } 1495 1496 mmc->scr[0] = __be32_to_cpu(scr[0]); 1497 mmc->scr[1] = __be32_to_cpu(scr[1]); 1498 1499 switch ((mmc->scr[0] >> 24) & 0xf) { 1500 case 0: 1501 mmc->version = SD_VERSION_1_0; 1502 break; 1503 case 1: 1504 mmc->version = SD_VERSION_1_10; 1505 break; 1506 case 2: 1507 mmc->version = SD_VERSION_2; 1508 if ((mmc->scr[0] >> 15) & 0x1) 1509 mmc->version = SD_VERSION_3; 1510 break; 1511 default: 1512 mmc->version = SD_VERSION_1_0; 1513 break; 1514 } 1515 1516 if (mmc->scr[0] & SD_DATA_4BIT) 1517 mmc->card_caps |= MMC_MODE_4BIT; 1518 1519 /* Version 1.0 doesn't support switching */ 1520 if (mmc->version == SD_VERSION_1_0) 1521 return 0; 1522 1523 timeout = 4; 1524 while (timeout--) { 1525 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1, 1526 (u8 *)switch_status); 1527 1528 if (err) 1529 return err; 1530 1531 /* The high-speed function is busy. 
Try again */ 1532 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY)) 1533 break; 1534 } 1535 1536 /* If high-speed isn't supported, we return */ 1537 if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)) 1538 return 0; 1539 1540 /* 1541 * If the host doesn't support SD_HIGHSPEED, do not switch card to 1542 * HIGHSPEED mode even if the card support SD_HIGHSPPED. 1543 * This can avoid furthur problem when the card runs in different 1544 * mode between the host. 1545 */ 1546 if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) && 1547 (mmc->cfg->host_caps & MMC_MODE_HS))) 1548 return 0; 1549 1550 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status); 1551 1552 if (err) 1553 return err; 1554 1555 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000) 1556 mmc->card_caps |= MMC_MODE_HS; 1557 1558 return 0; 1559 } 1560 1561 static int sd_read_ssr(struct mmc *mmc) 1562 { 1563 int err, i; 1564 struct mmc_cmd cmd; 1565 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16); 1566 struct mmc_data data; 1567 int timeout = 3; 1568 unsigned int au, eo, et, es; 1569 1570 cmd.cmdidx = MMC_CMD_APP_CMD; 1571 cmd.resp_type = MMC_RSP_R1; 1572 cmd.cmdarg = mmc->rca << 16; 1573 1574 err = mmc_send_cmd(mmc, &cmd, NULL); 1575 if (err) 1576 return err; 1577 1578 cmd.cmdidx = SD_CMD_APP_SD_STATUS; 1579 cmd.resp_type = MMC_RSP_R1; 1580 cmd.cmdarg = 0; 1581 1582 retry_ssr: 1583 data.dest = (char *)ssr; 1584 data.blocksize = 64; 1585 data.blocks = 1; 1586 data.flags = MMC_DATA_READ; 1587 1588 err = mmc_send_cmd(mmc, &cmd, &data); 1589 if (err) { 1590 if (timeout--) 1591 goto retry_ssr; 1592 1593 return err; 1594 } 1595 1596 for (i = 0; i < 16; i++) 1597 ssr[i] = be32_to_cpu(ssr[i]); 1598 1599 au = (ssr[2] >> 12) & 0xF; 1600 if ((au <= 9) || (mmc->version == SD_VERSION_3)) { 1601 mmc->ssr.au = sd_au_size[au]; 1602 es = (ssr[3] >> 24) & 0xFF; 1603 es |= (ssr[2] & 0xFF) << 8; 1604 et = (ssr[3] >> 18) & 0x3F; 1605 if (es && et) { 1606 eo = (ssr[3] >> 16) & 0x3; 1607 
mmc->ssr.erase_timeout = (et * 1000) / es; 1608 mmc->ssr.erase_offset = eo * 1000; 1609 } 1610 } else { 1611 debug("Invalid Allocation Unit Size.\n"); 1612 } 1613 1614 return 0; 1615 } 1616 1617 /* frequency bases */ 1618 /* divided by 10 to be nice to platforms without floating point */ 1619 static const int fbase[] = { 1620 10000, 1621 100000, 1622 1000000, 1623 10000000, 1624 }; 1625 1626 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1627 * to platforms without floating point. 1628 */ 1629 static const u8 multipliers[] = { 1630 0, /* reserved */ 1631 10, 1632 12, 1633 13, 1634 15, 1635 20, 1636 25, 1637 30, 1638 35, 1639 40, 1640 45, 1641 50, 1642 55, 1643 60, 1644 70, 1645 80, 1646 }; 1647 1648 #if !CONFIG_IS_ENABLED(DM_MMC) 1649 static void mmc_set_ios(struct mmc *mmc) 1650 { 1651 if (mmc->cfg->ops->set_ios) 1652 mmc->cfg->ops->set_ios(mmc); 1653 } 1654 1655 static bool mmc_card_busy(struct mmc *mmc) 1656 { 1657 if (!mmc->cfg->ops->card_busy) 1658 return -ENOSYS; 1659 1660 return mmc->cfg->ops->card_busy(mmc); 1661 } 1662 1663 static bool mmc_can_card_busy(struct mmc *) 1664 { 1665 return !!mmc->cfg->ops->card_busy; 1666 } 1667 #endif 1668 1669 static int mmc_startup(struct mmc *mmc) 1670 { 1671 int err, i; 1672 uint mult, freq, tran_speed; 1673 u64 cmult, csize, capacity; 1674 struct mmc_cmd cmd; 1675 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1676 bool has_parts = false; 1677 bool part_completed; 1678 struct blk_desc *bdesc; 1679 1680 #ifdef CONFIG_MMC_SPI_CRC_ON 1681 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1682 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1683 cmd.resp_type = MMC_RSP_R1; 1684 cmd.cmdarg = 1; 1685 err = mmc_send_cmd(mmc, &cmd, NULL); 1686 1687 if (err) 1688 return err; 1689 } 1690 #endif 1691 #ifndef CONFIG_MMC_USE_PRE_CONFIG 1692 /* Put the Card in Identify Mode */ 1693 cmd.cmdidx = mmc_host_is_spi(mmc) ? 
MMC_CMD_SEND_CID : 1694 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */ 1695 cmd.resp_type = MMC_RSP_R2; 1696 cmd.cmdarg = 0; 1697 1698 err = mmc_send_cmd(mmc, &cmd, NULL); 1699 1700 if (err) 1701 return err; 1702 1703 memcpy(mmc->cid, cmd.response, 16); 1704 1705 /* 1706 * For MMC cards, set the Relative Address. 1707 * For SD cards, get the Relatvie Address. 1708 * This also puts the cards into Standby State 1709 */ 1710 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1711 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR; 1712 cmd.cmdarg = mmc->rca << 16; 1713 cmd.resp_type = MMC_RSP_R6; 1714 1715 err = mmc_send_cmd(mmc, &cmd, NULL); 1716 1717 if (err) 1718 return err; 1719 1720 if (IS_SD(mmc)) 1721 mmc->rca = (cmd.response[0] >> 16) & 0xffff; 1722 } 1723 #endif 1724 /* Get the Card-Specific Data */ 1725 cmd.cmdidx = MMC_CMD_SEND_CSD; 1726 cmd.resp_type = MMC_RSP_R2; 1727 cmd.cmdarg = mmc->rca << 16; 1728 1729 err = mmc_send_cmd(mmc, &cmd, NULL); 1730 1731 if (err) 1732 return err; 1733 1734 mmc->csd[0] = cmd.response[0]; 1735 mmc->csd[1] = cmd.response[1]; 1736 mmc->csd[2] = cmd.response[2]; 1737 mmc->csd[3] = cmd.response[3]; 1738 1739 if (mmc->version == MMC_VERSION_UNKNOWN) { 1740 int version = (cmd.response[0] >> 26) & 0xf; 1741 1742 switch (version) { 1743 case 0: 1744 mmc->version = MMC_VERSION_1_2; 1745 break; 1746 case 1: 1747 mmc->version = MMC_VERSION_1_4; 1748 break; 1749 case 2: 1750 mmc->version = MMC_VERSION_2_2; 1751 break; 1752 case 3: 1753 mmc->version = MMC_VERSION_3; 1754 break; 1755 case 4: 1756 mmc->version = MMC_VERSION_4; 1757 break; 1758 default: 1759 mmc->version = MMC_VERSION_1_2; 1760 break; 1761 } 1762 } 1763 1764 /* divide frequency by 10, since the mults are 10x bigger */ 1765 freq = fbase[(cmd.response[0] & 0x7)]; 1766 mult = multipliers[((cmd.response[0] >> 3) & 0xf)]; 1767 1768 tran_speed = freq * mult; 1769 1770 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1); 1771 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf); 
1772 1773 if (IS_SD(mmc)) 1774 mmc->write_bl_len = mmc->read_bl_len; 1775 else 1776 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf); 1777 1778 if (mmc->high_capacity) { 1779 csize = (mmc->csd[1] & 0x3f) << 16 1780 | (mmc->csd[2] & 0xffff0000) >> 16; 1781 cmult = 8; 1782 } else { 1783 csize = (mmc->csd[1] & 0x3ff) << 2 1784 | (mmc->csd[2] & 0xc0000000) >> 30; 1785 cmult = (mmc->csd[2] & 0x00038000) >> 15; 1786 } 1787 1788 mmc->capacity_user = (csize + 1) << (cmult + 2); 1789 mmc->capacity_user *= mmc->read_bl_len; 1790 mmc->capacity_boot = 0; 1791 mmc->capacity_rpmb = 0; 1792 for (i = 0; i < 4; i++) 1793 mmc->capacity_gp[i] = 0; 1794 1795 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN) 1796 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1797 1798 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN) 1799 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1800 1801 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) { 1802 cmd.cmdidx = MMC_CMD_SET_DSR; 1803 cmd.cmdarg = (mmc->dsr & 0xffff) << 16; 1804 cmd.resp_type = MMC_RSP_NONE; 1805 if (mmc_send_cmd(mmc, &cmd, NULL)) 1806 printf("MMC: SET_DSR failed\n"); 1807 } 1808 1809 /* Select the card, and put it into Transfer Mode */ 1810 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1811 cmd.cmdidx = MMC_CMD_SELECT_CARD; 1812 cmd.resp_type = MMC_RSP_R1; 1813 cmd.cmdarg = mmc->rca << 16; 1814 err = mmc_send_cmd(mmc, &cmd, NULL); 1815 1816 if (err) 1817 return err; 1818 } 1819 1820 /* 1821 * For SD, its erase group is always one sector 1822 */ 1823 mmc->erase_grp_size = 1; 1824 mmc->part_config = MMCPART_NOAVAILABLE; 1825 if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) { 1826 /* check ext_csd version and capacity */ 1827 err = mmc_send_ext_csd(mmc, ext_csd); 1828 if (err) 1829 return err; 1830 if (ext_csd[EXT_CSD_REV] >= 2) { 1831 /* 1832 * According to the JEDEC Standard, the value of 1833 * ext_csd's capacity is valid if the value is more 1834 * than 2GB 1835 */ 1836 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0 1837 | ext_csd[EXT_CSD_SEC_CNT 
+ 1] << 8 1838 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16 1839 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 1840 capacity *= MMC_MAX_BLOCK_LEN; 1841 if ((capacity >> 20) > 2 * 1024) 1842 mmc->capacity_user = capacity; 1843 } 1844 1845 switch (ext_csd[EXT_CSD_REV]) { 1846 case 1: 1847 mmc->version = MMC_VERSION_4_1; 1848 break; 1849 case 2: 1850 mmc->version = MMC_VERSION_4_2; 1851 break; 1852 case 3: 1853 mmc->version = MMC_VERSION_4_3; 1854 break; 1855 case 5: 1856 mmc->version = MMC_VERSION_4_41; 1857 break; 1858 case 6: 1859 mmc->version = MMC_VERSION_4_5; 1860 break; 1861 case 7: 1862 mmc->version = MMC_VERSION_5_0; 1863 break; 1864 case 8: 1865 mmc->version = MMC_VERSION_5_1; 1866 break; 1867 } 1868 1869 /* The partition data may be non-zero but it is only 1870 * effective if PARTITION_SETTING_COMPLETED is set in 1871 * EXT_CSD, so ignore any data if this bit is not set, 1872 * except for enabling the high-capacity group size 1873 * definition (see below). */ 1874 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 1875 EXT_CSD_PARTITION_SETTING_COMPLETED); 1876 1877 /* store the partition info of emmc */ 1878 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 1879 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 1880 ext_csd[EXT_CSD_BOOT_MULT]) 1881 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 1882 if (part_completed && 1883 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 1884 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 1885 if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN) 1886 mmc->esr.mmc_can_trim = 1; 1887 1888 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 1889 1890 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 1891 1892 for (i = 0; i < 4; i++) { 1893 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 1894 uint mult = (ext_csd[idx + 2] << 16) + 1895 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 1896 if (mult) 1897 has_parts = true; 1898 if (!part_completed) 1899 continue; 1900 mmc->capacity_gp[i] = 
mult; 1901 mmc->capacity_gp[i] *= 1902 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1903 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1904 mmc->capacity_gp[i] <<= 19; 1905 } 1906 1907 if (part_completed) { 1908 mmc->enh_user_size = 1909 (ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) + 1910 (ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) + 1911 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 1912 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1913 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1914 mmc->enh_user_size <<= 19; 1915 mmc->enh_user_start = 1916 (ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) + 1917 (ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) + 1918 (ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) + 1919 ext_csd[EXT_CSD_ENH_START_ADDR]; 1920 if (mmc->high_capacity) 1921 mmc->enh_user_start <<= 9; 1922 } 1923 1924 /* 1925 * Host needs to enable ERASE_GRP_DEF bit if device is 1926 * partitioned. This bit will be lost every time after a reset 1927 * or power off. This will affect erase size. 1928 */ 1929 if (part_completed) 1930 has_parts = true; 1931 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 1932 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 1933 has_parts = true; 1934 if (has_parts) { 1935 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1936 EXT_CSD_ERASE_GROUP_DEF, 1); 1937 1938 if (err) 1939 return err; 1940 else 1941 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1942 } 1943 1944 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 1945 /* Read out group size from ext_csd */ 1946 mmc->erase_grp_size = 1947 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1948 /* 1949 * if high capacity and partition setting completed 1950 * SEC_COUNT is valid even if it is smaller than 2 GiB 1951 * JEDEC Standard JESD84-B45, 6.2.4 1952 */ 1953 if (mmc->high_capacity && part_completed) { 1954 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 1955 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 1956 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 1957 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 1958 capacity *= 
MMC_MAX_BLOCK_LEN; 1959 mmc->capacity_user = capacity; 1960 } 1961 } else { 1962 /* Calculate the group size from the csd value. */ 1963 int erase_gsz, erase_gmul; 1964 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10; 1965 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5; 1966 mmc->erase_grp_size = (erase_gsz + 1) 1967 * (erase_gmul + 1); 1968 } 1969 1970 mmc->hc_wp_grp_size = 1024 1971 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1972 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1973 1974 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 1975 } 1976 1977 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart); 1978 if (err) 1979 return err; 1980 1981 if (IS_SD(mmc)) 1982 err = sd_change_freq(mmc); 1983 else 1984 err = mmc_change_freq(mmc); 1985 1986 if (err) 1987 return err; 1988 1989 /* Restrict card's capabilities by what the host can do */ 1990 mmc->card_caps &= mmc->cfg->host_caps; 1991 1992 if (IS_SD(mmc)) { 1993 if (mmc->card_caps & MMC_MODE_4BIT) { 1994 cmd.cmdidx = MMC_CMD_APP_CMD; 1995 cmd.resp_type = MMC_RSP_R1; 1996 cmd.cmdarg = mmc->rca << 16; 1997 1998 err = mmc_send_cmd(mmc, &cmd, NULL); 1999 if (err) 2000 return err; 2001 2002 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH; 2003 cmd.resp_type = MMC_RSP_R1; 2004 cmd.cmdarg = 2; 2005 err = mmc_send_cmd(mmc, &cmd, NULL); 2006 if (err) 2007 return err; 2008 2009 mmc_set_bus_width(mmc, 4); 2010 } 2011 2012 err = sd_read_ssr(mmc); 2013 if (err) 2014 return err; 2015 2016 if (mmc->card_caps & MMC_MODE_HS) 2017 tran_speed = MMC_HIGH_52_MAX_DTR; 2018 else 2019 tran_speed = MMC_HIGH_26_MAX_DTR; 2020 2021 mmc_set_clock(mmc, tran_speed); 2022 } 2023 2024 /* Fix the block length for DDR mode */ 2025 if (mmc_card_ddr(mmc)) { 2026 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 2027 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 2028 } 2029 2030 /* fill in device description */ 2031 bdesc = mmc_get_blk_desc(mmc); 2032 bdesc->lun = 0; 2033 bdesc->hwpart = 0; 2034 bdesc->type = 0; 2035 bdesc->blksz = mmc->read_bl_len; 2036 bdesc->log2blksz = LOG2(bdesc->blksz); 2037 
bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len); 2038 #if !defined(CONFIG_SPL_BUILD) || \ 2039 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \ 2040 !defined(CONFIG_USE_TINY_PRINTF)) 2041 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x", 2042 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff), 2043 (mmc->cid[3] >> 16) & 0xffff); 2044 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff, 2045 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, 2046 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff, 2047 (mmc->cid[2] >> 24) & 0xff); 2048 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf, 2049 (mmc->cid[2] >> 16) & 0xf); 2050 #else 2051 bdesc->vendor[0] = 0; 2052 bdesc->product[0] = 0; 2053 bdesc->revision[0] = 0; 2054 #endif 2055 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT) 2056 part_init(bdesc); 2057 #endif 2058 2059 return 0; 2060 } 2061 2062 #ifndef CONFIG_MMC_USE_PRE_CONFIG 2063 static int mmc_send_if_cond(struct mmc *mmc) 2064 { 2065 struct mmc_cmd cmd; 2066 int err; 2067 2068 cmd.cmdidx = SD_CMD_SEND_IF_COND; 2069 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */ 2070 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa; 2071 cmd.resp_type = MMC_RSP_R7; 2072 2073 err = mmc_send_cmd(mmc, &cmd, NULL); 2074 2075 if (err) 2076 return err; 2077 2078 if ((cmd.response[0] & 0xff) != 0xaa) 2079 return -EOPNOTSUPP; 2080 else 2081 mmc->version = SD_VERSION_2; 2082 2083 return 0; 2084 } 2085 #endif 2086 2087 #if !CONFIG_IS_ENABLED(DM_MMC) 2088 /* board-specific MMC power initializations. 
*/ 2089 __weak void board_mmc_power_init(void) 2090 { 2091 } 2092 #endif 2093 2094 #ifndef CONFIG_MMC_USE_PRE_CONFIG 2095 static int mmc_power_init(struct mmc *mmc) 2096 { 2097 #if CONFIG_IS_ENABLED(DM_MMC) 2098 #if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD) 2099 struct udevice *vmmc_supply; 2100 int ret; 2101 2102 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply", 2103 &vmmc_supply); 2104 if (ret) { 2105 debug("%s: No vmmc supply\n", mmc->dev->name); 2106 return 0; 2107 } 2108 2109 ret = regulator_set_enable(vmmc_supply, true); 2110 if (ret) { 2111 puts("Error enabling VMMC supply\n"); 2112 return ret; 2113 } 2114 #endif 2115 #else /* !CONFIG_DM_MMC */ 2116 /* 2117 * Driver model should use a regulator, as above, rather than calling 2118 * out to board code. 2119 */ 2120 board_mmc_power_init(); 2121 #endif 2122 return 0; 2123 } 2124 #endif 2125 #ifdef CONFIG_MMC_USE_PRE_CONFIG 2126 static int mmc_select_card(struct mmc *mmc, int n) 2127 { 2128 struct mmc_cmd cmd; 2129 int err = 0; 2130 2131 memset(&cmd, 0, sizeof(struct mmc_cmd)); 2132 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 2133 mmc->rca = n; 2134 cmd.cmdidx = MMC_CMD_SELECT_CARD; 2135 cmd.resp_type = MMC_RSP_R1; 2136 cmd.cmdarg = mmc->rca << 16; 2137 err = mmc_send_cmd(mmc, &cmd, NULL); 2138 } 2139 2140 return err; 2141 } 2142 2143 int mmc_start_init(struct mmc *mmc) 2144 { 2145 /* 2146 * We use the MMC config set by the bootrom. 2147 * So it is no need to reset the eMMC device. 2148 */ 2149 mmc_set_bus_width(mmc, 8); 2150 mmc_set_clock(mmc, 1); 2151 mmc_set_timing(mmc, MMC_TIMING_LEGACY); 2152 /* Send cmd7 to return stand-by state*/ 2153 mmc_select_card(mmc, 0); 2154 mmc->version = MMC_VERSION_UNKNOWN; 2155 mmc->high_capacity = 1; 2156 /* 2157 * The RCA is set to 2 by rockchip bootrom, use the default 2158 * value here. 
2159 */ 2160 #ifdef CONFIG_ARCH_ROCKCHIP 2161 mmc->rca = 2; 2162 #else 2163 mmc->rca = 1; 2164 #endif 2165 return 0; 2166 } 2167 #else 2168 int mmc_start_init(struct mmc *mmc) 2169 { 2170 bool no_card; 2171 int err; 2172 2173 /* we pretend there's no card when init is NULL */ 2174 no_card = mmc_getcd(mmc) == 0; 2175 #if !CONFIG_IS_ENABLED(DM_MMC) 2176 no_card = no_card || (mmc->cfg->ops->init == NULL); 2177 #endif 2178 if (no_card) { 2179 mmc->has_init = 0; 2180 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 2181 printf("MMC: no card present\n"); 2182 #endif 2183 return -ENOMEDIUM; 2184 } 2185 2186 if (mmc->has_init) 2187 return 0; 2188 2189 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 2190 mmc_adapter_card_type_ident(); 2191 #endif 2192 err = mmc_power_init(mmc); 2193 if (err) 2194 return err; 2195 2196 #if CONFIG_IS_ENABLED(DM_MMC) 2197 /* The device has already been probed ready for use */ 2198 #else 2199 /* made sure it's not NULL earlier */ 2200 err = mmc->cfg->ops->init(mmc); 2201 if (err) 2202 return err; 2203 #endif 2204 mmc_set_bus_width(mmc, 1); 2205 mmc_set_clock(mmc, 1); 2206 mmc_set_timing(mmc, MMC_TIMING_LEGACY); 2207 2208 /* Reset the Card */ 2209 err = mmc_go_idle(mmc); 2210 2211 if (err) 2212 return err; 2213 2214 /* The internal partition reset to user partition(0) at every CMD0*/ 2215 mmc_get_blk_desc(mmc)->hwpart = 0; 2216 2217 /* Test for SD version 2 */ 2218 err = mmc_send_if_cond(mmc); 2219 2220 /* Now try to get the SD card's operating condition */ 2221 err = sd_send_op_cond(mmc); 2222 2223 /* If the command timed out, we check for an MMC card */ 2224 if (err == -ETIMEDOUT) { 2225 err = mmc_send_op_cond(mmc); 2226 2227 if (err) { 2228 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 2229 printf("Card did not respond to voltage select!\n"); 2230 #endif 2231 return -EOPNOTSUPP; 2232 } 2233 } 2234 2235 if (!err) 2236 mmc->init_in_progress = 1; 2237 2238 return err; 2239 } 2240 #endif 2241 2242 static int 
mmc_complete_init(struct mmc *mmc) 2243 { 2244 int err = 0; 2245 2246 mmc->init_in_progress = 0; 2247 if (mmc->op_cond_pending) 2248 err = mmc_complete_op_cond(mmc); 2249 2250 if (!err) 2251 err = mmc_startup(mmc); 2252 if (err) 2253 mmc->has_init = 0; 2254 else 2255 mmc->has_init = 1; 2256 return err; 2257 } 2258 2259 int mmc_init(struct mmc *mmc) 2260 { 2261 int err = 0; 2262 __maybe_unused unsigned start; 2263 #if CONFIG_IS_ENABLED(DM_MMC) 2264 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev); 2265 2266 upriv->mmc = mmc; 2267 #endif 2268 if (mmc->has_init) 2269 return 0; 2270 2271 start = get_timer(0); 2272 2273 if (!mmc->init_in_progress) 2274 err = mmc_start_init(mmc); 2275 2276 if (!err) 2277 err = mmc_complete_init(mmc); 2278 if (err) 2279 printf("%s: %d, time %lu\n", __func__, err, get_timer(start)); 2280 2281 return err; 2282 } 2283 2284 int mmc_set_dsr(struct mmc *mmc, u16 val) 2285 { 2286 mmc->dsr = val; 2287 return 0; 2288 } 2289 2290 /* CPU-specific MMC initializations */ 2291 __weak int cpu_mmc_init(bd_t *bis) 2292 { 2293 return -1; 2294 } 2295 2296 /* board-specific MMC initializations. */ 2297 __weak int board_mmc_init(bd_t *bis) 2298 { 2299 return -1; 2300 } 2301 2302 void mmc_set_preinit(struct mmc *mmc, int preinit) 2303 { 2304 mmc->preinit = preinit; 2305 } 2306 2307 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD) 2308 static int mmc_probe(bd_t *bis) 2309 { 2310 return 0; 2311 } 2312 #elif CONFIG_IS_ENABLED(DM_MMC) 2313 static int mmc_probe(bd_t *bis) 2314 { 2315 int ret, i; 2316 struct uclass *uc; 2317 struct udevice *dev; 2318 2319 ret = uclass_get(UCLASS_MMC, &uc); 2320 if (ret) 2321 return ret; 2322 2323 /* 2324 * Try to add them in sequence order. Really with driver model we 2325 * should allow holes, but the current MMC list does not allow that. 2326 * So if we request 0, 1, 3 we will get 0, 1, 2. 
2327 */ 2328 for (i = 0; ; i++) { 2329 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev); 2330 if (ret == -ENODEV) 2331 break; 2332 } 2333 uclass_foreach_dev(dev, uc) { 2334 ret = device_probe(dev); 2335 if (ret) 2336 printf("%s - probe failed: %d\n", dev->name, ret); 2337 } 2338 2339 return 0; 2340 } 2341 #else 2342 static int mmc_probe(bd_t *bis) 2343 { 2344 if (board_mmc_init(bis) < 0) 2345 cpu_mmc_init(bis); 2346 2347 return 0; 2348 } 2349 #endif 2350 2351 int mmc_initialize(bd_t *bis) 2352 { 2353 static int initialized = 0; 2354 int ret; 2355 if (initialized) /* Avoid initializing mmc multiple times */ 2356 return 0; 2357 initialized = 1; 2358 2359 #if !CONFIG_IS_ENABLED(BLK) 2360 #if !CONFIG_IS_ENABLED(MMC_TINY) 2361 mmc_list_init(); 2362 #endif 2363 #endif 2364 ret = mmc_probe(bis); 2365 if (ret) 2366 return ret; 2367 2368 #ifndef CONFIG_SPL_BUILD 2369 print_mmc_devices(','); 2370 #endif 2371 2372 mmc_do_preinit(); 2373 return 0; 2374 } 2375 2376 #ifdef CONFIG_CMD_BKOPS_ENABLE 2377 int mmc_set_bkops_enable(struct mmc *mmc) 2378 { 2379 int err; 2380 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 2381 2382 err = mmc_send_ext_csd(mmc, ext_csd); 2383 if (err) { 2384 puts("Could not get ext_csd register values\n"); 2385 return err; 2386 } 2387 2388 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) { 2389 puts("Background operations not supported on device\n"); 2390 return -EMEDIUMTYPE; 2391 } 2392 2393 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) { 2394 puts("Background operations already enabled\n"); 2395 return 0; 2396 } 2397 2398 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1); 2399 if (err) { 2400 puts("Failed to enable manual background operations\n"); 2401 return err; 2402 } 2403 2404 puts("Enabled manual background operations\n"); 2405 2406 return 0; 2407 } 2408 #endif 2409