1 /* 2 * Copyright 2008, Freescale Semiconductor, Inc 3 * Andy Fleming 4 * 5 * Based vaguely on the Linux code 6 * 7 * SPDX-License-Identifier: GPL-2.0+ 8 */ 9 10 #include <config.h> 11 #include <common.h> 12 #include <command.h> 13 #include <dm.h> 14 #include <dm/device-internal.h> 15 #include <errno.h> 16 #include <mmc.h> 17 #include <part.h> 18 #include <power/regulator.h> 19 #include <malloc.h> 20 #include <memalign.h> 21 #include <linux/list.h> 22 #include <div64.h> 23 #include "mmc_private.h" 24 25 static const unsigned int sd_au_size[] = { 26 0, SZ_16K / 512, SZ_32K / 512, 27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, 31 }; 32 33 static char mmc_ext_csd[512]; 34 35 #if CONFIG_IS_ENABLED(MMC_TINY) 36 static struct mmc mmc_static; 37 struct mmc *find_mmc_device(int dev_num) 38 { 39 return &mmc_static; 40 } 41 42 void mmc_do_preinit(void) 43 { 44 struct mmc *m = &mmc_static; 45 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 46 mmc_set_preinit(m, 1); 47 #endif 48 if (m->preinit) 49 mmc_start_init(m); 50 } 51 52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc) 53 { 54 return &mmc->block_dev; 55 } 56 #endif 57 58 #if !CONFIG_IS_ENABLED(DM_MMC) 59 __weak int board_mmc_getwp(struct mmc *mmc) 60 { 61 return -1; 62 } 63 64 int mmc_getwp(struct mmc *mmc) 65 { 66 int wp; 67 68 wp = board_mmc_getwp(mmc); 69 70 if (wp < 0) { 71 if (mmc->cfg->ops->getwp) 72 wp = mmc->cfg->ops->getwp(mmc); 73 else 74 wp = 0; 75 } 76 77 return wp; 78 } 79 80 __weak int board_mmc_getcd(struct mmc *mmc) 81 { 82 return -1; 83 } 84 #endif 85 86 #ifdef CONFIG_MMC_TRACE 87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd) 88 { 89 printf("CMD_SEND:%d\n", cmd->cmdidx); 90 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg); 91 } 92 93 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret) 94 { 95 int i; 96 u8 *ptr; 
97 98 if (ret) { 99 printf("\t\tRET\t\t\t %d\n", ret); 100 } else { 101 switch (cmd->resp_type) { 102 case MMC_RSP_NONE: 103 printf("\t\tMMC_RSP_NONE\n"); 104 break; 105 case MMC_RSP_R1: 106 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 107 cmd->response[0]); 108 break; 109 case MMC_RSP_R1b: 110 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 111 cmd->response[0]); 112 break; 113 case MMC_RSP_R2: 114 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 115 cmd->response[0]); 116 printf("\t\t \t\t 0x%08X \n", 117 cmd->response[1]); 118 printf("\t\t \t\t 0x%08X \n", 119 cmd->response[2]); 120 printf("\t\t \t\t 0x%08X \n", 121 cmd->response[3]); 122 printf("\n"); 123 printf("\t\t\t\t\tDUMPING DATA\n"); 124 for (i = 0; i < 4; i++) { 125 int j; 126 printf("\t\t\t\t\t%03d - ", i*4); 127 ptr = (u8 *)&cmd->response[i]; 128 ptr += 3; 129 for (j = 0; j < 4; j++) 130 printf("%02X ", *ptr--); 131 printf("\n"); 132 } 133 break; 134 case MMC_RSP_R3: 135 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 136 cmd->response[0]); 137 break; 138 default: 139 printf("\t\tERROR MMC rsp not supported\n"); 140 break; 141 } 142 } 143 } 144 145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 146 { 147 int status; 148 149 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 150 printf("CURR STATE:%d\n", status); 151 } 152 #endif 153 154 #if !CONFIG_IS_ENABLED(DM_MMC) 155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 156 { 157 int ret; 158 159 mmmc_trace_before_send(mmc, cmd); 160 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 161 mmmc_trace_after_send(mmc, cmd, ret); 162 163 return ret; 164 } 165 #endif 166 167 int mmc_send_status(struct mmc *mmc, int timeout) 168 { 169 struct mmc_cmd cmd; 170 int err, retries = 5; 171 172 cmd.cmdidx = MMC_CMD_SEND_STATUS; 173 cmd.resp_type = MMC_RSP_R1; 174 if (!mmc_host_is_spi(mmc)) 175 cmd.cmdarg = mmc->rca << 16; 176 177 while (1) { 178 err = mmc_send_cmd(mmc, &cmd, NULL); 179 if (!err) { 180 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) 
&& 181 (cmd.response[0] & MMC_STATUS_CURR_STATE) != 182 MMC_STATE_PRG) 183 break; 184 else if (cmd.response[0] & MMC_STATUS_MASK) { 185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 186 printf("Status Error: 0x%08X\n", 187 cmd.response[0]); 188 #endif 189 return -ECOMM; 190 } 191 } else if (--retries < 0) 192 return err; 193 194 if (timeout-- <= 0) 195 break; 196 197 udelay(1000); 198 } 199 200 mmc_trace_state(mmc, &cmd); 201 if (timeout <= 0) { 202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 203 printf("Timeout waiting card ready\n"); 204 #endif 205 return -ETIMEDOUT; 206 } 207 208 return 0; 209 } 210 211 int mmc_set_blocklen(struct mmc *mmc, int len) 212 { 213 struct mmc_cmd cmd; 214 215 if (mmc_card_ddr(mmc)) 216 return 0; 217 218 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN; 219 cmd.resp_type = MMC_RSP_R1; 220 cmd.cmdarg = len; 221 222 return mmc_send_cmd(mmc, &cmd, NULL); 223 } 224 225 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start, 226 lbaint_t blkcnt) 227 { 228 struct mmc_cmd cmd; 229 struct mmc_data data; 230 231 if (blkcnt > 1) 232 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 233 else 234 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 235 236 if (mmc->high_capacity) 237 cmd.cmdarg = start; 238 else 239 cmd.cmdarg = start * mmc->read_bl_len; 240 241 cmd.resp_type = MMC_RSP_R1; 242 243 data.dest = dst; 244 data.blocks = blkcnt; 245 data.blocksize = mmc->read_bl_len; 246 data.flags = MMC_DATA_READ; 247 248 if (mmc_send_cmd(mmc, &cmd, &data)) 249 return 0; 250 251 if (blkcnt > 1) { 252 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 253 cmd.cmdarg = 0; 254 cmd.resp_type = MMC_RSP_R1b; 255 if (mmc_send_cmd(mmc, &cmd, NULL)) { 256 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 257 printf("mmc fail to send stop cmd\n"); 258 #endif 259 return 0; 260 } 261 } 262 263 return blkcnt; 264 } 265 266 #ifdef CONFIG_SPL_BLK_READ_PREPARE 267 static int mmc_read_blocks_prepare(struct mmc *mmc, void 
*dst, lbaint_t start, 268 lbaint_t blkcnt) 269 { 270 struct mmc_cmd cmd; 271 struct mmc_data data; 272 273 if (blkcnt > 1) 274 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 275 else 276 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 277 278 if (mmc->high_capacity) 279 cmd.cmdarg = start; 280 else 281 cmd.cmdarg = start * mmc->read_bl_len; 282 283 cmd.resp_type = MMC_RSP_R1; 284 285 data.dest = dst; 286 data.blocks = blkcnt; 287 data.blocksize = mmc->read_bl_len; 288 data.flags = MMC_DATA_READ; 289 290 if (mmc_send_cmd_prepare(mmc, &cmd, &data)) 291 return 0; 292 293 return blkcnt; 294 } 295 #endif 296 297 #ifdef CONFIG_SPL_BLK_READ_PREPARE 298 #if CONFIG_IS_ENABLED(BLK) 299 ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst) 300 #else 301 ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt, 302 void *dst) 303 #endif 304 { 305 #if CONFIG_IS_ENABLED(BLK) 306 struct blk_desc *block_dev = dev_get_uclass_platdata(dev); 307 #endif 308 int dev_num = block_dev->devnum; 309 int timeout = 0; 310 int err; 311 312 if (blkcnt == 0) 313 return 0; 314 315 struct mmc *mmc = find_mmc_device(dev_num); 316 317 if (!mmc) 318 return 0; 319 320 if (CONFIG_IS_ENABLED(MMC_TINY)) 321 err = mmc_switch_part(mmc, block_dev->hwpart); 322 else 323 err = blk_dselect_hwpart(block_dev, block_dev->hwpart); 324 325 if (err < 0) 326 return 0; 327 328 if ((start + blkcnt) > block_dev->lba) { 329 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 330 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n", 331 start + blkcnt, block_dev->lba); 332 #endif 333 return 0; 334 } 335 336 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) { 337 debug("%s: Failed to set blocklen\n", __func__); 338 return 0; 339 } 340 341 if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) { 342 debug("%s: Failed to read blocks\n", __func__); 343 re_init_retry: 344 timeout++; 345 /* 346 * Try re-init seven times. 
347 */ 348 if (timeout > 7) { 349 printf("Re-init retry timeout\n"); 350 return 0; 351 } 352 353 mmc->has_init = 0; 354 if (mmc_init(mmc)) 355 return 0; 356 357 if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) { 358 printf("%s: Re-init mmc_read_blocks_prepare error\n", 359 __func__); 360 goto re_init_retry; 361 } 362 } 363 364 return blkcnt; 365 } 366 #endif 367 368 #if CONFIG_IS_ENABLED(BLK) 369 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst) 370 #else 371 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt, 372 void *dst) 373 #endif 374 { 375 #if CONFIG_IS_ENABLED(BLK) 376 struct blk_desc *block_dev = dev_get_uclass_platdata(dev); 377 #endif 378 int dev_num = block_dev->devnum; 379 int err; 380 lbaint_t cur, blocks_todo = blkcnt; 381 382 #ifdef CONFIG_SPL_BLK_READ_PREPARE 383 if (block_dev->op_flag == BLK_PRE_RW) 384 #if CONFIG_IS_ENABLED(BLK) 385 return mmc_bread_prepare(dev, start, blkcnt, dst); 386 #else 387 return mmc_bread_prepare(block_dev, start, blkcnt, dst); 388 #endif 389 #endif 390 if (blkcnt == 0) 391 return 0; 392 393 struct mmc *mmc = find_mmc_device(dev_num); 394 if (!mmc) 395 return 0; 396 397 if (CONFIG_IS_ENABLED(MMC_TINY)) 398 err = mmc_switch_part(mmc, block_dev->hwpart); 399 else 400 err = blk_dselect_hwpart(block_dev, block_dev->hwpart); 401 402 if (err < 0) 403 return 0; 404 405 if ((start + blkcnt) > block_dev->lba) { 406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 407 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n", 408 start + blkcnt, block_dev->lba); 409 #endif 410 return 0; 411 } 412 413 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) { 414 debug("%s: Failed to set blocklen\n", __func__); 415 return 0; 416 } 417 418 do { 419 cur = (blocks_todo > mmc->cfg->b_max) ? 
420 mmc->cfg->b_max : blocks_todo; 421 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 422 debug("%s: Failed to read blocks\n", __func__); 423 int timeout = 0; 424 re_init_retry: 425 timeout++; 426 /* 427 * Try re-init seven times. 428 */ 429 if (timeout > 7) { 430 printf("Re-init retry timeout\n"); 431 return 0; 432 } 433 434 mmc->has_init = 0; 435 if (mmc_init(mmc)) 436 return 0; 437 438 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 439 printf("%s: Re-init mmc_read_blocks error\n", 440 __func__); 441 goto re_init_retry; 442 } 443 } 444 blocks_todo -= cur; 445 start += cur; 446 dst += cur * mmc->read_bl_len; 447 } while (blocks_todo > 0); 448 449 return blkcnt; 450 } 451 452 void mmc_set_clock(struct mmc *mmc, uint clock) 453 { 454 if (clock > mmc->cfg->f_max) 455 clock = mmc->cfg->f_max; 456 457 if (clock < mmc->cfg->f_min) 458 clock = mmc->cfg->f_min; 459 460 mmc->clock = clock; 461 462 mmc_set_ios(mmc); 463 } 464 465 static void mmc_set_bus_width(struct mmc *mmc, uint width) 466 { 467 mmc->bus_width = width; 468 469 mmc_set_ios(mmc); 470 } 471 472 static void mmc_set_timing(struct mmc *mmc, uint timing) 473 { 474 mmc->timing = timing; 475 mmc_set_ios(mmc); 476 } 477 478 static int mmc_go_idle(struct mmc *mmc) 479 { 480 struct mmc_cmd cmd; 481 int err; 482 483 udelay(1000); 484 485 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE; 486 cmd.cmdarg = 0; 487 cmd.resp_type = MMC_RSP_NONE; 488 489 err = mmc_send_cmd(mmc, &cmd, NULL); 490 491 if (err) 492 return err; 493 494 udelay(2000); 495 496 return 0; 497 } 498 499 #ifndef CONFIG_MMC_USE_PRE_CONFIG 500 static int sd_send_op_cond(struct mmc *mmc) 501 { 502 int timeout = 1000; 503 int err; 504 struct mmc_cmd cmd; 505 506 while (1) { 507 cmd.cmdidx = MMC_CMD_APP_CMD; 508 cmd.resp_type = MMC_RSP_R1; 509 cmd.cmdarg = 0; 510 511 err = mmc_send_cmd(mmc, &cmd, NULL); 512 513 if (err) 514 return err; 515 516 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND; 517 cmd.resp_type = MMC_RSP_R3; 518 519 /* 520 * Most cards do not answer if some 
reserved bits 521 * in the ocr are set. However, Some controller 522 * can set bit 7 (reserved for low voltages), but 523 * how to manage low voltages SD card is not yet 524 * specified. 525 */ 526 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 : 527 (mmc->cfg->voltages & 0xff8000); 528 529 if (mmc->version == SD_VERSION_2) 530 cmd.cmdarg |= OCR_HCS; 531 532 err = mmc_send_cmd(mmc, &cmd, NULL); 533 534 if (err) 535 return err; 536 537 if (cmd.response[0] & OCR_BUSY) 538 break; 539 540 if (timeout-- <= 0) 541 return -EOPNOTSUPP; 542 543 udelay(1000); 544 } 545 546 if (mmc->version != SD_VERSION_2) 547 mmc->version = SD_VERSION_1_0; 548 549 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 550 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 551 cmd.resp_type = MMC_RSP_R3; 552 cmd.cmdarg = 0; 553 554 err = mmc_send_cmd(mmc, &cmd, NULL); 555 556 if (err) 557 return err; 558 } 559 560 mmc->ocr = cmd.response[0]; 561 562 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 563 mmc->rca = 0; 564 565 return 0; 566 } 567 #endif 568 569 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg) 570 { 571 struct mmc_cmd cmd; 572 int err; 573 574 cmd.cmdidx = MMC_CMD_SEND_OP_COND; 575 cmd.resp_type = MMC_RSP_R3; 576 cmd.cmdarg = 0; 577 if (use_arg && !mmc_host_is_spi(mmc)) 578 cmd.cmdarg = OCR_HCS | 579 (mmc->cfg->voltages & 580 (mmc->ocr & OCR_VOLTAGE_MASK)) | 581 (mmc->ocr & OCR_ACCESS_MODE); 582 583 err = mmc_send_cmd(mmc, &cmd, NULL); 584 if (err) 585 return err; 586 mmc->ocr = cmd.response[0]; 587 return 0; 588 } 589 590 #ifndef CONFIG_MMC_USE_PRE_CONFIG 591 static int mmc_send_op_cond(struct mmc *mmc) 592 { 593 int err, i; 594 595 /* Some cards seem to need this */ 596 mmc_go_idle(mmc); 597 598 /* Asking to the card its capabilities */ 599 for (i = 0; i < 2; i++) { 600 err = mmc_send_op_cond_iter(mmc, i != 0); 601 if (err) 602 return err; 603 604 /* exit if not busy (flag seems to be inverted) */ 605 if (mmc->ocr & OCR_BUSY) 606 break; 607 } 608 mmc->op_cond_pending = 1; 609 return 0; 
610 } 611 #endif 612 static int mmc_complete_op_cond(struct mmc *mmc) 613 { 614 struct mmc_cmd cmd; 615 int timeout = 1000; 616 uint start; 617 int err; 618 619 mmc->op_cond_pending = 0; 620 if (!(mmc->ocr & OCR_BUSY)) { 621 /* Some cards seem to need this */ 622 mmc_go_idle(mmc); 623 624 start = get_timer(0); 625 while (1) { 626 err = mmc_send_op_cond_iter(mmc, 1); 627 if (err) 628 return err; 629 if (mmc->ocr & OCR_BUSY) 630 break; 631 if (get_timer(start) > timeout) 632 return -EOPNOTSUPP; 633 udelay(100); 634 } 635 } 636 637 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 638 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 639 cmd.resp_type = MMC_RSP_R3; 640 cmd.cmdarg = 0; 641 642 err = mmc_send_cmd(mmc, &cmd, NULL); 643 644 if (err) 645 return err; 646 647 mmc->ocr = cmd.response[0]; 648 } 649 650 mmc->version = MMC_VERSION_UNKNOWN; 651 652 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 653 mmc->rca = 1; 654 655 return 0; 656 } 657 658 659 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd) 660 { 661 struct mmc_cmd cmd; 662 struct mmc_data data; 663 int err; 664 665 #ifdef CONFIG_MMC_USE_PRE_CONFIG 666 static int initialized; 667 if (initialized) { 668 memcpy(ext_csd, mmc_ext_csd, 512); 669 return 0; 670 } 671 672 initialized = 1; 673 #endif 674 /* Get the Card Status Register */ 675 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD; 676 cmd.resp_type = MMC_RSP_R1; 677 cmd.cmdarg = 0; 678 679 data.dest = (char *)ext_csd; 680 data.blocks = 1; 681 data.blocksize = MMC_MAX_BLOCK_LEN; 682 data.flags = MMC_DATA_READ; 683 684 err = mmc_send_cmd(mmc, &cmd, &data); 685 memcpy(mmc_ext_csd, ext_csd, 512); 686 #if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD) 687 char *mmc_ecsd_base = NULL; 688 ulong mmc_ecsd; 689 690 mmc_ecsd = dev_read_u32_default(mmc->dev, "mmc-ecsd", 0); 691 mmc_ecsd_base = (char *)mmc_ecsd; 692 if (mmc_ecsd_base) { 693 memcpy(mmc_ecsd_base, ext_csd, 512); 694 *(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa; 695 } 696 #endif 697 return 
err; 698 } 699 700 static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status) 701 { 702 struct mmc_cmd cmd; 703 u8 busy = true; 704 uint start; 705 int ret; 706 int timeout = 1000; 707 708 cmd.cmdidx = MMC_CMD_SEND_STATUS; 709 cmd.resp_type = MMC_RSP_R1; 710 cmd.cmdarg = mmc->rca << 16; 711 712 start = get_timer(0); 713 714 if (!send_status && !mmc_can_card_busy(mmc)) { 715 mdelay(timeout); 716 return 0; 717 } 718 719 do { 720 if (!send_status) { 721 busy = mmc_card_busy(mmc); 722 } else { 723 ret = mmc_send_cmd(mmc, &cmd, NULL); 724 725 if (ret) 726 return ret; 727 728 if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR) 729 return -EBADMSG; 730 busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) == 731 MMC_STATE_PRG; 732 } 733 734 if (get_timer(start) > timeout && busy) 735 return -ETIMEDOUT; 736 } while (busy); 737 738 return 0; 739 } 740 741 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value, 742 u8 send_status) 743 { 744 struct mmc_cmd cmd; 745 int retries = 3; 746 int ret; 747 748 cmd.cmdidx = MMC_CMD_SWITCH; 749 cmd.resp_type = MMC_RSP_R1b; 750 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 751 (index << 16) | 752 (value << 8); 753 754 do { 755 ret = mmc_send_cmd(mmc, &cmd, NULL); 756 757 if (!ret) 758 return mmc_poll_for_busy(mmc, send_status); 759 } while (--retries > 0 && ret); 760 761 return ret; 762 } 763 764 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value) 765 { 766 return __mmc_switch(mmc, set, index, value, true); 767 } 768 769 static int mmc_select_bus_width(struct mmc *mmc) 770 { 771 u32 ext_csd_bits[] = { 772 EXT_CSD_BUS_WIDTH_8, 773 EXT_CSD_BUS_WIDTH_4, 774 }; 775 u32 bus_widths[] = { 776 MMC_BUS_WIDTH_8BIT, 777 MMC_BUS_WIDTH_4BIT, 778 }; 779 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 780 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN); 781 u32 idx, bus_width = 0; 782 int err = 0; 783 784 if (mmc->version < MMC_VERSION_4 || 785 !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT))) 786 return 
0; 787 788 err = mmc_send_ext_csd(mmc, ext_csd); 789 790 if (err) 791 return err; 792 793 idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1; 794 795 /* 796 * Unlike SD, MMC cards dont have a configuration register to notify 797 * supported bus width. So bus test command should be run to identify 798 * the supported bus width or compare the ext csd values of current 799 * bus width and ext csd values of 1 bit mode read earlier. 800 */ 801 for (; idx < ARRAY_SIZE(bus_widths); idx++) { 802 /* 803 * Host is capable of 8bit transfer, then switch 804 * the device to work in 8bit transfer mode. If the 805 * mmc switch command returns error then switch to 806 * 4bit transfer mode. On success set the corresponding 807 * bus width on the host. 808 */ 809 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 810 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]); 811 if (err) 812 continue; 813 814 bus_width = bus_widths[idx]; 815 mmc_set_bus_width(mmc, bus_width); 816 817 err = mmc_send_ext_csd(mmc, test_csd); 818 819 if (err) 820 continue; 821 822 /* Only compare read only fields */ 823 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] == 824 test_csd[EXT_CSD_PARTITIONING_SUPPORT]) && 825 (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] == 826 test_csd[EXT_CSD_HC_WP_GRP_SIZE]) && 827 (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) && 828 (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] == 829 test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && 830 !memcmp(&ext_csd[EXT_CSD_SEC_CNT], 831 &test_csd[EXT_CSD_SEC_CNT], 4)) { 832 err = bus_width; 833 break; 834 } else { 835 err = -EBADMSG; 836 } 837 } 838 839 return err; 840 } 841 842 #ifndef CONFIG_MMC_SIMPLE 843 static const u8 tuning_blk_pattern_4bit[] = { 844 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, 845 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, 846 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, 847 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, 848 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, 849 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, 850 0xff, 0xfd, 0xff, 0xfd, 
0xdf, 0xff, 0xbf, 0xff, 851 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, 852 }; 853 854 static const u8 tuning_blk_pattern_8bit[] = { 855 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 856 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, 857 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, 858 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, 859 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, 860 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, 861 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, 862 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, 863 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 864 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 865 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 866 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 867 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 868 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 869 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 870 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 871 }; 872 873 int mmc_send_tuning(struct mmc *mmc, u32 opcode) 874 { 875 struct mmc_cmd cmd; 876 struct mmc_data data; 877 const u8 *tuning_block_pattern; 878 int size, err = 0; 879 u8 *data_buf; 880 881 if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 882 tuning_block_pattern = tuning_blk_pattern_8bit; 883 size = sizeof(tuning_blk_pattern_8bit); 884 } else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) { 885 tuning_block_pattern = tuning_blk_pattern_4bit; 886 size = sizeof(tuning_blk_pattern_4bit); 887 } else { 888 return -EINVAL; 889 } 890 891 data_buf = calloc(1, size); 892 if (!data_buf) 893 return -ENOMEM; 894 895 cmd.cmdidx = opcode; 896 cmd.resp_type = MMC_RSP_R1; 897 cmd.cmdarg = 0; 898 899 data.dest = (char *)data_buf; 900 data.blocksize = size; 901 data.blocks = 1; 902 data.flags = MMC_DATA_READ; 903 904 err = mmc_send_cmd(mmc, &cmd, &data); 905 if (err) 906 goto out; 907 908 if (memcmp(data_buf, tuning_block_pattern, size)) 909 err = -EIO; 910 out: 911 free(data_buf); 912 return err; 913 } 914 915 static int 
mmc_execute_tuning(struct mmc *mmc) 916 { 917 #ifdef CONFIG_DM_MMC 918 struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev); 919 #endif 920 u32 opcode; 921 922 if (IS_SD(mmc)) 923 opcode = MMC_SEND_TUNING_BLOCK; 924 else 925 opcode = MMC_SEND_TUNING_BLOCK_HS200; 926 927 #ifndef CONFIG_DM_MMC 928 if (mmc->cfg->ops->execute_tuning) { 929 return mmc->cfg->ops->execute_tuning(mmc, opcode); 930 #else 931 if (ops->execute_tuning) { 932 return ops->execute_tuning(mmc->dev, opcode); 933 #endif 934 } else { 935 debug("Tuning feature required for HS200 mode.\n"); 936 return -EIO; 937 } 938 } 939 940 static int mmc_hs200_tuning(struct mmc *mmc) 941 { 942 return mmc_execute_tuning(mmc); 943 } 944 945 #else 946 int mmc_send_tuning(struct mmc *mmc, u32 opcode) { return 0; } 947 int mmc_execute_tuning(struct mmc *mmc) { return 0; } 948 static int mmc_hs200_tuning(struct mmc *mmc) { return 0; } 949 #endif 950 951 static int mmc_select_hs(struct mmc *mmc) 952 { 953 int ret; 954 955 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 956 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS); 957 958 if (!ret) 959 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 960 961 return ret; 962 } 963 964 static int mmc_select_hs_ddr(struct mmc *mmc) 965 { 966 u32 ext_csd_bits; 967 int err = 0; 968 969 if (mmc->bus_width == MMC_BUS_WIDTH_1BIT) 970 return 0; 971 972 ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ? 973 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; 974 975 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 976 EXT_CSD_BUS_WIDTH, ext_csd_bits); 977 if (err) 978 return err; 979 980 mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52); 981 982 return 0; 983 } 984 985 #ifndef CONFIG_MMC_SIMPLE 986 static int mmc_select_hs200(struct mmc *mmc) 987 { 988 int ret; 989 990 /* 991 * Set the bus width(4 or 8) with host's support and 992 * switch to HS200 mode if bus width is set successfully. 
993 */ 994 ret = mmc_select_bus_width(mmc); 995 996 if (ret > 0) { 997 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 998 EXT_CSD_HS_TIMING, 999 EXT_CSD_TIMING_HS200, false); 1000 1001 if (ret) 1002 return ret; 1003 1004 mmc_set_timing(mmc, MMC_TIMING_MMC_HS200); 1005 } 1006 1007 return ret; 1008 } 1009 1010 static int mmc_select_hs400(struct mmc *mmc) 1011 { 1012 int ret; 1013 1014 /* Switch card to HS mode */ 1015 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1016 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false); 1017 if (ret) 1018 return ret; 1019 1020 /* Set host controller to HS timing */ 1021 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 1022 1023 /* Reduce frequency to HS frequency */ 1024 mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR); 1025 1026 ret = mmc_send_status(mmc, 1000); 1027 if (ret) 1028 return ret; 1029 1030 /* Switch card to DDR */ 1031 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1032 EXT_CSD_BUS_WIDTH, 1033 EXT_CSD_DDR_BUS_WIDTH_8); 1034 if (ret) 1035 return ret; 1036 1037 /* Switch card to HS400 */ 1038 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1039 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false); 1040 if (ret) 1041 return ret; 1042 1043 /* Set host controller to HS400 timing and frequency */ 1044 mmc_set_timing(mmc, MMC_TIMING_MMC_HS400); 1045 1046 return ret; 1047 } 1048 1049 static int mmc_select_hs400es(struct mmc *mmc) 1050 { 1051 u8 val, fixed_drv_type, card_drv_type, drive_strength; 1052 int err; 1053 1054 /* Switch card to HS mode */ 1055 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1056 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false); 1057 if (err) 1058 return err; 1059 1060 /* Set host controller to HS timing */ 1061 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 1062 1063 err = mmc_send_status(mmc, 1000); 1064 if (err) 1065 return err; 1066 1067 mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR); 1068 1069 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, 1070 EXT_CSD_DDR_BUS_WIDTH_8 | 1071 EXT_CSD_BUS_WIDTH_STROBE); 1072 if (err) { 1073 
printf("switch to bus width for hs400 failed\n"); 1074 return err; 1075 } 1076 1077 /* Switch card to HS400 */ 1078 fixed_drv_type = mmc->cfg->fixed_drv_type; 1079 card_drv_type = mmc->raw_driver_strength | mmc_driver_type_mask(0); 1080 drive_strength = (card_drv_type & mmc_driver_type_mask(fixed_drv_type)) 1081 ? fixed_drv_type : 0; 1082 val = EXT_CSD_TIMING_HS400 | drive_strength << EXT_CSD_DRV_STR_SHIFT; 1083 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1084 EXT_CSD_HS_TIMING, val, false); 1085 if (err) 1086 return err; 1087 1088 /* Set host controller to HS400 timing and frequency */ 1089 mmc_set_timing(mmc, MMC_TIMING_MMC_HS400ES); 1090 1091 return mmc_set_enhanced_strobe(mmc); 1092 } 1093 #else 1094 static int mmc_select_hs200(struct mmc *mmc) { return 0; } 1095 static int mmc_select_hs400(struct mmc *mmc) { return 0; } 1096 static int mmc_select_hs400es(struct mmc *mmc) { return 0; } 1097 #endif 1098 1099 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd) 1100 { 1101 u8 card_type; 1102 u32 host_caps, avail_type = 0; 1103 1104 card_type = ext_csd[EXT_CSD_CARD_TYPE]; 1105 host_caps = mmc->cfg->host_caps; 1106 1107 if ((host_caps & MMC_MODE_HS) && 1108 (card_type & EXT_CSD_CARD_TYPE_26)) 1109 avail_type |= EXT_CSD_CARD_TYPE_26; 1110 1111 if ((host_caps & MMC_MODE_HS) && 1112 (card_type & EXT_CSD_CARD_TYPE_52)) 1113 avail_type |= EXT_CSD_CARD_TYPE_52; 1114 1115 /* 1116 * For the moment, u-boot doesn't support signal voltage 1117 * switch, therefor we assume that host support ddr52 1118 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and 1119 * hs400 are the same). 1120 */ 1121 if ((host_caps & MMC_MODE_DDR_52MHz) && 1122 (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)) 1123 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; 1124 1125 if ((host_caps & MMC_MODE_HS200) && 1126 (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V)) 1127 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; 1128 1129 /* 1130 * If host can support HS400, it means that host can also 1131 * support HS200. 
1132 */ 1133 if ((host_caps & MMC_MODE_HS400) && 1134 (host_caps & MMC_MODE_8BIT) && 1135 (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 1136 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 1137 EXT_CSD_CARD_TYPE_HS400_1_8V; 1138 1139 if ((host_caps & MMC_MODE_HS400ES) && 1140 (host_caps & MMC_MODE_8BIT) && 1141 ext_csd[EXT_CSD_STROBE_SUPPORT] && 1142 (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 1143 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 1144 EXT_CSD_CARD_TYPE_HS400_1_8V | 1145 EXT_CSD_CARD_TYPE_HS400ES; 1146 1147 return avail_type; 1148 } 1149 1150 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type) 1151 { 1152 int clock = 0; 1153 1154 if (mmc_card_hs(mmc)) 1155 clock = (avail_type & EXT_CSD_CARD_TYPE_52) ? 1156 MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR; 1157 else if (mmc_card_hs200(mmc) || 1158 mmc_card_hs400(mmc) || 1159 mmc_card_hs400es(mmc)) 1160 clock = MMC_HS200_MAX_DTR; 1161 1162 mmc_set_clock(mmc, clock); 1163 } 1164 1165 static int mmc_change_freq(struct mmc *mmc) 1166 { 1167 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1168 u32 avail_type; 1169 int err; 1170 1171 mmc->card_caps = 0; 1172 1173 if (mmc_host_is_spi(mmc)) 1174 return 0; 1175 1176 /* Only version 4 supports high-speed */ 1177 if (mmc->version < MMC_VERSION_4) 1178 return 0; 1179 1180 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT; 1181 1182 err = mmc_send_ext_csd(mmc, ext_csd); 1183 1184 if (err) 1185 return err; 1186 1187 avail_type = mmc_select_card_type(mmc, ext_csd); 1188 1189 if (avail_type & EXT_CSD_CARD_TYPE_HS400ES) { 1190 err = mmc_select_bus_width(mmc); 1191 if (err > 0 && mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 1192 err = mmc_select_hs400es(mmc); 1193 mmc_set_bus_speed(mmc, avail_type); 1194 if (!err) 1195 return err; 1196 } 1197 } 1198 1199 if (avail_type & EXT_CSD_CARD_TYPE_HS200) 1200 err = mmc_select_hs200(mmc); 1201 else if (avail_type & EXT_CSD_CARD_TYPE_HS) 1202 err = mmc_select_hs(mmc); 1203 else 1204 err = -EINVAL; 1205 1206 if (err) 1207 return 
err; 1208 1209 mmc_set_bus_speed(mmc, avail_type); 1210 1211 if (mmc_card_hs200(mmc)) { 1212 err = mmc_hs200_tuning(mmc); 1213 if (avail_type & EXT_CSD_CARD_TYPE_HS400 && 1214 mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 1215 err = mmc_select_hs400(mmc); 1216 mmc_set_bus_speed(mmc, avail_type); 1217 } 1218 } else if (!mmc_card_hs400es(mmc)) { 1219 err = mmc_select_bus_width(mmc) > 0 ? 0 : err; 1220 if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52) 1221 err = mmc_select_hs_ddr(mmc); 1222 } 1223 1224 return err; 1225 } 1226 1227 static int mmc_set_capacity(struct mmc *mmc, int part_num) 1228 { 1229 switch (part_num) { 1230 case 0: 1231 mmc->capacity = mmc->capacity_user; 1232 break; 1233 case 1: 1234 case 2: 1235 mmc->capacity = mmc->capacity_boot; 1236 break; 1237 case 3: 1238 mmc->capacity = mmc->capacity_rpmb; 1239 break; 1240 case 4: 1241 case 5: 1242 case 6: 1243 case 7: 1244 mmc->capacity = mmc->capacity_gp[part_num - 4]; 1245 break; 1246 default: 1247 return -1; 1248 } 1249 1250 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len); 1251 1252 return 0; 1253 } 1254 1255 int mmc_switch_part(struct mmc *mmc, unsigned int part_num) 1256 { 1257 int ret; 1258 1259 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF, 1260 (mmc->part_config & ~PART_ACCESS_MASK) 1261 | (part_num & PART_ACCESS_MASK)); 1262 1263 /* 1264 * Set the capacity if the switch succeeded or was intended 1265 * to return to representing the raw device. 
 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}

/*
 * Configure the eMMC hardware partitions described by @conf: the
 * enhanced user-data area and the four general-purpose partitions.
 *
 * @mode selects how far to go: CHECK only validates the request
 * against the card's EXT_CSD limits, SET writes the size/attribute
 * registers, COMPLETE additionally writes the write-once WR_REL_SET
 * and sets PARTITION_SETTING_COMPLETED (effective after power cycle).
 *
 * Returns 0 on success, -EINVAL for bad/misaligned input, -EMEDIUMTYPE
 * when the card lacks a required capability, -EPERM if the card is
 * already partitioned, or an mmc_switch()/mmc_send_ext_csd() error.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			/* sector-addressed cards take the start in sectors */
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size /
			mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Card-detect status: the board hook gets first say; a negative value
 * means "don't know", in which case the host driver's getcd op is
 * consulted, defaulting to "present" (1) if no op is provided.
 */
int mmc_getcd(struct mmc *mmc)
{
	int cd;

	cd = board_mmc_getcd(mmc);

	if (cd < 0) {
		if (mmc->cfg->ops->getcd)
			cd = mmc->cfg->ops->getcd(mmc);
		else
			cd = 1;
	}

	return cd;
}
#endif

/*
 * Issue CMD6 (SWITCH_FUNC) for function group @group with function
 * @value; @mode selects check (0) vs. set (1). The 64-byte switch
 * status block is read back into @resp.
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}


/*
 * Read the SCR to determine the SD spec version and 4-bit capability,
 * then attempt the CMD6 switch to high-speed mode when both the card
 * and the host support it. Updates mmc->version and mmc->card_caps.
 */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR is big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card support SD_HIGHSPEED.
	 * This can avoid further problem when the card runs in different
	 * mode between the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* Confirm the card actually switched to function 1 (high-speed) */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}

/*
 * Read the 64-byte SD Status (ACMD13) and extract the allocation-unit
 * size plus erase timing parameters into mmc->ssr.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* SD Status is big-endian on the wire */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
mmc->ssr.erase_timeout = (et * 1000) / es; 1665 mmc->ssr.erase_offset = eo * 1000; 1666 } 1667 } else { 1668 debug("Invalid Allocation Unit Size.\n"); 1669 } 1670 1671 return 0; 1672 } 1673 1674 /* frequency bases */ 1675 /* divided by 10 to be nice to platforms without floating point */ 1676 static const int fbase[] = { 1677 10000, 1678 100000, 1679 1000000, 1680 10000000, 1681 }; 1682 1683 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1684 * to platforms without floating point. 1685 */ 1686 static const u8 multipliers[] = { 1687 0, /* reserved */ 1688 10, 1689 12, 1690 13, 1691 15, 1692 20, 1693 25, 1694 30, 1695 35, 1696 40, 1697 45, 1698 50, 1699 55, 1700 60, 1701 70, 1702 80, 1703 }; 1704 1705 #if !CONFIG_IS_ENABLED(DM_MMC) 1706 static void mmc_set_ios(struct mmc *mmc) 1707 { 1708 if (mmc->cfg->ops->set_ios) 1709 mmc->cfg->ops->set_ios(mmc); 1710 } 1711 1712 static bool mmc_card_busy(struct mmc *mmc) 1713 { 1714 if (!mmc->cfg->ops->card_busy) 1715 return -ENOSYS; 1716 1717 return mmc->cfg->ops->card_busy(mmc); 1718 } 1719 1720 static bool mmc_can_card_busy(struct mmc *) 1721 { 1722 return !!mmc->cfg->ops->card_busy; 1723 } 1724 #endif 1725 1726 static int mmc_startup(struct mmc *mmc) 1727 { 1728 int err, i; 1729 uint mult, freq, tran_speed; 1730 u64 cmult, csize, capacity; 1731 struct mmc_cmd cmd; 1732 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1733 bool has_parts = false; 1734 bool part_completed; 1735 struct blk_desc *bdesc; 1736 1737 #ifdef CONFIG_MMC_SPI_CRC_ON 1738 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1739 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1740 cmd.resp_type = MMC_RSP_R1; 1741 cmd.cmdarg = 1; 1742 err = mmc_send_cmd(mmc, &cmd, NULL); 1743 1744 if (err) 1745 return err; 1746 } 1747 #endif 1748 #ifndef CONFIG_MMC_USE_PRE_CONFIG 1749 /* Put the Card in Identify Mode */ 1750 cmd.cmdidx = mmc_host_is_spi(mmc) ? 
		MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}
#endif
	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* SPEC_VERS field of the CSD, used when CMD8 gave no version */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* C_SIZE/C_SIZE_MULT fields differ between standard and
	 * high-capacity CSD layouts */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR if the card implements one and a value is set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		/* Map EXT_CSD_REV to the eMMC spec version */
		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
			mmc->esr.mmc_can_trim = 1;

		/* BOOT/RPMB partition sizes come in 128 KiB units (<< 17) */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->capacity_gp[i] <<= 19;
		}

		if (part_completed) {
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *= MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

		mmc->raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		if (mmc->card_caps & MMC_MODE_4BIT) {
			/* ACMD6: switch the card to 4-bit bus width */
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = MMC_HIGH_52_MAX_DTR;
		else
			tran_speed = MMC_HIGH_26_MAX_DTR;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

#ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * Send CMD8 (SEND_IF_COND); a valid echo of the 0xaa check pattern
 * identifies an SD version 2.00 or later card and sets mmc->version.
 */
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations.
 */
__weak void board_mmc_power_init(void)
{
}
#endif

#ifndef CONFIG_MMC_USE_PRE_CONFIG
/*
 * Power up the card: enable the vmmc regulator under driver model, or
 * call the legacy board hook otherwise.  A missing regulator is not an
 * error (returns 0).
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
#endif
#ifdef CONFIG_MMC_USE_PRE_CONFIG
/* Issue CMD7 (SELECT_CARD) with RCA @n; also stores @n as mmc->rca. */
static int mmc_select_card(struct mmc *mmc, int n)
{
	struct mmc_cmd cmd;
	int err = 0;

	memset(&cmd, 0, sizeof(struct mmc_cmd));
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		mmc->rca = n;
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);
	}

	return err;
}

/*
 * Pre-config variant: reuse the card state the bootrom left behind
 * instead of resetting and re-enumerating the device.
 */
int mmc_start_init(struct mmc *mmc)
{
	/*
	 * We use the MMC config set by the bootrom.
	 * So there is no need to reset the eMMC device.
	 */
	mmc_set_bus_width(mmc, 8);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
	/* Send cmd7 to return stand-by state*/
	mmc_select_card(mmc, 0);
	mmc->version = MMC_VERSION_UNKNOWN;
	mmc->high_capacity = 1;
	/*
	 * The RCA is set to 2 by rockchip bootrom, use the default
	 * value here.
	 */
#ifdef CONFIG_ARCH_ROCKCHIP
	mmc->rca = 2;
#else
	mmc->rca = 1;
#endif
	return 0;
}
#else
/*
 * Begin card initialization: power up the host, reset the card
 * (CMD0) and start the SD/MMC operating-condition negotiation.
 * mmc_complete_init() finishes the sequence.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
#endif

/*
 * Second half of initialization: finish the pending operating-condition
 * negotiation (if any) and run mmc_startup().  Updates mmc->has_init.
 */
static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

/*
 * Full card initialization: start (unless already in progress) and
 * complete.  A no-op when the card is already initialized.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

/* Store the DSR value to be programmed during mmc_startup(). */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

/* Mark a device for early initialization by mmc_do_preinit(). */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Probe every MMC device known to the MMC uclass. */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy (non-DM) probe: board hook first, then the CPU fallback. */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

/*
 * One-time subsystem setup: build the device list, probe all
 * controllers and pre-initialize any devices marked for it.
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) via EXT_CSD.  Fails with
 * -EMEDIUMTYPE if the card does not support BKOPS; a no-op (success)
 * if BKOPS is already enabled.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif