1 /* 2 * Copyright 2008, Freescale Semiconductor, Inc 3 * Andy Fleming 4 * 5 * Based vaguely on the Linux code 6 * 7 * SPDX-License-Identifier: GPL-2.0+ 8 */ 9 10 #include <config.h> 11 #include <common.h> 12 #include <command.h> 13 #include <dm.h> 14 #include <dm/device-internal.h> 15 #include <errno.h> 16 #include <mmc.h> 17 #include <part.h> 18 #include <power/regulator.h> 19 #include <malloc.h> 20 #include <memalign.h> 21 #include <linux/list.h> 22 #include <div64.h> 23 #include "mmc_private.h" 24 25 static const unsigned int sd_au_size[] = { 26 0, SZ_16K / 512, SZ_32K / 512, 27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, 31 }; 32 33 #if CONFIG_IS_ENABLED(MMC_TINY) 34 static struct mmc mmc_static; 35 struct mmc *find_mmc_device(int dev_num) 36 { 37 return &mmc_static; 38 } 39 40 void mmc_do_preinit(void) 41 { 42 struct mmc *m = &mmc_static; 43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 44 mmc_set_preinit(m, 1); 45 #endif 46 if (m->preinit) 47 mmc_start_init(m); 48 } 49 50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc) 51 { 52 return &mmc->block_dev; 53 } 54 #endif 55 56 #if !CONFIG_IS_ENABLED(DM_MMC) 57 __weak int board_mmc_getwp(struct mmc *mmc) 58 { 59 return -1; 60 } 61 62 int mmc_getwp(struct mmc *mmc) 63 { 64 int wp; 65 66 wp = board_mmc_getwp(mmc); 67 68 if (wp < 0) { 69 if (mmc->cfg->ops->getwp) 70 wp = mmc->cfg->ops->getwp(mmc); 71 else 72 wp = 0; 73 } 74 75 return wp; 76 } 77 78 __weak int board_mmc_getcd(struct mmc *mmc) 79 { 80 return -1; 81 } 82 #endif 83 84 #ifdef CONFIG_MMC_TRACE 85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd) 86 { 87 printf("CMD_SEND:%d\n", cmd->cmdidx); 88 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg); 89 } 90 91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret) 92 { 93 int i; 94 u8 *ptr; 95 96 if (ret) { 97 
printf("\t\tRET\t\t\t %d\n", ret); 98 } else { 99 switch (cmd->resp_type) { 100 case MMC_RSP_NONE: 101 printf("\t\tMMC_RSP_NONE\n"); 102 break; 103 case MMC_RSP_R1: 104 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 105 cmd->response[0]); 106 break; 107 case MMC_RSP_R1b: 108 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 109 cmd->response[0]); 110 break; 111 case MMC_RSP_R2: 112 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 113 cmd->response[0]); 114 printf("\t\t \t\t 0x%08X \n", 115 cmd->response[1]); 116 printf("\t\t \t\t 0x%08X \n", 117 cmd->response[2]); 118 printf("\t\t \t\t 0x%08X \n", 119 cmd->response[3]); 120 printf("\n"); 121 printf("\t\t\t\t\tDUMPING DATA\n"); 122 for (i = 0; i < 4; i++) { 123 int j; 124 printf("\t\t\t\t\t%03d - ", i*4); 125 ptr = (u8 *)&cmd->response[i]; 126 ptr += 3; 127 for (j = 0; j < 4; j++) 128 printf("%02X ", *ptr--); 129 printf("\n"); 130 } 131 break; 132 case MMC_RSP_R3: 133 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 134 cmd->response[0]); 135 break; 136 default: 137 printf("\t\tERROR MMC rsp not supported\n"); 138 break; 139 } 140 } 141 } 142 143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 144 { 145 int status; 146 147 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 148 printf("CURR STATE:%d\n", status); 149 } 150 #endif 151 152 #if !CONFIG_IS_ENABLED(DM_MMC) 153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 154 { 155 int ret; 156 157 mmmc_trace_before_send(mmc, cmd); 158 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 159 mmmc_trace_after_send(mmc, cmd, ret); 160 161 return ret; 162 } 163 #endif 164 165 int mmc_send_status(struct mmc *mmc, int timeout) 166 { 167 struct mmc_cmd cmd; 168 int err, retries = 5; 169 170 cmd.cmdidx = MMC_CMD_SEND_STATUS; 171 cmd.resp_type = MMC_RSP_R1; 172 if (!mmc_host_is_spi(mmc)) 173 cmd.cmdarg = mmc->rca << 16; 174 175 while (1) { 176 err = mmc_send_cmd(mmc, &cmd, NULL); 177 if (!err) { 178 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) && 179 
(cmd.response[0] & MMC_STATUS_CURR_STATE) != 180 MMC_STATE_PRG) 181 break; 182 else if (cmd.response[0] & MMC_STATUS_MASK) { 183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 184 printf("Status Error: 0x%08X\n", 185 cmd.response[0]); 186 #endif 187 return -ECOMM; 188 } 189 } else if (--retries < 0) 190 return err; 191 192 if (timeout-- <= 0) 193 break; 194 195 udelay(1000); 196 } 197 198 mmc_trace_state(mmc, &cmd); 199 if (timeout <= 0) { 200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 201 printf("Timeout waiting card ready\n"); 202 #endif 203 return -ETIMEDOUT; 204 } 205 206 return 0; 207 } 208 209 int mmc_set_blocklen(struct mmc *mmc, int len) 210 { 211 struct mmc_cmd cmd; 212 213 if (mmc->ddr_mode) 214 return 0; 215 216 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN; 217 cmd.resp_type = MMC_RSP_R1; 218 cmd.cmdarg = len; 219 220 return mmc_send_cmd(mmc, &cmd, NULL); 221 } 222 223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start, 224 lbaint_t blkcnt) 225 { 226 struct mmc_cmd cmd; 227 struct mmc_data data; 228 229 if (blkcnt > 1) 230 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 231 else 232 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 233 234 if (mmc->high_capacity) 235 cmd.cmdarg = start; 236 else 237 cmd.cmdarg = start * mmc->read_bl_len; 238 239 cmd.resp_type = MMC_RSP_R1; 240 241 data.dest = dst; 242 data.blocks = blkcnt; 243 data.blocksize = mmc->read_bl_len; 244 data.flags = MMC_DATA_READ; 245 246 if (mmc_send_cmd(mmc, &cmd, &data)) 247 return 0; 248 249 if (blkcnt > 1) { 250 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 251 cmd.cmdarg = 0; 252 cmd.resp_type = MMC_RSP_R1b; 253 if (mmc_send_cmd(mmc, &cmd, NULL)) { 254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 255 printf("mmc fail to send stop cmd\n"); 256 #endif 257 return 0; 258 } 259 } 260 261 return blkcnt; 262 } 263 264 #if CONFIG_IS_ENABLED(BLK) 265 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst) 
266 #else 267 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt, 268 void *dst) 269 #endif 270 { 271 #if CONFIG_IS_ENABLED(BLK) 272 struct blk_desc *block_dev = dev_get_uclass_platdata(dev); 273 #endif 274 int dev_num = block_dev->devnum; 275 int err; 276 lbaint_t cur, blocks_todo = blkcnt; 277 278 if (blkcnt == 0) 279 return 0; 280 281 struct mmc *mmc = find_mmc_device(dev_num); 282 if (!mmc) 283 return 0; 284 285 if (CONFIG_IS_ENABLED(MMC_TINY)) 286 err = mmc_switch_part(mmc, block_dev->hwpart); 287 else 288 err = blk_dselect_hwpart(block_dev, block_dev->hwpart); 289 290 if (err < 0) 291 return 0; 292 293 if ((start + blkcnt) > block_dev->lba) { 294 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 295 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n", 296 start + blkcnt, block_dev->lba); 297 #endif 298 return 0; 299 } 300 301 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) { 302 debug("%s: Failed to set blocklen\n", __func__); 303 return 0; 304 } 305 306 do { 307 cur = (blocks_todo > mmc->cfg->b_max) ? 
308 mmc->cfg->b_max : blocks_todo; 309 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 310 debug("%s: Failed to read blocks\n", __func__); 311 return 0; 312 } 313 blocks_todo -= cur; 314 start += cur; 315 dst += cur * mmc->read_bl_len; 316 } while (blocks_todo > 0); 317 318 return blkcnt; 319 } 320 321 static void mmc_set_timing(struct mmc *mmc, uint timing) 322 { 323 mmc->timing = timing; 324 mmc_set_ios(mmc); 325 } 326 327 static int mmc_go_idle(struct mmc *mmc) 328 { 329 struct mmc_cmd cmd; 330 int err; 331 332 udelay(1000); 333 334 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE; 335 cmd.cmdarg = 0; 336 cmd.resp_type = MMC_RSP_NONE; 337 338 err = mmc_send_cmd(mmc, &cmd, NULL); 339 340 if (err) 341 return err; 342 343 udelay(2000); 344 345 return 0; 346 } 347 348 static int sd_send_op_cond(struct mmc *mmc) 349 { 350 int timeout = 1000; 351 int err; 352 struct mmc_cmd cmd; 353 354 while (1) { 355 cmd.cmdidx = MMC_CMD_APP_CMD; 356 cmd.resp_type = MMC_RSP_R1; 357 cmd.cmdarg = 0; 358 359 err = mmc_send_cmd(mmc, &cmd, NULL); 360 361 if (err) 362 return err; 363 364 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND; 365 cmd.resp_type = MMC_RSP_R3; 366 367 /* 368 * Most cards do not answer if some reserved bits 369 * in the ocr are set. However, Some controller 370 * can set bit 7 (reserved for low voltages), but 371 * how to manage low voltages SD card is not yet 372 * specified. 373 */ 374 cmd.cmdarg = mmc_host_is_spi(mmc) ? 
0 : 375 (mmc->cfg->voltages & 0xff8000); 376 377 if (mmc->version == SD_VERSION_2) 378 cmd.cmdarg |= OCR_HCS; 379 380 err = mmc_send_cmd(mmc, &cmd, NULL); 381 382 if (err) 383 return err; 384 385 if (cmd.response[0] & OCR_BUSY) 386 break; 387 388 if (timeout-- <= 0) 389 return -EOPNOTSUPP; 390 391 udelay(1000); 392 } 393 394 if (mmc->version != SD_VERSION_2) 395 mmc->version = SD_VERSION_1_0; 396 397 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 398 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 399 cmd.resp_type = MMC_RSP_R3; 400 cmd.cmdarg = 0; 401 402 err = mmc_send_cmd(mmc, &cmd, NULL); 403 404 if (err) 405 return err; 406 } 407 408 mmc->ocr = cmd.response[0]; 409 410 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 411 mmc->rca = 0; 412 413 return 0; 414 } 415 416 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg) 417 { 418 struct mmc_cmd cmd; 419 int err; 420 421 cmd.cmdidx = MMC_CMD_SEND_OP_COND; 422 cmd.resp_type = MMC_RSP_R3; 423 cmd.cmdarg = 0; 424 if (use_arg && !mmc_host_is_spi(mmc)) 425 cmd.cmdarg = OCR_HCS | 426 (mmc->cfg->voltages & 427 (mmc->ocr & OCR_VOLTAGE_MASK)) | 428 (mmc->ocr & OCR_ACCESS_MODE); 429 430 err = mmc_send_cmd(mmc, &cmd, NULL); 431 if (err) 432 return err; 433 mmc->ocr = cmd.response[0]; 434 return 0; 435 } 436 437 static int mmc_send_op_cond(struct mmc *mmc) 438 { 439 int err, i; 440 441 /* Some cards seem to need this */ 442 mmc_go_idle(mmc); 443 444 /* Asking to the card its capabilities */ 445 for (i = 0; i < 2; i++) { 446 err = mmc_send_op_cond_iter(mmc, i != 0); 447 if (err) 448 return err; 449 450 /* exit if not busy (flag seems to be inverted) */ 451 if (mmc->ocr & OCR_BUSY) 452 break; 453 } 454 mmc->op_cond_pending = 1; 455 return 0; 456 } 457 458 static int mmc_complete_op_cond(struct mmc *mmc) 459 { 460 struct mmc_cmd cmd; 461 int timeout = 1000; 462 uint start; 463 int err; 464 465 mmc->op_cond_pending = 0; 466 if (!(mmc->ocr & OCR_BUSY)) { 467 /* Some cards seem to need this */ 468 mmc_go_idle(mmc); 469 470 
start = get_timer(0); 471 while (1) { 472 err = mmc_send_op_cond_iter(mmc, 1); 473 if (err) 474 return err; 475 if (mmc->ocr & OCR_BUSY) 476 break; 477 if (get_timer(start) > timeout) 478 return -EOPNOTSUPP; 479 udelay(100); 480 } 481 } 482 483 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 484 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 485 cmd.resp_type = MMC_RSP_R3; 486 cmd.cmdarg = 0; 487 488 err = mmc_send_cmd(mmc, &cmd, NULL); 489 490 if (err) 491 return err; 492 493 mmc->ocr = cmd.response[0]; 494 } 495 496 mmc->version = MMC_VERSION_UNKNOWN; 497 498 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 499 mmc->rca = 1; 500 501 return 0; 502 } 503 504 505 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd) 506 { 507 struct mmc_cmd cmd; 508 struct mmc_data data; 509 int err; 510 511 /* Get the Card Status Register */ 512 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD; 513 cmd.resp_type = MMC_RSP_R1; 514 cmd.cmdarg = 0; 515 516 data.dest = (char *)ext_csd; 517 data.blocks = 1; 518 data.blocksize = MMC_MAX_BLOCK_LEN; 519 data.flags = MMC_DATA_READ; 520 521 err = mmc_send_cmd(mmc, &cmd, &data); 522 523 return err; 524 } 525 526 static int mmc_poll_for_busy(struct mmc *mmc) 527 { 528 struct mmc_cmd cmd; 529 u8 busy = true; 530 uint start; 531 int ret; 532 int timeout = 1000; 533 534 cmd.cmdidx = MMC_CMD_SEND_STATUS; 535 cmd.resp_type = MMC_RSP_R1; 536 cmd.cmdarg = mmc->rca << 16; 537 538 start = get_timer(0); 539 540 do { 541 if (mmc_can_card_busy(mmc)) { 542 busy = mmc_card_busy(mmc); 543 } else { 544 ret = mmc_send_cmd(mmc, &cmd, NULL); 545 546 if (ret) 547 return ret; 548 549 if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR) 550 return -EBADMSG; 551 busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) == 552 MMC_STATE_PRG; 553 } 554 555 if (get_timer(start) > timeout && busy) 556 return -ETIMEDOUT; 557 } while (busy); 558 559 return 0; 560 } 561 562 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value, 563 u8 send_status) 564 { 565 struct mmc_cmd cmd; 566 int 
retries = 3; 567 int ret; 568 569 cmd.cmdidx = MMC_CMD_SWITCH; 570 cmd.resp_type = MMC_RSP_R1b; 571 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 572 (index << 16) | 573 (value << 8); 574 575 do { 576 ret = mmc_send_cmd(mmc, &cmd, NULL); 577 578 if (!ret && send_status) 579 return mmc_poll_for_busy(mmc); 580 } while (--retries > 0 && ret); 581 582 return ret; 583 } 584 585 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value) 586 { 587 return __mmc_switch(mmc, set, index, value, true); 588 } 589 590 static int mmc_select_hs(struct mmc *mmc) 591 { 592 int ret; 593 594 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 595 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS); 596 597 if (!ret) 598 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 599 600 return ret; 601 } 602 603 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd) 604 { 605 u8 card_type; 606 u32 host_caps, avail_type = 0; 607 608 card_type = ext_csd[EXT_CSD_CARD_TYPE]; 609 host_caps = mmc->cfg->host_caps; 610 611 if ((host_caps & MMC_MODE_HS) && 612 (card_type & EXT_CSD_CARD_TYPE_26)) 613 avail_type |= EXT_CSD_CARD_TYPE_26; 614 615 if ((host_caps & MMC_MODE_HS) && 616 (card_type & EXT_CSD_CARD_TYPE_52)) 617 avail_type |= EXT_CSD_CARD_TYPE_52; 618 619 /* 620 * For the moment, u-boot doesn't support signal voltage 621 * switch, therefor we assume that host support ddr52 622 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and 623 * hs400 are the same). 624 */ 625 if ((host_caps & MMC_MODE_DDR_52MHz) && 626 (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)) 627 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; 628 629 if ((host_caps & MMC_MODE_HS200) && 630 (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V)) 631 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; 632 633 /* 634 * If host can support HS400, it means that host can also 635 * support HS200. 
636 */ 637 if ((host_caps & MMC_MODE_HS400) && 638 (host_caps & MMC_MODE_8BIT) && 639 (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 640 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 641 EXT_CSD_CARD_TYPE_HS400_1_8V; 642 643 if ((host_caps & MMC_MODE_HS400ES) && 644 (host_caps & MMC_MODE_8BIT) && 645 ext_csd[EXT_CSD_STROBE_SUPPORT] && 646 (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 647 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 648 EXT_CSD_CARD_TYPE_HS400_1_8V | 649 EXT_CSD_CARD_TYPE_HS400ES; 650 651 return avail_type; 652 } 653 654 static int mmc_change_freq(struct mmc *mmc) 655 { 656 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 657 char cardtype; 658 u32 avail_type; 659 int err; 660 661 mmc->card_caps = 0; 662 663 if (mmc_host_is_spi(mmc)) 664 return 0; 665 666 /* Only version 4 supports high-speed */ 667 if (mmc->version < MMC_VERSION_4) 668 return 0; 669 670 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT; 671 672 err = mmc_send_ext_csd(mmc, ext_csd); 673 674 if (err) 675 return err; 676 677 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf; 678 avail_type = mmc_select_card_type(mmc, ext_csd); 679 680 if (avail_type & EXT_CSD_CARD_TYPE_HS) 681 err = mmc_select_hs(mmc); 682 else 683 err = -EINVAL; 684 685 if (err) 686 return err; 687 688 /* Now check to see that it worked */ 689 err = mmc_send_ext_csd(mmc, ext_csd); 690 691 if (err) 692 return err; 693 694 /* No high-speed support */ 695 if (!ext_csd[EXT_CSD_HS_TIMING]) 696 return 0; 697 698 /* High Speed is set, there are two types: 52MHz and 26MHz */ 699 if (cardtype & EXT_CSD_CARD_TYPE_52) { 700 if (cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V) 701 mmc->card_caps |= MMC_MODE_DDR_52MHz; 702 mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS; 703 } else { 704 mmc->card_caps |= MMC_MODE_HS; 705 } 706 707 return 0; 708 } 709 710 static int mmc_set_capacity(struct mmc *mmc, int part_num) 711 { 712 switch (part_num) { 713 case 0: 714 mmc->capacity = mmc->capacity_user; 715 break; 716 case 1: 717 case 2: 718 
mmc->capacity = mmc->capacity_boot; 719 break; 720 case 3: 721 mmc->capacity = mmc->capacity_rpmb; 722 break; 723 case 4: 724 case 5: 725 case 6: 726 case 7: 727 mmc->capacity = mmc->capacity_gp[part_num - 4]; 728 break; 729 default: 730 return -1; 731 } 732 733 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len); 734 735 return 0; 736 } 737 738 int mmc_switch_part(struct mmc *mmc, unsigned int part_num) 739 { 740 int ret; 741 742 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF, 743 (mmc->part_config & ~PART_ACCESS_MASK) 744 | (part_num & PART_ACCESS_MASK)); 745 746 /* 747 * Set the capacity if the switch succeeded or was intended 748 * to return to representing the raw device. 749 */ 750 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) { 751 ret = mmc_set_capacity(mmc, part_num); 752 mmc_get_blk_desc(mmc)->hwpart = part_num; 753 } 754 755 return ret; 756 } 757 758 int mmc_hwpart_config(struct mmc *mmc, 759 const struct mmc_hwpart_conf *conf, 760 enum mmc_hwpart_conf_mode mode) 761 { 762 u8 part_attrs = 0; 763 u32 enh_size_mult; 764 u32 enh_start_addr; 765 u32 gp_size_mult[4]; 766 u32 max_enh_size_mult; 767 u32 tot_enh_size_mult = 0; 768 u8 wr_rel_set; 769 int i, pidx, err; 770 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 771 772 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE) 773 return -EINVAL; 774 775 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) { 776 printf("eMMC >= 4.4 required for enhanced user data area\n"); 777 return -EMEDIUMTYPE; 778 } 779 780 if (!(mmc->part_support & PART_SUPPORT)) { 781 printf("Card does not support partitioning\n"); 782 return -EMEDIUMTYPE; 783 } 784 785 if (!mmc->hc_wp_grp_size) { 786 printf("Card does not define HC WP group size\n"); 787 return -EMEDIUMTYPE; 788 } 789 790 /* check partition alignment and total enhanced size */ 791 if (conf->user.enh_size) { 792 if (conf->user.enh_size % mmc->hc_wp_grp_size || 793 conf->user.enh_start % 
mmc->hc_wp_grp_size) { 794 printf("User data enhanced area not HC WP group " 795 "size aligned\n"); 796 return -EINVAL; 797 } 798 part_attrs |= EXT_CSD_ENH_USR; 799 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size; 800 if (mmc->high_capacity) { 801 enh_start_addr = conf->user.enh_start; 802 } else { 803 enh_start_addr = (conf->user.enh_start << 9); 804 } 805 } else { 806 enh_size_mult = 0; 807 enh_start_addr = 0; 808 } 809 tot_enh_size_mult += enh_size_mult; 810 811 for (pidx = 0; pidx < 4; pidx++) { 812 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) { 813 printf("GP%i partition not HC WP group size " 814 "aligned\n", pidx+1); 815 return -EINVAL; 816 } 817 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size; 818 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) { 819 part_attrs |= EXT_CSD_ENH_GP(pidx); 820 tot_enh_size_mult += gp_size_mult[pidx]; 821 } 822 } 823 824 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) { 825 printf("Card does not support enhanced attribute\n"); 826 return -EMEDIUMTYPE; 827 } 828 829 err = mmc_send_ext_csd(mmc, ext_csd); 830 if (err) 831 return err; 832 833 max_enh_size_mult = 834 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) + 835 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) + 836 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT]; 837 if (tot_enh_size_mult > max_enh_size_mult) { 838 printf("Total enhanced size exceeds maximum (%u > %u)\n", 839 tot_enh_size_mult, max_enh_size_mult); 840 return -EMEDIUMTYPE; 841 } 842 843 /* The default value of EXT_CSD_WR_REL_SET is device 844 * dependent, the values can only be changed if the 845 * EXT_CSD_HS_CTRL_REL bit is set. The values can be 846 * changed only once and before partitioning is completed. 
*/ 847 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 848 if (conf->user.wr_rel_change) { 849 if (conf->user.wr_rel_set) 850 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR; 851 else 852 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR; 853 } 854 for (pidx = 0; pidx < 4; pidx++) { 855 if (conf->gp_part[pidx].wr_rel_change) { 856 if (conf->gp_part[pidx].wr_rel_set) 857 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx); 858 else 859 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx); 860 } 861 } 862 863 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] && 864 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) { 865 puts("Card does not support host controlled partition write " 866 "reliability settings\n"); 867 return -EMEDIUMTYPE; 868 } 869 870 if (ext_csd[EXT_CSD_PARTITION_SETTING] & 871 EXT_CSD_PARTITION_SETTING_COMPLETED) { 872 printf("Card already partitioned\n"); 873 return -EPERM; 874 } 875 876 if (mode == MMC_HWPART_CONF_CHECK) 877 return 0; 878 879 /* Partitioning requires high-capacity size definitions */ 880 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) { 881 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 882 EXT_CSD_ERASE_GROUP_DEF, 1); 883 884 if (err) 885 return err; 886 887 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 888 889 /* update erase group size to be high-capacity */ 890 mmc->erase_grp_size = 891 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 892 893 } 894 895 /* all OK, write the configuration */ 896 for (i = 0; i < 4; i++) { 897 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 898 EXT_CSD_ENH_START_ADDR+i, 899 (enh_start_addr >> (i*8)) & 0xFF); 900 if (err) 901 return err; 902 } 903 for (i = 0; i < 3; i++) { 904 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 905 EXT_CSD_ENH_SIZE_MULT+i, 906 (enh_size_mult >> (i*8)) & 0xFF); 907 if (err) 908 return err; 909 } 910 for (pidx = 0; pidx < 4; pidx++) { 911 for (i = 0; i < 3; i++) { 912 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 913 EXT_CSD_GP_SIZE_MULT+pidx*3+i, 914 (gp_size_mult[pidx] >> (i*8)) & 0xFF); 915 if (err) 916 return err; 917 } 918 } 
919 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 920 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs); 921 if (err) 922 return err; 923 924 if (mode == MMC_HWPART_CONF_SET) 925 return 0; 926 927 /* The WR_REL_SET is a write-once register but shall be 928 * written before setting PART_SETTING_COMPLETED. As it is 929 * write-once we can only write it when completing the 930 * partitioning. */ 931 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) { 932 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 933 EXT_CSD_WR_REL_SET, wr_rel_set); 934 if (err) 935 return err; 936 } 937 938 /* Setting PART_SETTING_COMPLETED confirms the partition 939 * configuration but it only becomes effective after power 940 * cycle, so we do not adjust the partition related settings 941 * in the mmc struct. */ 942 943 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 944 EXT_CSD_PARTITION_SETTING, 945 EXT_CSD_PARTITION_SETTING_COMPLETED); 946 if (err) 947 return err; 948 949 return 0; 950 } 951 952 #if !CONFIG_IS_ENABLED(DM_MMC) 953 int mmc_getcd(struct mmc *mmc) 954 { 955 int cd; 956 957 cd = board_mmc_getcd(mmc); 958 959 if (cd < 0) { 960 if (mmc->cfg->ops->getcd) 961 cd = mmc->cfg->ops->getcd(mmc); 962 else 963 cd = 1; 964 } 965 966 return cd; 967 } 968 #endif 969 970 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp) 971 { 972 struct mmc_cmd cmd; 973 struct mmc_data data; 974 975 /* Switch the frequency */ 976 cmd.cmdidx = SD_CMD_SWITCH_FUNC; 977 cmd.resp_type = MMC_RSP_R1; 978 cmd.cmdarg = (mode << 31) | 0xffffff; 979 cmd.cmdarg &= ~(0xf << (group * 4)); 980 cmd.cmdarg |= value << (group * 4); 981 982 data.dest = (char *)resp; 983 data.blocksize = 64; 984 data.blocks = 1; 985 data.flags = MMC_DATA_READ; 986 987 return mmc_send_cmd(mmc, &cmd, &data); 988 } 989 990 991 static int sd_change_freq(struct mmc *mmc) 992 { 993 int err; 994 struct mmc_cmd cmd; 995 ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2); 996 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16); 997 struct mmc_data data; 
998 int timeout; 999 1000 mmc->card_caps = 0; 1001 1002 if (mmc_host_is_spi(mmc)) 1003 return 0; 1004 1005 /* Read the SCR to find out if this card supports higher speeds */ 1006 cmd.cmdidx = MMC_CMD_APP_CMD; 1007 cmd.resp_type = MMC_RSP_R1; 1008 cmd.cmdarg = mmc->rca << 16; 1009 1010 err = mmc_send_cmd(mmc, &cmd, NULL); 1011 1012 if (err) 1013 return err; 1014 1015 cmd.cmdidx = SD_CMD_APP_SEND_SCR; 1016 cmd.resp_type = MMC_RSP_R1; 1017 cmd.cmdarg = 0; 1018 1019 timeout = 3; 1020 1021 retry_scr: 1022 data.dest = (char *)scr; 1023 data.blocksize = 8; 1024 data.blocks = 1; 1025 data.flags = MMC_DATA_READ; 1026 1027 err = mmc_send_cmd(mmc, &cmd, &data); 1028 1029 if (err) { 1030 if (timeout--) 1031 goto retry_scr; 1032 1033 return err; 1034 } 1035 1036 mmc->scr[0] = __be32_to_cpu(scr[0]); 1037 mmc->scr[1] = __be32_to_cpu(scr[1]); 1038 1039 switch ((mmc->scr[0] >> 24) & 0xf) { 1040 case 0: 1041 mmc->version = SD_VERSION_1_0; 1042 break; 1043 case 1: 1044 mmc->version = SD_VERSION_1_10; 1045 break; 1046 case 2: 1047 mmc->version = SD_VERSION_2; 1048 if ((mmc->scr[0] >> 15) & 0x1) 1049 mmc->version = SD_VERSION_3; 1050 break; 1051 default: 1052 mmc->version = SD_VERSION_1_0; 1053 break; 1054 } 1055 1056 if (mmc->scr[0] & SD_DATA_4BIT) 1057 mmc->card_caps |= MMC_MODE_4BIT; 1058 1059 /* Version 1.0 doesn't support switching */ 1060 if (mmc->version == SD_VERSION_1_0) 1061 return 0; 1062 1063 timeout = 4; 1064 while (timeout--) { 1065 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1, 1066 (u8 *)switch_status); 1067 1068 if (err) 1069 return err; 1070 1071 /* The high-speed function is busy. Try again */ 1072 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY)) 1073 break; 1074 } 1075 1076 /* If high-speed isn't supported, we return */ 1077 if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)) 1078 return 0; 1079 1080 /* 1081 * If the host doesn't support SD_HIGHSPEED, do not switch card to 1082 * HIGHSPEED mode even if the card support SD_HIGHSPPED. 
1083 * This can avoid furthur problem when the card runs in different 1084 * mode between the host. 1085 */ 1086 if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) && 1087 (mmc->cfg->host_caps & MMC_MODE_HS))) 1088 return 0; 1089 1090 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status); 1091 1092 if (err) 1093 return err; 1094 1095 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000) 1096 mmc->card_caps |= MMC_MODE_HS; 1097 1098 return 0; 1099 } 1100 1101 static int sd_read_ssr(struct mmc *mmc) 1102 { 1103 int err, i; 1104 struct mmc_cmd cmd; 1105 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16); 1106 struct mmc_data data; 1107 int timeout = 3; 1108 unsigned int au, eo, et, es; 1109 1110 cmd.cmdidx = MMC_CMD_APP_CMD; 1111 cmd.resp_type = MMC_RSP_R1; 1112 cmd.cmdarg = mmc->rca << 16; 1113 1114 err = mmc_send_cmd(mmc, &cmd, NULL); 1115 if (err) 1116 return err; 1117 1118 cmd.cmdidx = SD_CMD_APP_SD_STATUS; 1119 cmd.resp_type = MMC_RSP_R1; 1120 cmd.cmdarg = 0; 1121 1122 retry_ssr: 1123 data.dest = (char *)ssr; 1124 data.blocksize = 64; 1125 data.blocks = 1; 1126 data.flags = MMC_DATA_READ; 1127 1128 err = mmc_send_cmd(mmc, &cmd, &data); 1129 if (err) { 1130 if (timeout--) 1131 goto retry_ssr; 1132 1133 return err; 1134 } 1135 1136 for (i = 0; i < 16; i++) 1137 ssr[i] = be32_to_cpu(ssr[i]); 1138 1139 au = (ssr[2] >> 12) & 0xF; 1140 if ((au <= 9) || (mmc->version == SD_VERSION_3)) { 1141 mmc->ssr.au = sd_au_size[au]; 1142 es = (ssr[3] >> 24) & 0xFF; 1143 es |= (ssr[2] & 0xFF) << 8; 1144 et = (ssr[3] >> 18) & 0x3F; 1145 if (es && et) { 1146 eo = (ssr[3] >> 16) & 0x3; 1147 mmc->ssr.erase_timeout = (et * 1000) / es; 1148 mmc->ssr.erase_offset = eo * 1000; 1149 } 1150 } else { 1151 debug("Invalid Allocation Unit Size.\n"); 1152 } 1153 1154 return 0; 1155 } 1156 1157 /* frequency bases */ 1158 /* divided by 10 to be nice to platforms without floating point */ 1159 static const int fbase[] = { 1160 10000, 1161 100000, 1162 1000000, 1163 10000000, 1164 }; 1165 
1166 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1167 * to platforms without floating point. 1168 */ 1169 static const u8 multipliers[] = { 1170 0, /* reserved */ 1171 10, 1172 12, 1173 13, 1174 15, 1175 20, 1176 25, 1177 30, 1178 35, 1179 40, 1180 45, 1181 50, 1182 55, 1183 60, 1184 70, 1185 80, 1186 }; 1187 1188 #if !CONFIG_IS_ENABLED(DM_MMC) 1189 static void mmc_set_ios(struct mmc *mmc) 1190 { 1191 if (mmc->cfg->ops->set_ios) 1192 mmc->cfg->ops->set_ios(mmc); 1193 } 1194 1195 static bool mmc_card_busy(struct mmc *mmc) 1196 { 1197 if (!mmc->cfg->ops->card_busy) 1198 return -ENOSYS; 1199 1200 return mmc->cfg->ops->card_busy(mmc); 1201 } 1202 1203 static bool mmc_can_card_busy(struct mmc *) 1204 { 1205 return !!mmc->cfg->ops->card_busy; 1206 } 1207 #endif 1208 1209 void mmc_set_clock(struct mmc *mmc, uint clock) 1210 { 1211 if (clock > mmc->cfg->f_max) 1212 clock = mmc->cfg->f_max; 1213 1214 if (clock < mmc->cfg->f_min) 1215 clock = mmc->cfg->f_min; 1216 1217 mmc->clock = clock; 1218 1219 mmc_set_ios(mmc); 1220 } 1221 1222 static void mmc_set_bus_width(struct mmc *mmc, uint width) 1223 { 1224 mmc->bus_width = width; 1225 1226 mmc_set_ios(mmc); 1227 } 1228 1229 static int mmc_startup(struct mmc *mmc) 1230 { 1231 int err, i; 1232 uint mult, freq; 1233 u64 cmult, csize, capacity; 1234 struct mmc_cmd cmd; 1235 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1236 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN); 1237 bool has_parts = false; 1238 bool part_completed; 1239 struct blk_desc *bdesc; 1240 1241 #ifdef CONFIG_MMC_SPI_CRC_ON 1242 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1243 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1244 cmd.resp_type = MMC_RSP_R1; 1245 cmd.cmdarg = 1; 1246 err = mmc_send_cmd(mmc, &cmd, NULL); 1247 1248 if (err) 1249 return err; 1250 } 1251 #endif 1252 1253 /* Put the Card in Identify Mode */ 1254 cmd.cmdidx = mmc_host_is_spi(mmc) ? 
MMC_CMD_SEND_CID : 1255 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */ 1256 cmd.resp_type = MMC_RSP_R2; 1257 cmd.cmdarg = 0; 1258 1259 err = mmc_send_cmd(mmc, &cmd, NULL); 1260 1261 if (err) 1262 return err; 1263 1264 memcpy(mmc->cid, cmd.response, 16); 1265 1266 /* 1267 * For MMC cards, set the Relative Address. 1268 * For SD cards, get the Relatvie Address. 1269 * This also puts the cards into Standby State 1270 */ 1271 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1272 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR; 1273 cmd.cmdarg = mmc->rca << 16; 1274 cmd.resp_type = MMC_RSP_R6; 1275 1276 err = mmc_send_cmd(mmc, &cmd, NULL); 1277 1278 if (err) 1279 return err; 1280 1281 if (IS_SD(mmc)) 1282 mmc->rca = (cmd.response[0] >> 16) & 0xffff; 1283 } 1284 1285 /* Get the Card-Specific Data */ 1286 cmd.cmdidx = MMC_CMD_SEND_CSD; 1287 cmd.resp_type = MMC_RSP_R2; 1288 cmd.cmdarg = mmc->rca << 16; 1289 1290 err = mmc_send_cmd(mmc, &cmd, NULL); 1291 1292 if (err) 1293 return err; 1294 1295 mmc->csd[0] = cmd.response[0]; 1296 mmc->csd[1] = cmd.response[1]; 1297 mmc->csd[2] = cmd.response[2]; 1298 mmc->csd[3] = cmd.response[3]; 1299 1300 if (mmc->version == MMC_VERSION_UNKNOWN) { 1301 int version = (cmd.response[0] >> 26) & 0xf; 1302 1303 switch (version) { 1304 case 0: 1305 mmc->version = MMC_VERSION_1_2; 1306 break; 1307 case 1: 1308 mmc->version = MMC_VERSION_1_4; 1309 break; 1310 case 2: 1311 mmc->version = MMC_VERSION_2_2; 1312 break; 1313 case 3: 1314 mmc->version = MMC_VERSION_3; 1315 break; 1316 case 4: 1317 mmc->version = MMC_VERSION_4; 1318 break; 1319 default: 1320 mmc->version = MMC_VERSION_1_2; 1321 break; 1322 } 1323 } 1324 1325 /* divide frequency by 10, since the mults are 10x bigger */ 1326 freq = fbase[(cmd.response[0] & 0x7)]; 1327 mult = multipliers[((cmd.response[0] >> 3) & 0xf)]; 1328 1329 mmc->tran_speed = freq * mult; 1330 1331 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1); 1332 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf); 
1333 1334 if (IS_SD(mmc)) 1335 mmc->write_bl_len = mmc->read_bl_len; 1336 else 1337 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf); 1338 1339 if (mmc->high_capacity) { 1340 csize = (mmc->csd[1] & 0x3f) << 16 1341 | (mmc->csd[2] & 0xffff0000) >> 16; 1342 cmult = 8; 1343 } else { 1344 csize = (mmc->csd[1] & 0x3ff) << 2 1345 | (mmc->csd[2] & 0xc0000000) >> 30; 1346 cmult = (mmc->csd[2] & 0x00038000) >> 15; 1347 } 1348 1349 mmc->capacity_user = (csize + 1) << (cmult + 2); 1350 mmc->capacity_user *= mmc->read_bl_len; 1351 mmc->capacity_boot = 0; 1352 mmc->capacity_rpmb = 0; 1353 for (i = 0; i < 4; i++) 1354 mmc->capacity_gp[i] = 0; 1355 1356 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN) 1357 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1358 1359 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN) 1360 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1361 1362 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) { 1363 cmd.cmdidx = MMC_CMD_SET_DSR; 1364 cmd.cmdarg = (mmc->dsr & 0xffff) << 16; 1365 cmd.resp_type = MMC_RSP_NONE; 1366 if (mmc_send_cmd(mmc, &cmd, NULL)) 1367 printf("MMC: SET_DSR failed\n"); 1368 } 1369 1370 /* Select the card, and put it into Transfer Mode */ 1371 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1372 cmd.cmdidx = MMC_CMD_SELECT_CARD; 1373 cmd.resp_type = MMC_RSP_R1; 1374 cmd.cmdarg = mmc->rca << 16; 1375 err = mmc_send_cmd(mmc, &cmd, NULL); 1376 1377 if (err) 1378 return err; 1379 } 1380 1381 /* 1382 * For SD, its erase group is always one sector 1383 */ 1384 mmc->erase_grp_size = 1; 1385 mmc->part_config = MMCPART_NOAVAILABLE; 1386 if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) { 1387 /* check ext_csd version and capacity */ 1388 err = mmc_send_ext_csd(mmc, ext_csd); 1389 if (err) 1390 return err; 1391 if (ext_csd[EXT_CSD_REV] >= 2) { 1392 /* 1393 * According to the JEDEC Standard, the value of 1394 * ext_csd's capacity is valid if the value is more 1395 * than 2GB 1396 */ 1397 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0 1398 | ext_csd[EXT_CSD_SEC_CNT 
+ 1] << 8 1399 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16 1400 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 1401 capacity *= MMC_MAX_BLOCK_LEN; 1402 if ((capacity >> 20) > 2 * 1024) 1403 mmc->capacity_user = capacity; 1404 } 1405 1406 switch (ext_csd[EXT_CSD_REV]) { 1407 case 1: 1408 mmc->version = MMC_VERSION_4_1; 1409 break; 1410 case 2: 1411 mmc->version = MMC_VERSION_4_2; 1412 break; 1413 case 3: 1414 mmc->version = MMC_VERSION_4_3; 1415 break; 1416 case 5: 1417 mmc->version = MMC_VERSION_4_41; 1418 break; 1419 case 6: 1420 mmc->version = MMC_VERSION_4_5; 1421 break; 1422 case 7: 1423 mmc->version = MMC_VERSION_5_0; 1424 break; 1425 case 8: 1426 mmc->version = MMC_VERSION_5_1; 1427 break; 1428 } 1429 1430 /* The partition data may be non-zero but it is only 1431 * effective if PARTITION_SETTING_COMPLETED is set in 1432 * EXT_CSD, so ignore any data if this bit is not set, 1433 * except for enabling the high-capacity group size 1434 * definition (see below). */ 1435 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 1436 EXT_CSD_PARTITION_SETTING_COMPLETED); 1437 1438 /* store the partition info of emmc */ 1439 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 1440 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 1441 ext_csd[EXT_CSD_BOOT_MULT]) 1442 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 1443 if (part_completed && 1444 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 1445 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 1446 1447 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 1448 1449 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 1450 1451 for (i = 0; i < 4; i++) { 1452 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 1453 uint mult = (ext_csd[idx + 2] << 16) + 1454 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 1455 if (mult) 1456 has_parts = true; 1457 if (!part_completed) 1458 continue; 1459 mmc->capacity_gp[i] = mult; 1460 mmc->capacity_gp[i] *= 1461 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1462 mmc->capacity_gp[i] *= 
ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1463 mmc->capacity_gp[i] <<= 19; 1464 } 1465 1466 if (part_completed) { 1467 mmc->enh_user_size = 1468 (ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) + 1469 (ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) + 1470 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 1471 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1472 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1473 mmc->enh_user_size <<= 19; 1474 mmc->enh_user_start = 1475 (ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) + 1476 (ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) + 1477 (ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) + 1478 ext_csd[EXT_CSD_ENH_START_ADDR]; 1479 if (mmc->high_capacity) 1480 mmc->enh_user_start <<= 9; 1481 } 1482 1483 /* 1484 * Host needs to enable ERASE_GRP_DEF bit if device is 1485 * partitioned. This bit will be lost every time after a reset 1486 * or power off. This will affect erase size. 1487 */ 1488 if (part_completed) 1489 has_parts = true; 1490 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 1491 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 1492 has_parts = true; 1493 if (has_parts) { 1494 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1495 EXT_CSD_ERASE_GROUP_DEF, 1); 1496 1497 if (err) 1498 return err; 1499 else 1500 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1501 } 1502 1503 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 1504 /* Read out group size from ext_csd */ 1505 mmc->erase_grp_size = 1506 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1507 /* 1508 * if high capacity and partition setting completed 1509 * SEC_COUNT is valid even if it is smaller than 2 GiB 1510 * JEDEC Standard JESD84-B45, 6.2.4 1511 */ 1512 if (mmc->high_capacity && part_completed) { 1513 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 1514 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 1515 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 1516 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 1517 capacity *= MMC_MAX_BLOCK_LEN; 1518 mmc->capacity_user = capacity; 1519 } 1520 } else { 1521 /* Calculate the group size 
from the csd value. */ 1522 int erase_gsz, erase_gmul; 1523 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10; 1524 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5; 1525 mmc->erase_grp_size = (erase_gsz + 1) 1526 * (erase_gmul + 1); 1527 } 1528 1529 mmc->hc_wp_grp_size = 1024 1530 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1531 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1532 1533 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 1534 } 1535 1536 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart); 1537 if (err) 1538 return err; 1539 1540 if (IS_SD(mmc)) 1541 err = sd_change_freq(mmc); 1542 else 1543 err = mmc_change_freq(mmc); 1544 1545 if (err) 1546 return err; 1547 1548 /* Restrict card's capabilities by what the host can do */ 1549 mmc->card_caps &= mmc->cfg->host_caps; 1550 1551 if (IS_SD(mmc)) { 1552 if (mmc->card_caps & MMC_MODE_4BIT) { 1553 cmd.cmdidx = MMC_CMD_APP_CMD; 1554 cmd.resp_type = MMC_RSP_R1; 1555 cmd.cmdarg = mmc->rca << 16; 1556 1557 err = mmc_send_cmd(mmc, &cmd, NULL); 1558 if (err) 1559 return err; 1560 1561 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH; 1562 cmd.resp_type = MMC_RSP_R1; 1563 cmd.cmdarg = 2; 1564 err = mmc_send_cmd(mmc, &cmd, NULL); 1565 if (err) 1566 return err; 1567 1568 mmc_set_bus_width(mmc, 4); 1569 } 1570 1571 err = sd_read_ssr(mmc); 1572 if (err) 1573 return err; 1574 1575 if (mmc->card_caps & MMC_MODE_HS) 1576 mmc->tran_speed = 50000000; 1577 else 1578 mmc->tran_speed = 25000000; 1579 } else if (mmc->version >= MMC_VERSION_4) { 1580 /* Only version 4 of MMC supports wider bus widths */ 1581 int idx; 1582 1583 /* An array of possible bus widths in order of preference */ 1584 static unsigned ext_csd_bits[] = { 1585 EXT_CSD_DDR_BUS_WIDTH_8, 1586 EXT_CSD_DDR_BUS_WIDTH_4, 1587 EXT_CSD_BUS_WIDTH_8, 1588 EXT_CSD_BUS_WIDTH_4, 1589 EXT_CSD_BUS_WIDTH_1, 1590 }; 1591 1592 /* An array to map CSD bus widths to host cap bits */ 1593 static unsigned ext_to_hostcaps[] = { 1594 [EXT_CSD_DDR_BUS_WIDTH_4] = 1595 MMC_MODE_DDR_52MHz | MMC_MODE_4BIT, 1596 
[EXT_CSD_DDR_BUS_WIDTH_8] = 1597 MMC_MODE_DDR_52MHz | MMC_MODE_8BIT, 1598 [EXT_CSD_BUS_WIDTH_4] = MMC_MODE_4BIT, 1599 [EXT_CSD_BUS_WIDTH_8] = MMC_MODE_8BIT, 1600 }; 1601 1602 /* An array to map chosen bus width to an integer */ 1603 static unsigned widths[] = { 1604 8, 4, 8, 4, 1, 1605 }; 1606 1607 for (idx=0; idx < ARRAY_SIZE(ext_csd_bits); idx++) { 1608 unsigned int extw = ext_csd_bits[idx]; 1609 unsigned int caps = ext_to_hostcaps[extw]; 1610 1611 /* 1612 * If the bus width is still not changed, 1613 * don't try to set the default again. 1614 * Otherwise, recover from switch attempts 1615 * by switching to 1-bit bus width. 1616 */ 1617 if (extw == EXT_CSD_BUS_WIDTH_1 && 1618 mmc->bus_width == 1) { 1619 err = 0; 1620 break; 1621 } 1622 1623 /* 1624 * Check to make sure the card and controller support 1625 * these capabilities 1626 */ 1627 if ((mmc->card_caps & caps) != caps) 1628 continue; 1629 1630 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1631 EXT_CSD_BUS_WIDTH, extw); 1632 1633 if (err) 1634 continue; 1635 1636 mmc->ddr_mode = (caps & MMC_MODE_DDR_52MHz) ? 
1 : 0; 1637 mmc_set_bus_width(mmc, widths[idx]); 1638 1639 err = mmc_send_ext_csd(mmc, test_csd); 1640 1641 if (err) 1642 continue; 1643 1644 /* Only compare read only fields */ 1645 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] 1646 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] && 1647 ext_csd[EXT_CSD_HC_WP_GRP_SIZE] 1648 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] && 1649 ext_csd[EXT_CSD_REV] 1650 == test_csd[EXT_CSD_REV] && 1651 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1652 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] && 1653 memcmp(&ext_csd[EXT_CSD_SEC_CNT], 1654 &test_csd[EXT_CSD_SEC_CNT], 4) == 0) 1655 break; 1656 else 1657 err = -EBADMSG; 1658 } 1659 1660 if (err) 1661 return err; 1662 1663 if (mmc->card_caps & MMC_MODE_HS) { 1664 if (mmc->card_caps & MMC_MODE_HS_52MHz) 1665 mmc->tran_speed = 52000000; 1666 else 1667 mmc->tran_speed = 26000000; 1668 } 1669 } 1670 1671 mmc_set_clock(mmc, mmc->tran_speed); 1672 1673 /* Fix the block length for DDR mode */ 1674 if (mmc->ddr_mode) { 1675 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1676 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1677 } 1678 1679 /* fill in device description */ 1680 bdesc = mmc_get_blk_desc(mmc); 1681 bdesc->lun = 0; 1682 bdesc->hwpart = 0; 1683 bdesc->type = 0; 1684 bdesc->blksz = mmc->read_bl_len; 1685 bdesc->log2blksz = LOG2(bdesc->blksz); 1686 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len); 1687 #if !defined(CONFIG_SPL_BUILD) || \ 1688 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \ 1689 !defined(CONFIG_USE_TINY_PRINTF)) 1690 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x", 1691 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff), 1692 (mmc->cid[3] >> 16) & 0xffff); 1693 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff, 1694 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, 1695 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff, 1696 (mmc->cid[2] >> 24) & 0xff); 1697 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf, 1698 (mmc->cid[2] >> 16) & 0xf); 1699 #else 1700 bdesc->vendor[0] = 0; 1701 bdesc->product[0] = 0; 1702 
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

/*
 * Send CMD8 (SEND_IF_COND) to probe for an SD version 2.00+ card.
 *
 * The argument carries a host-supply-voltage bit (set when the host
 * supports any voltage in the 2.7-3.6V window, OCR mask 0xff8000) and the
 * 0xaa check pattern.  A v2 card echoes the check pattern back in R7.
 *
 * Returns 0 and sets mmc->version = SD_VERSION_2 if the card answered
 * with the correct pattern, -EOPNOTSUPP if the echo is wrong, or the
 * transport error from mmc_send_cmd() (a timeout here is the normal
 * outcome for v1.x SD cards and for eMMC devices).
 */
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	/* A v2 card must echo the 0xaa check pattern in the low byte */
	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
}
#endif

/*
 * Power up the card supply before the first command is sent.
 *
 * With driver model: look up the "vmmc-supply" regulator attached to the
 * controller's DT node and enable it.  A missing regulator is not an
 * error (many boards hard-wire the supply); only a failed enable is
 * reported.  Without driver model, fall back to the board's weak
 * board_mmc_power_init() hook.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* No regulator described for this host: assume always-on */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * Begin the card identification sequence (first half of mmc_init()).
 *
 * Checks card presence, powers the supply, resets the host to 1-bit /
 * minimum-clock / legacy timing, then issues CMD0 (GO_IDLE), CMD8
 * (SEND_IF_COND) and ACMD41; if ACMD41 times out the card is assumed to
 * be eMMC/MMC and CMD1 is tried instead.  On success sets
 * mmc->init_in_progress so mmc_complete_init() can finish the job
 * (possibly after the card's internal power-up delay).
 *
 * Returns 0 on success, -ENOMEDIUM when no card is present,
 * -EOPNOTSUPP when the card answers neither ACMD41 nor CMD1, or the
 * first transport error encountered.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* Start identification in the slowest, most compatible host mode */
	mmc->ddr_mode = 0;
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2.  The result is deliberately ignored: a
	 * failure here just means the card is SD v1.x or MMC, which the
	 * following commands sort out.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

/*
 * Finish the identification sequence started by mmc_start_init():
 * complete a pending CMD1 (op-cond) poll if needed, then run the full
 * mmc_startup() enumeration.  Updates mmc->has_init accordingly.
 */
static int mmc_complete_init(struct mmc *mmc)
{
int err = 0; 1848 1849 mmc->init_in_progress = 0; 1850 if (mmc->op_cond_pending) 1851 err = mmc_complete_op_cond(mmc); 1852 1853 if (!err) 1854 err = mmc_startup(mmc); 1855 if (err) 1856 mmc->has_init = 0; 1857 else 1858 mmc->has_init = 1; 1859 return err; 1860 } 1861 1862 int mmc_init(struct mmc *mmc) 1863 { 1864 int err = 0; 1865 __maybe_unused unsigned start; 1866 #if CONFIG_IS_ENABLED(DM_MMC) 1867 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev); 1868 1869 upriv->mmc = mmc; 1870 #endif 1871 if (mmc->has_init) 1872 return 0; 1873 1874 start = get_timer(0); 1875 1876 if (!mmc->init_in_progress) 1877 err = mmc_start_init(mmc); 1878 1879 if (!err) 1880 err = mmc_complete_init(mmc); 1881 if (err) 1882 printf("%s: %d, time %lu\n", __func__, err, get_timer(start)); 1883 1884 return err; 1885 } 1886 1887 int mmc_set_dsr(struct mmc *mmc, u16 val) 1888 { 1889 mmc->dsr = val; 1890 return 0; 1891 } 1892 1893 /* CPU-specific MMC initializations */ 1894 __weak int cpu_mmc_init(bd_t *bis) 1895 { 1896 return -1; 1897 } 1898 1899 /* board-specific MMC initializations. */ 1900 __weak int board_mmc_init(bd_t *bis) 1901 { 1902 return -1; 1903 } 1904 1905 void mmc_set_preinit(struct mmc *mmc, int preinit) 1906 { 1907 mmc->preinit = preinit; 1908 } 1909 1910 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD) 1911 static int mmc_probe(bd_t *bis) 1912 { 1913 return 0; 1914 } 1915 #elif CONFIG_IS_ENABLED(DM_MMC) 1916 static int mmc_probe(bd_t *bis) 1917 { 1918 int ret, i; 1919 struct uclass *uc; 1920 struct udevice *dev; 1921 1922 ret = uclass_get(UCLASS_MMC, &uc); 1923 if (ret) 1924 return ret; 1925 1926 /* 1927 * Try to add them in sequence order. Really with driver model we 1928 * should allow holes, but the current MMC list does not allow that. 1929 * So if we request 0, 1, 3 we will get 0, 1, 2. 
1930 */ 1931 for (i = 0; ; i++) { 1932 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev); 1933 if (ret == -ENODEV) 1934 break; 1935 } 1936 uclass_foreach_dev(dev, uc) { 1937 ret = device_probe(dev); 1938 if (ret) 1939 printf("%s - probe failed: %d\n", dev->name, ret); 1940 } 1941 1942 return 0; 1943 } 1944 #else 1945 static int mmc_probe(bd_t *bis) 1946 { 1947 if (board_mmc_init(bis) < 0) 1948 cpu_mmc_init(bis); 1949 1950 return 0; 1951 } 1952 #endif 1953 1954 int mmc_initialize(bd_t *bis) 1955 { 1956 static int initialized = 0; 1957 int ret; 1958 if (initialized) /* Avoid initializing mmc multiple times */ 1959 return 0; 1960 initialized = 1; 1961 1962 #if !CONFIG_IS_ENABLED(BLK) 1963 #if !CONFIG_IS_ENABLED(MMC_TINY) 1964 mmc_list_init(); 1965 #endif 1966 #endif 1967 ret = mmc_probe(bis); 1968 if (ret) 1969 return ret; 1970 1971 #ifndef CONFIG_SPL_BUILD 1972 print_mmc_devices(','); 1973 #endif 1974 1975 mmc_do_preinit(); 1976 return 0; 1977 } 1978 1979 #ifdef CONFIG_CMD_BKOPS_ENABLE 1980 int mmc_set_bkops_enable(struct mmc *mmc) 1981 { 1982 int err; 1983 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1984 1985 err = mmc_send_ext_csd(mmc, ext_csd); 1986 if (err) { 1987 puts("Could not get ext_csd register values\n"); 1988 return err; 1989 } 1990 1991 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) { 1992 puts("Background operations not supported on device\n"); 1993 return -EMEDIUMTYPE; 1994 } 1995 1996 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) { 1997 puts("Background operations already enabled\n"); 1998 return 0; 1999 } 2000 2001 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1); 2002 if (err) { 2003 puts("Failed to enable manual background operations\n"); 2004 return err; 2005 } 2006 2007 puts("Enabled manual background operations\n"); 2008 2009 return 0; 2010 } 2011 #endif 2012