/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <command.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

/*
 * SD allocation-unit sizes in 512-byte sectors, indexed by the AU_SIZE
 * field of the SD Status register (index 0 means "not defined").
 */
static const unsigned int sd_au_size[] = {
	0, SZ_16K / 512, SZ_32K / 512,
	SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
	SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
	SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
	SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
};

#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY supports exactly one, statically allocated, MMC device. */
static struct mmc mmc_static;

/* Return the single static device; @dev_num is ignored in MMC_TINY. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Kick off early initialization of the static device if preinit is set. */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* Return the block descriptor embedded in the mmc structure. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
/* Board hook: return write-protect state, or -1 for "don't know". */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Query write-protect state: board hook first, then the host driver's
 * getwp op; assume writable (0) when neither can tell.
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Board hook: return card-detect state, or -1 for "don't know". */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif

#ifdef CONFIG_MMC_TRACE
/* Log a command index and argument just before it is sent. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/* Log the result (error code, or decoded response) after a command. */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* dump each 32-bit response word byte by byte, MSB first */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Log the card's current state as decoded from an R1 status response. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
/* Send a command via the host driver's send_cmd op, with optional tracing. */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif

/*
 * Poll CMD13 (SEND_STATUS) until the card is ready for data and no
 * longer in the programming state, for at most @timeout milliseconds.
 * Returns 0 when ready, -ECOMM on a reported status error, -ETIMEDOUT
 * on timeout, or the transport error after 5 failed sends.
 * NOTE(review): cmd.cmdarg is left uninitialized on SPI hosts -
 * presumably ignored by SPI transports, but worth confirming.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			    MMC_STATE_PRG)
				break;
			else if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				printf("Status Error: 0x%08X\n",
					cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Set the card's block length with CMD16. Skipped in DDR mode, where
 * the block length must not be changed.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	return mmc_send_cmd(mmc, &cmd, NULL);
}

/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18.
 * High-capacity cards are addressed by block number, others by byte
 * offset. Returns the number of blocks read, or 0 on any error.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	/* multi-block reads must be terminated with CMD12 */
	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}

/*
 * Block-layer read entry point. Selects the hardware partition,
 * validates the range against the device size, sets the block length
 * and reads in chunks of at most cfg->b_max blocks.
 * Returns the number of blocks read, or 0 on error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* MMC_TINY has no block-layer hwpart handling; switch directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}

/* Reset the card to idle state with CMD0, with settle delays around it. */
static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

/*
 * SD card power-up: loop ACMD41 until the card reports power-up done
 * (OCR_BUSY set) or ~1s elapses; afterwards read the OCR explicitly on
 * SPI hosts and record OCR, high-capacity flag and RCA.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ?
			0 :
			(mmc->cfg->voltages & 0xff8000);

		/* advertise SDHC/SDXC support only to SD 2.0 cards */
		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}

/*
 * Send one CMD1 (SEND_OP_COND) iteration and latch the returned OCR.
 * With @use_arg set (and a non-SPI host), request HCS plus the voltage
 * window common to the host config and the card's previous OCR.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

/*
 * Start eMMC power-up: probe capabilities with CMD1 and leave the
 * (possibly still busy) card to be finished by mmc_complete_op_cond().
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Asking to the card its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}

/*
 * Finish eMMC power-up: poll CMD1 until the card leaves busy (up to
 * 1s), read the OCR on SPI hosts, then record capacity mode and RCA.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}


/* Read the 512-byte EXT_CSD register (CMD8) into @ext_csd. */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	/* Get the Card Status Register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	return err;
}

/*
 * Write one EXT_CSD byte with CMD6 (SWITCH), retrying the command up
 * to 3 times, then wait for the card to report ready again.
 */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	while (retries > 0) {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		/* Waiting for the ready status */
		if (!ret) {
			ret = mmc_send_status(mmc, timeout);
			return ret;
		}

		retries--;
	}

	return ret;

}

/*
 * Intersect the card's EXT_CSD CARD_TYPE capabilities with the host
 * capabilities and return the mask of usable timing modes.
 */
static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, u-boot doesn't support signal voltage
	 * switch, therefor we assume that host support ddr52
	 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and
	 * hs400 are the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V;

	/* HS400ES additionally requires the card's strobe-support flag */
	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V |
			      EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}

/*
 * Switch an eMMC (version >= 4) to high-speed timing if possible and
 * record the resulting bus-width/speed capabilities in mmc->card_caps.
 * Returns 0 on success (including "no high-speed support"), or a
 * negative error code.
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	char cardtype;
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;
	avail_type = mmc_select_card_type(mmc, ext_csd);

	if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1);
	else
		err = -EINVAL;

	if (err)
		return err;

	/* Now check to see that it worked */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* No high-speed support */
	if (!ext_csd[EXT_CSD_HS_TIMING])
		return 0;

	/* High Speed is set, there are two types: 52MHz and 26MHz */
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
	} else {
		mmc->card_caps |= MMC_MODE_HS;
	}

	return 0;
}

/*
 * Set mmc->capacity (and the block descriptor's lba) for the given
 * hardware partition: 0 = user area, 1-2 = boot, 3 = RPMB, 4-7 = GP.
 * Returns -1 for an unknown partition number.
 */
static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}

/*
 * Switch the active hardware partition via the PARTITION_ACCESS bits
 * of EXT_CSD PARTITION_CONFIG and refresh the cached capacity/hwpart.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}

/*
 * Validate and program the eMMC hardware-partition layout described by
 * @conf. MMC_HWPART_CONF_CHECK only validates; _SET also writes the
 * sizes and attributes; _COMPLETE additionally writes the write-
 * reliability bits and sets PARTITION_SETTING_COMPLETED (the new
 * layout only becomes effective after a power cycle).
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size /
			mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed.
	 */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Query card-detect state: board hook first, then the host driver's
 * getcd op; assume present (1) when neither can tell.
 */
int mmc_getcd(struct mmc *mmc)
{
	int cd;

	cd = board_mmc_getcd(mmc);

	if (cd < 0) {
		if (mmc->cfg->ops->getcd)
			cd = mmc->cfg->ops->getcd(mmc);
		else
			cd = 1;
	}

	return cd;
}
#endif

/*
 * Issue CMD6 (SWITCH_FUNC): select function @value in function group
 * @group, in check or switch @mode, and read the 64-byte switch status
 * block back into @resp.
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}


/*
 * Read the SCR, derive the SD version and 4-bit capability, and switch
 * the card to high-speed mode when both card and host support it.
 */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR is big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC (and SD_SPEC3) fields of the SCR give the SD version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card support SD_HIGHSPEED.
	 * This can avoid further problem when the card runs in different
	 * mode between the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* function group 1 result of 0x1 means high-speed was selected */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}

/*
 * Read the 64-byte SD Status register (ACMD13) and extract the
 * allocation-unit size and erase timing parameters into mmc->ssr.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE values above 9 are only defined for SD 3.0 cards */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}

/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

1113 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1114 * to platforms without floating point. 1115 */ 1116 static const u8 multipliers[] = { 1117 0, /* reserved */ 1118 10, 1119 12, 1120 13, 1121 15, 1122 20, 1123 25, 1124 30, 1125 35, 1126 40, 1127 45, 1128 50, 1129 55, 1130 60, 1131 70, 1132 80, 1133 }; 1134 1135 #if !CONFIG_IS_ENABLED(DM_MMC) 1136 static void mmc_set_ios(struct mmc *mmc) 1137 { 1138 if (mmc->cfg->ops->set_ios) 1139 mmc->cfg->ops->set_ios(mmc); 1140 } 1141 #endif 1142 1143 void mmc_set_clock(struct mmc *mmc, uint clock) 1144 { 1145 if (clock > mmc->cfg->f_max) 1146 clock = mmc->cfg->f_max; 1147 1148 if (clock < mmc->cfg->f_min) 1149 clock = mmc->cfg->f_min; 1150 1151 mmc->clock = clock; 1152 1153 mmc_set_ios(mmc); 1154 } 1155 1156 static void mmc_set_bus_width(struct mmc *mmc, uint width) 1157 { 1158 mmc->bus_width = width; 1159 1160 mmc_set_ios(mmc); 1161 } 1162 1163 static int mmc_startup(struct mmc *mmc) 1164 { 1165 int err, i; 1166 uint mult, freq; 1167 u64 cmult, csize, capacity; 1168 struct mmc_cmd cmd; 1169 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1170 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN); 1171 bool has_parts = false; 1172 bool part_completed; 1173 struct blk_desc *bdesc; 1174 1175 #ifdef CONFIG_MMC_SPI_CRC_ON 1176 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1177 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1178 cmd.resp_type = MMC_RSP_R1; 1179 cmd.cmdarg = 1; 1180 err = mmc_send_cmd(mmc, &cmd, NULL); 1181 1182 if (err) 1183 return err; 1184 } 1185 #endif 1186 1187 /* Put the Card in Identify Mode */ 1188 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID : 1189 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */ 1190 cmd.resp_type = MMC_RSP_R2; 1191 cmd.cmdarg = 0; 1192 1193 err = mmc_send_cmd(mmc, &cmd, NULL); 1194 1195 if (err) 1196 return err; 1197 1198 memcpy(mmc->cid, cmd.response, 16); 1199 1200 /* 1201 * For MMC cards, set the Relative Address. 
1202 * For SD cards, get the Relatvie Address. 1203 * This also puts the cards into Standby State 1204 */ 1205 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1206 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR; 1207 cmd.cmdarg = mmc->rca << 16; 1208 cmd.resp_type = MMC_RSP_R6; 1209 1210 err = mmc_send_cmd(mmc, &cmd, NULL); 1211 1212 if (err) 1213 return err; 1214 1215 if (IS_SD(mmc)) 1216 mmc->rca = (cmd.response[0] >> 16) & 0xffff; 1217 } 1218 1219 /* Get the Card-Specific Data */ 1220 cmd.cmdidx = MMC_CMD_SEND_CSD; 1221 cmd.resp_type = MMC_RSP_R2; 1222 cmd.cmdarg = mmc->rca << 16; 1223 1224 err = mmc_send_cmd(mmc, &cmd, NULL); 1225 1226 if (err) 1227 return err; 1228 1229 mmc->csd[0] = cmd.response[0]; 1230 mmc->csd[1] = cmd.response[1]; 1231 mmc->csd[2] = cmd.response[2]; 1232 mmc->csd[3] = cmd.response[3]; 1233 1234 if (mmc->version == MMC_VERSION_UNKNOWN) { 1235 int version = (cmd.response[0] >> 26) & 0xf; 1236 1237 switch (version) { 1238 case 0: 1239 mmc->version = MMC_VERSION_1_2; 1240 break; 1241 case 1: 1242 mmc->version = MMC_VERSION_1_4; 1243 break; 1244 case 2: 1245 mmc->version = MMC_VERSION_2_2; 1246 break; 1247 case 3: 1248 mmc->version = MMC_VERSION_3; 1249 break; 1250 case 4: 1251 mmc->version = MMC_VERSION_4; 1252 break; 1253 default: 1254 mmc->version = MMC_VERSION_1_2; 1255 break; 1256 } 1257 } 1258 1259 /* divide frequency by 10, since the mults are 10x bigger */ 1260 freq = fbase[(cmd.response[0] & 0x7)]; 1261 mult = multipliers[((cmd.response[0] >> 3) & 0xf)]; 1262 1263 mmc->tran_speed = freq * mult; 1264 1265 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1); 1266 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf); 1267 1268 if (IS_SD(mmc)) 1269 mmc->write_bl_len = mmc->read_bl_len; 1270 else 1271 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf); 1272 1273 if (mmc->high_capacity) { 1274 csize = (mmc->csd[1] & 0x3f) << 16 1275 | (mmc->csd[2] & 0xffff0000) >> 16; 1276 cmult = 8; 1277 } else { 1278 csize = (mmc->csd[1] & 0x3ff) 
<< 2 1279 | (mmc->csd[2] & 0xc0000000) >> 30; 1280 cmult = (mmc->csd[2] & 0x00038000) >> 15; 1281 } 1282 1283 mmc->capacity_user = (csize + 1) << (cmult + 2); 1284 mmc->capacity_user *= mmc->read_bl_len; 1285 mmc->capacity_boot = 0; 1286 mmc->capacity_rpmb = 0; 1287 for (i = 0; i < 4; i++) 1288 mmc->capacity_gp[i] = 0; 1289 1290 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN) 1291 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1292 1293 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN) 1294 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1295 1296 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) { 1297 cmd.cmdidx = MMC_CMD_SET_DSR; 1298 cmd.cmdarg = (mmc->dsr & 0xffff) << 16; 1299 cmd.resp_type = MMC_RSP_NONE; 1300 if (mmc_send_cmd(mmc, &cmd, NULL)) 1301 printf("MMC: SET_DSR failed\n"); 1302 } 1303 1304 /* Select the card, and put it into Transfer Mode */ 1305 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1306 cmd.cmdidx = MMC_CMD_SELECT_CARD; 1307 cmd.resp_type = MMC_RSP_R1; 1308 cmd.cmdarg = mmc->rca << 16; 1309 err = mmc_send_cmd(mmc, &cmd, NULL); 1310 1311 if (err) 1312 return err; 1313 } 1314 1315 /* 1316 * For SD, its erase group is always one sector 1317 */ 1318 mmc->erase_grp_size = 1; 1319 mmc->part_config = MMCPART_NOAVAILABLE; 1320 if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) { 1321 /* check ext_csd version and capacity */ 1322 err = mmc_send_ext_csd(mmc, ext_csd); 1323 if (err) 1324 return err; 1325 if (ext_csd[EXT_CSD_REV] >= 2) { 1326 /* 1327 * According to the JEDEC Standard, the value of 1328 * ext_csd's capacity is valid if the value is more 1329 * than 2GB 1330 */ 1331 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0 1332 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8 1333 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16 1334 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 1335 capacity *= MMC_MAX_BLOCK_LEN; 1336 if ((capacity >> 20) > 2 * 1024) 1337 mmc->capacity_user = capacity; 1338 } 1339 1340 switch (ext_csd[EXT_CSD_REV]) { 1341 case 1: 1342 mmc->version = MMC_VERSION_4_1; 1343 break; 1344 
case 2: 1345 mmc->version = MMC_VERSION_4_2; 1346 break; 1347 case 3: 1348 mmc->version = MMC_VERSION_4_3; 1349 break; 1350 case 5: 1351 mmc->version = MMC_VERSION_4_41; 1352 break; 1353 case 6: 1354 mmc->version = MMC_VERSION_4_5; 1355 break; 1356 case 7: 1357 mmc->version = MMC_VERSION_5_0; 1358 break; 1359 case 8: 1360 mmc->version = MMC_VERSION_5_1; 1361 break; 1362 } 1363 1364 /* The partition data may be non-zero but it is only 1365 * effective if PARTITION_SETTING_COMPLETED is set in 1366 * EXT_CSD, so ignore any data if this bit is not set, 1367 * except for enabling the high-capacity group size 1368 * definition (see below). */ 1369 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 1370 EXT_CSD_PARTITION_SETTING_COMPLETED); 1371 1372 /* store the partition info of emmc */ 1373 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 1374 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 1375 ext_csd[EXT_CSD_BOOT_MULT]) 1376 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 1377 if (part_completed && 1378 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 1379 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 1380 1381 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 1382 1383 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 1384 1385 for (i = 0; i < 4; i++) { 1386 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 1387 uint mult = (ext_csd[idx + 2] << 16) + 1388 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 1389 if (mult) 1390 has_parts = true; 1391 if (!part_completed) 1392 continue; 1393 mmc->capacity_gp[i] = mult; 1394 mmc->capacity_gp[i] *= 1395 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1396 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1397 mmc->capacity_gp[i] <<= 19; 1398 } 1399 1400 if (part_completed) { 1401 mmc->enh_user_size = 1402 (ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) + 1403 (ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) + 1404 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 1405 mmc->enh_user_size *= 
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1406 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1407 mmc->enh_user_size <<= 19; 1408 mmc->enh_user_start = 1409 (ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) + 1410 (ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) + 1411 (ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) + 1412 ext_csd[EXT_CSD_ENH_START_ADDR]; 1413 if (mmc->high_capacity) 1414 mmc->enh_user_start <<= 9; 1415 } 1416 1417 /* 1418 * Host needs to enable ERASE_GRP_DEF bit if device is 1419 * partitioned. This bit will be lost every time after a reset 1420 * or power off. This will affect erase size. 1421 */ 1422 if (part_completed) 1423 has_parts = true; 1424 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 1425 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 1426 has_parts = true; 1427 if (has_parts) { 1428 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1429 EXT_CSD_ERASE_GROUP_DEF, 1); 1430 1431 if (err) 1432 return err; 1433 else 1434 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1435 } 1436 1437 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 1438 /* Read out group size from ext_csd */ 1439 mmc->erase_grp_size = 1440 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1441 /* 1442 * if high capacity and partition setting completed 1443 * SEC_COUNT is valid even if it is smaller than 2 GiB 1444 * JEDEC Standard JESD84-B45, 6.2.4 1445 */ 1446 if (mmc->high_capacity && part_completed) { 1447 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 1448 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 1449 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 1450 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 1451 capacity *= MMC_MAX_BLOCK_LEN; 1452 mmc->capacity_user = capacity; 1453 } 1454 } else { 1455 /* Calculate the group size from the csd value. 
*/ 1456 int erase_gsz, erase_gmul; 1457 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10; 1458 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5; 1459 mmc->erase_grp_size = (erase_gsz + 1) 1460 * (erase_gmul + 1); 1461 } 1462 1463 mmc->hc_wp_grp_size = 1024 1464 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1465 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1466 1467 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 1468 } 1469 1470 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart); 1471 if (err) 1472 return err; 1473 1474 if (IS_SD(mmc)) 1475 err = sd_change_freq(mmc); 1476 else 1477 err = mmc_change_freq(mmc); 1478 1479 if (err) 1480 return err; 1481 1482 /* Restrict card's capabilities by what the host can do */ 1483 mmc->card_caps &= mmc->cfg->host_caps; 1484 1485 if (IS_SD(mmc)) { 1486 if (mmc->card_caps & MMC_MODE_4BIT) { 1487 cmd.cmdidx = MMC_CMD_APP_CMD; 1488 cmd.resp_type = MMC_RSP_R1; 1489 cmd.cmdarg = mmc->rca << 16; 1490 1491 err = mmc_send_cmd(mmc, &cmd, NULL); 1492 if (err) 1493 return err; 1494 1495 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH; 1496 cmd.resp_type = MMC_RSP_R1; 1497 cmd.cmdarg = 2; 1498 err = mmc_send_cmd(mmc, &cmd, NULL); 1499 if (err) 1500 return err; 1501 1502 mmc_set_bus_width(mmc, 4); 1503 } 1504 1505 err = sd_read_ssr(mmc); 1506 if (err) 1507 return err; 1508 1509 if (mmc->card_caps & MMC_MODE_HS) 1510 mmc->tran_speed = 50000000; 1511 else 1512 mmc->tran_speed = 25000000; 1513 } else if (mmc->version >= MMC_VERSION_4) { 1514 /* Only version 4 of MMC supports wider bus widths */ 1515 int idx; 1516 1517 /* An array of possible bus widths in order of preference */ 1518 static unsigned ext_csd_bits[] = { 1519 EXT_CSD_DDR_BUS_WIDTH_8, 1520 EXT_CSD_DDR_BUS_WIDTH_4, 1521 EXT_CSD_BUS_WIDTH_8, 1522 EXT_CSD_BUS_WIDTH_4, 1523 EXT_CSD_BUS_WIDTH_1, 1524 }; 1525 1526 /* An array to map CSD bus widths to host cap bits */ 1527 static unsigned ext_to_hostcaps[] = { 1528 [EXT_CSD_DDR_BUS_WIDTH_4] = 1529 MMC_MODE_DDR_52MHz | MMC_MODE_4BIT, 1530 [EXT_CSD_DDR_BUS_WIDTH_8] = 1531 
MMC_MODE_DDR_52MHz | MMC_MODE_8BIT, 1532 [EXT_CSD_BUS_WIDTH_4] = MMC_MODE_4BIT, 1533 [EXT_CSD_BUS_WIDTH_8] = MMC_MODE_8BIT, 1534 }; 1535 1536 /* An array to map chosen bus width to an integer */ 1537 static unsigned widths[] = { 1538 8, 4, 8, 4, 1, 1539 }; 1540 1541 for (idx=0; idx < ARRAY_SIZE(ext_csd_bits); idx++) { 1542 unsigned int extw = ext_csd_bits[idx]; 1543 unsigned int caps = ext_to_hostcaps[extw]; 1544 1545 /* 1546 * If the bus width is still not changed, 1547 * don't try to set the default again. 1548 * Otherwise, recover from switch attempts 1549 * by switching to 1-bit bus width. 1550 */ 1551 if (extw == EXT_CSD_BUS_WIDTH_1 && 1552 mmc->bus_width == 1) { 1553 err = 0; 1554 break; 1555 } 1556 1557 /* 1558 * Check to make sure the card and controller support 1559 * these capabilities 1560 */ 1561 if ((mmc->card_caps & caps) != caps) 1562 continue; 1563 1564 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1565 EXT_CSD_BUS_WIDTH, extw); 1566 1567 if (err) 1568 continue; 1569 1570 mmc->ddr_mode = (caps & MMC_MODE_DDR_52MHz) ? 
1 : 0; 1571 mmc_set_bus_width(mmc, widths[idx]); 1572 1573 err = mmc_send_ext_csd(mmc, test_csd); 1574 1575 if (err) 1576 continue; 1577 1578 /* Only compare read only fields */ 1579 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] 1580 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] && 1581 ext_csd[EXT_CSD_HC_WP_GRP_SIZE] 1582 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] && 1583 ext_csd[EXT_CSD_REV] 1584 == test_csd[EXT_CSD_REV] && 1585 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1586 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] && 1587 memcmp(&ext_csd[EXT_CSD_SEC_CNT], 1588 &test_csd[EXT_CSD_SEC_CNT], 4) == 0) 1589 break; 1590 else 1591 err = -EBADMSG; 1592 } 1593 1594 if (err) 1595 return err; 1596 1597 if (mmc->card_caps & MMC_MODE_HS) { 1598 if (mmc->card_caps & MMC_MODE_HS_52MHz) 1599 mmc->tran_speed = 52000000; 1600 else 1601 mmc->tran_speed = 26000000; 1602 } 1603 } 1604 1605 mmc_set_clock(mmc, mmc->tran_speed); 1606 1607 /* Fix the block length for DDR mode */ 1608 if (mmc->ddr_mode) { 1609 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1610 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1611 } 1612 1613 /* fill in device description */ 1614 bdesc = mmc_get_blk_desc(mmc); 1615 bdesc->lun = 0; 1616 bdesc->hwpart = 0; 1617 bdesc->type = 0; 1618 bdesc->blksz = mmc->read_bl_len; 1619 bdesc->log2blksz = LOG2(bdesc->blksz); 1620 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len); 1621 #if !defined(CONFIG_SPL_BUILD) || \ 1622 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \ 1623 !defined(CONFIG_USE_TINY_PRINTF)) 1624 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x", 1625 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff), 1626 (mmc->cid[3] >> 16) & 0xffff); 1627 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff, 1628 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, 1629 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff, 1630 (mmc->cid[2] >> 24) & 0xff); 1631 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf, 1632 (mmc->cid[2] >> 16) & 0xf); 1633 #else 1634 bdesc->vendor[0] = 0; 1635 bdesc->product[0] = 0; 1636 
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

/*
 * Send SD CMD8 (SEND_IF_COND) to detect a version 2.0+ SD card.  The
 * argument advertises host support for 2.7-3.6V (bit 8, taken from
 * cfg->voltages) together with the 0xaa check pattern; a v2 card echoes
 * the pattern back in the response, and on success the card version is
 * recorded as SD_VERSION_2.
 *
 * Returns 0 on success, -EOPNOTSUPP if the echoed check pattern does not
 * match, or the error from mmc_send_cmd().
 */
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	/* A v2 card must echo the 0xaa check pattern back to us */
	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
}
#endif

/*
 * Power up the card before initialization.  With driver model (and
 * CONFIG_DM_REGULATOR outside SPL) this enables the "vmmc-supply"
 * regulator bound to the device; a missing regulator is not treated as
 * an error.  Without driver model it falls back to the weak
 * board_mmc_power_init() hook above.
 *
 * Returns 0 on success (including "no regulator found"), or the error
 * from regulator_set_enable().
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* Not fatal: the supply may be fixed/always-on */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * Begin the card initialization sequence: check card presence, power the
 * card, reset it with CMD0 and start operating-condition negotiation (SD
 * first, then MMC if the SD query times out).  On success
 * mmc->init_in_progress is set and the sequence is finished later by
 * mmc_complete_init().
 *
 * Returns 0 on success, -ENOMEDIUM if no card is present, -EOPNOTSUPP if
 * the card answers neither the SD nor the MMC operating-condition query,
 * or a negative error code from an earlier step.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* Start in lowest-common-denominator mode: 1-bit bus, minimum clock */
	mmc->ddr_mode = 0;
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2.  The result is deliberately not checked
	 * here (err is overwritten below): sd_send_op_cond() is what
	 * decides whether an SD card is actually present.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

/*
 * Finish an initialization started by mmc_start_init(): complete the
 * operating-condition negotiation if it is still pending, then run the
 * full card startup.  Updates mmc->has_init to reflect the outcome and
 * clears the in-progress flag.
 */
static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

/*
 * Public entry point: run the whole init sequence (start + complete)
 * unless the card is already initialized.  With driver model this also
 * records the mmc in its device's uclass-private data.  On failure the
 * error code and elapsed time are printed.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

/* Record the DSR value that mmc_startup() programs via MMC_CMD_SET_DSR */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

/* Mark the device for early initialization from mmc_do_preinit() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* In SPL with driver model, devices are probed when first used */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Probe every MMC controller known to driver model */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	/* Now probe (and report, but tolerate, failures on) each device */
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Without driver model, fall back to the board/CPU init hooks */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

/*
 * One-time MMC subsystem bring-up: probe the controllers, list the
 * devices (outside SPL) and pre-initialize any card flagged via
 * mmc_set_preinit().  Safe to call repeatedly; only the first call
 * does any work.
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS_EN) on an eMMC device.
 *
 * NOTE(review): per JEDEC, the BKOPS_EN field is write-once on many
 * eMMC revisions, so enabling it cannot be undone — confirm this is
 * intended before use on production parts.
 *
 * Returns 0 if enabled (now or already), -EMEDIUMTYPE if the device
 * does not advertise BKOPS support, or a negative error code from the
 * EXT_CSD read/switch.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif