1 /* 2 * Copyright 2008, Freescale Semiconductor, Inc 3 * Andy Fleming 4 * 5 * Based vaguely on the Linux code 6 * 7 * SPDX-License-Identifier: GPL-2.0+ 8 */ 9 10 #include <config.h> 11 #include <common.h> 12 #include <command.h> 13 #include <dm.h> 14 #include <dm/device-internal.h> 15 #include <errno.h> 16 #include <mmc.h> 17 #include <part.h> 18 #include <power/regulator.h> 19 #include <malloc.h> 20 #include <memalign.h> 21 #include <linux/list.h> 22 #include <div64.h> 23 #include "mmc_private.h" 24 25 static const unsigned int sd_au_size[] = { 26 0, SZ_16K / 512, SZ_32K / 512, 27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, 31 }; 32 33 #if CONFIG_IS_ENABLED(MMC_TINY) 34 static struct mmc mmc_static; 35 struct mmc *find_mmc_device(int dev_num) 36 { 37 return &mmc_static; 38 } 39 40 void mmc_do_preinit(void) 41 { 42 struct mmc *m = &mmc_static; 43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 44 mmc_set_preinit(m, 1); 45 #endif 46 if (m->preinit) 47 mmc_start_init(m); 48 } 49 50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc) 51 { 52 return &mmc->block_dev; 53 } 54 #endif 55 56 #if !CONFIG_IS_ENABLED(DM_MMC) 57 __weak int board_mmc_getwp(struct mmc *mmc) 58 { 59 return -1; 60 } 61 62 int mmc_getwp(struct mmc *mmc) 63 { 64 int wp; 65 66 wp = board_mmc_getwp(mmc); 67 68 if (wp < 0) { 69 if (mmc->cfg->ops->getwp) 70 wp = mmc->cfg->ops->getwp(mmc); 71 else 72 wp = 0; 73 } 74 75 return wp; 76 } 77 78 __weak int board_mmc_getcd(struct mmc *mmc) 79 { 80 return -1; 81 } 82 #endif 83 84 #ifdef CONFIG_MMC_TRACE 85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd) 86 { 87 printf("CMD_SEND:%d\n", cmd->cmdidx); 88 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg); 89 } 90 91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret) 92 { 93 int i; 94 u8 *ptr; 95 96 if (ret) { 97 
printf("\t\tRET\t\t\t %d\n", ret); 98 } else { 99 switch (cmd->resp_type) { 100 case MMC_RSP_NONE: 101 printf("\t\tMMC_RSP_NONE\n"); 102 break; 103 case MMC_RSP_R1: 104 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 105 cmd->response[0]); 106 break; 107 case MMC_RSP_R1b: 108 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 109 cmd->response[0]); 110 break; 111 case MMC_RSP_R2: 112 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 113 cmd->response[0]); 114 printf("\t\t \t\t 0x%08X \n", 115 cmd->response[1]); 116 printf("\t\t \t\t 0x%08X \n", 117 cmd->response[2]); 118 printf("\t\t \t\t 0x%08X \n", 119 cmd->response[3]); 120 printf("\n"); 121 printf("\t\t\t\t\tDUMPING DATA\n"); 122 for (i = 0; i < 4; i++) { 123 int j; 124 printf("\t\t\t\t\t%03d - ", i*4); 125 ptr = (u8 *)&cmd->response[i]; 126 ptr += 3; 127 for (j = 0; j < 4; j++) 128 printf("%02X ", *ptr--); 129 printf("\n"); 130 } 131 break; 132 case MMC_RSP_R3: 133 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 134 cmd->response[0]); 135 break; 136 default: 137 printf("\t\tERROR MMC rsp not supported\n"); 138 break; 139 } 140 } 141 } 142 143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 144 { 145 int status; 146 147 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 148 printf("CURR STATE:%d\n", status); 149 } 150 #endif 151 152 #if !CONFIG_IS_ENABLED(DM_MMC) 153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 154 { 155 int ret; 156 157 mmmc_trace_before_send(mmc, cmd); 158 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 159 mmmc_trace_after_send(mmc, cmd, ret); 160 161 return ret; 162 } 163 #endif 164 165 int mmc_send_status(struct mmc *mmc, int timeout) 166 { 167 struct mmc_cmd cmd; 168 int err, retries = 5; 169 170 cmd.cmdidx = MMC_CMD_SEND_STATUS; 171 cmd.resp_type = MMC_RSP_R1; 172 if (!mmc_host_is_spi(mmc)) 173 cmd.cmdarg = mmc->rca << 16; 174 175 while (1) { 176 err = mmc_send_cmd(mmc, &cmd, NULL); 177 if (!err) { 178 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) && 179 
(cmd.response[0] & MMC_STATUS_CURR_STATE) != 180 MMC_STATE_PRG) 181 break; 182 else if (cmd.response[0] & MMC_STATUS_MASK) { 183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 184 printf("Status Error: 0x%08X\n", 185 cmd.response[0]); 186 #endif 187 return -ECOMM; 188 } 189 } else if (--retries < 0) 190 return err; 191 192 if (timeout-- <= 0) 193 break; 194 195 udelay(1000); 196 } 197 198 mmc_trace_state(mmc, &cmd); 199 if (timeout <= 0) { 200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 201 printf("Timeout waiting card ready\n"); 202 #endif 203 return -ETIMEDOUT; 204 } 205 206 return 0; 207 } 208 209 int mmc_set_blocklen(struct mmc *mmc, int len) 210 { 211 struct mmc_cmd cmd; 212 213 if (mmc->ddr_mode) 214 return 0; 215 216 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN; 217 cmd.resp_type = MMC_RSP_R1; 218 cmd.cmdarg = len; 219 220 return mmc_send_cmd(mmc, &cmd, NULL); 221 } 222 223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start, 224 lbaint_t blkcnt) 225 { 226 struct mmc_cmd cmd; 227 struct mmc_data data; 228 229 if (blkcnt > 1) 230 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 231 else 232 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 233 234 if (mmc->high_capacity) 235 cmd.cmdarg = start; 236 else 237 cmd.cmdarg = start * mmc->read_bl_len; 238 239 cmd.resp_type = MMC_RSP_R1; 240 241 data.dest = dst; 242 data.blocks = blkcnt; 243 data.blocksize = mmc->read_bl_len; 244 data.flags = MMC_DATA_READ; 245 246 if (mmc_send_cmd(mmc, &cmd, &data)) 247 return 0; 248 249 if (blkcnt > 1) { 250 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 251 cmd.cmdarg = 0; 252 cmd.resp_type = MMC_RSP_R1b; 253 if (mmc_send_cmd(mmc, &cmd, NULL)) { 254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 255 printf("mmc fail to send stop cmd\n"); 256 #endif 257 return 0; 258 } 259 } 260 261 return blkcnt; 262 } 263 264 #if CONFIG_IS_ENABLED(BLK) 265 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst) 
266 #else 267 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt, 268 void *dst) 269 #endif 270 { 271 #if CONFIG_IS_ENABLED(BLK) 272 struct blk_desc *block_dev = dev_get_uclass_platdata(dev); 273 #endif 274 int dev_num = block_dev->devnum; 275 int err; 276 lbaint_t cur, blocks_todo = blkcnt; 277 278 if (blkcnt == 0) 279 return 0; 280 281 struct mmc *mmc = find_mmc_device(dev_num); 282 if (!mmc) 283 return 0; 284 285 if (CONFIG_IS_ENABLED(MMC_TINY)) 286 err = mmc_switch_part(mmc, block_dev->hwpart); 287 else 288 err = blk_dselect_hwpart(block_dev, block_dev->hwpart); 289 290 if (err < 0) 291 return 0; 292 293 if ((start + blkcnt) > block_dev->lba) { 294 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 295 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n", 296 start + blkcnt, block_dev->lba); 297 #endif 298 return 0; 299 } 300 301 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) { 302 debug("%s: Failed to set blocklen\n", __func__); 303 return 0; 304 } 305 306 do { 307 cur = (blocks_todo > mmc->cfg->b_max) ? 
308 mmc->cfg->b_max : blocks_todo; 309 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 310 debug("%s: Failed to read blocks\n", __func__); 311 return 0; 312 } 313 blocks_todo -= cur; 314 start += cur; 315 dst += cur * mmc->read_bl_len; 316 } while (blocks_todo > 0); 317 318 return blkcnt; 319 } 320 321 static void mmc_set_timing(struct mmc *mmc, uint timing) 322 { 323 mmc->timing = timing; 324 mmc_set_ios(mmc); 325 } 326 327 static int mmc_go_idle(struct mmc *mmc) 328 { 329 struct mmc_cmd cmd; 330 int err; 331 332 udelay(1000); 333 334 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE; 335 cmd.cmdarg = 0; 336 cmd.resp_type = MMC_RSP_NONE; 337 338 err = mmc_send_cmd(mmc, &cmd, NULL); 339 340 if (err) 341 return err; 342 343 udelay(2000); 344 345 return 0; 346 } 347 348 static int sd_send_op_cond(struct mmc *mmc) 349 { 350 int timeout = 1000; 351 int err; 352 struct mmc_cmd cmd; 353 354 while (1) { 355 cmd.cmdidx = MMC_CMD_APP_CMD; 356 cmd.resp_type = MMC_RSP_R1; 357 cmd.cmdarg = 0; 358 359 err = mmc_send_cmd(mmc, &cmd, NULL); 360 361 if (err) 362 return err; 363 364 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND; 365 cmd.resp_type = MMC_RSP_R3; 366 367 /* 368 * Most cards do not answer if some reserved bits 369 * in the ocr are set. However, Some controller 370 * can set bit 7 (reserved for low voltages), but 371 * how to manage low voltages SD card is not yet 372 * specified. 373 */ 374 cmd.cmdarg = mmc_host_is_spi(mmc) ? 
0 : 375 (mmc->cfg->voltages & 0xff8000); 376 377 if (mmc->version == SD_VERSION_2) 378 cmd.cmdarg |= OCR_HCS; 379 380 err = mmc_send_cmd(mmc, &cmd, NULL); 381 382 if (err) 383 return err; 384 385 if (cmd.response[0] & OCR_BUSY) 386 break; 387 388 if (timeout-- <= 0) 389 return -EOPNOTSUPP; 390 391 udelay(1000); 392 } 393 394 if (mmc->version != SD_VERSION_2) 395 mmc->version = SD_VERSION_1_0; 396 397 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 398 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 399 cmd.resp_type = MMC_RSP_R3; 400 cmd.cmdarg = 0; 401 402 err = mmc_send_cmd(mmc, &cmd, NULL); 403 404 if (err) 405 return err; 406 } 407 408 mmc->ocr = cmd.response[0]; 409 410 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 411 mmc->rca = 0; 412 413 return 0; 414 } 415 416 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg) 417 { 418 struct mmc_cmd cmd; 419 int err; 420 421 cmd.cmdidx = MMC_CMD_SEND_OP_COND; 422 cmd.resp_type = MMC_RSP_R3; 423 cmd.cmdarg = 0; 424 if (use_arg && !mmc_host_is_spi(mmc)) 425 cmd.cmdarg = OCR_HCS | 426 (mmc->cfg->voltages & 427 (mmc->ocr & OCR_VOLTAGE_MASK)) | 428 (mmc->ocr & OCR_ACCESS_MODE); 429 430 err = mmc_send_cmd(mmc, &cmd, NULL); 431 if (err) 432 return err; 433 mmc->ocr = cmd.response[0]; 434 return 0; 435 } 436 437 static int mmc_send_op_cond(struct mmc *mmc) 438 { 439 int err, i; 440 441 /* Some cards seem to need this */ 442 mmc_go_idle(mmc); 443 444 /* Asking to the card its capabilities */ 445 for (i = 0; i < 2; i++) { 446 err = mmc_send_op_cond_iter(mmc, i != 0); 447 if (err) 448 return err; 449 450 /* exit if not busy (flag seems to be inverted) */ 451 if (mmc->ocr & OCR_BUSY) 452 break; 453 } 454 mmc->op_cond_pending = 1; 455 return 0; 456 } 457 458 static int mmc_complete_op_cond(struct mmc *mmc) 459 { 460 struct mmc_cmd cmd; 461 int timeout = 1000; 462 uint start; 463 int err; 464 465 mmc->op_cond_pending = 0; 466 if (!(mmc->ocr & OCR_BUSY)) { 467 /* Some cards seem to need this */ 468 mmc_go_idle(mmc); 469 470 
start = get_timer(0); 471 while (1) { 472 err = mmc_send_op_cond_iter(mmc, 1); 473 if (err) 474 return err; 475 if (mmc->ocr & OCR_BUSY) 476 break; 477 if (get_timer(start) > timeout) 478 return -EOPNOTSUPP; 479 udelay(100); 480 } 481 } 482 483 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 484 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 485 cmd.resp_type = MMC_RSP_R3; 486 cmd.cmdarg = 0; 487 488 err = mmc_send_cmd(mmc, &cmd, NULL); 489 490 if (err) 491 return err; 492 493 mmc->ocr = cmd.response[0]; 494 } 495 496 mmc->version = MMC_VERSION_UNKNOWN; 497 498 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 499 mmc->rca = 1; 500 501 return 0; 502 } 503 504 505 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd) 506 { 507 struct mmc_cmd cmd; 508 struct mmc_data data; 509 int err; 510 511 /* Get the Card Status Register */ 512 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD; 513 cmd.resp_type = MMC_RSP_R1; 514 cmd.cmdarg = 0; 515 516 data.dest = (char *)ext_csd; 517 data.blocks = 1; 518 data.blocksize = MMC_MAX_BLOCK_LEN; 519 data.flags = MMC_DATA_READ; 520 521 err = mmc_send_cmd(mmc, &cmd, &data); 522 523 return err; 524 } 525 526 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value) 527 { 528 struct mmc_cmd cmd; 529 int timeout = 1000; 530 int retries = 3; 531 int ret; 532 533 cmd.cmdidx = MMC_CMD_SWITCH; 534 cmd.resp_type = MMC_RSP_R1b; 535 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 536 (index << 16) | 537 (value << 8); 538 539 while (retries > 0) { 540 ret = mmc_send_cmd(mmc, &cmd, NULL); 541 542 /* Waiting for the ready status */ 543 if (!ret) { 544 ret = mmc_send_status(mmc, timeout); 545 return ret; 546 } 547 548 retries--; 549 } 550 551 return ret; 552 553 } 554 555 static int mmc_select_hs(struct mmc *mmc) 556 { 557 int ret; 558 559 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 560 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS); 561 562 if (!ret) 563 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 564 565 return ret; 566 } 567 568 static u32 mmc_select_card_type(struct 
mmc *mmc, u8 *ext_csd) 569 { 570 u8 card_type; 571 u32 host_caps, avail_type = 0; 572 573 card_type = ext_csd[EXT_CSD_CARD_TYPE]; 574 host_caps = mmc->cfg->host_caps; 575 576 if ((host_caps & MMC_MODE_HS) && 577 (card_type & EXT_CSD_CARD_TYPE_26)) 578 avail_type |= EXT_CSD_CARD_TYPE_26; 579 580 if ((host_caps & MMC_MODE_HS) && 581 (card_type & EXT_CSD_CARD_TYPE_52)) 582 avail_type |= EXT_CSD_CARD_TYPE_52; 583 584 /* 585 * For the moment, u-boot doesn't support signal voltage 586 * switch, therefor we assume that host support ddr52 587 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and 588 * hs400 are the same). 589 */ 590 if ((host_caps & MMC_MODE_DDR_52MHz) && 591 (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)) 592 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; 593 594 if ((host_caps & MMC_MODE_HS200) && 595 (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V)) 596 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; 597 598 /* 599 * If host can support HS400, it means that host can also 600 * support HS200. 601 */ 602 if ((host_caps & MMC_MODE_HS400) && 603 (host_caps & MMC_MODE_8BIT) && 604 (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 605 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 606 EXT_CSD_CARD_TYPE_HS400_1_8V; 607 608 if ((host_caps & MMC_MODE_HS400ES) && 609 (host_caps & MMC_MODE_8BIT) && 610 ext_csd[EXT_CSD_STROBE_SUPPORT] && 611 (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 612 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 613 EXT_CSD_CARD_TYPE_HS400_1_8V | 614 EXT_CSD_CARD_TYPE_HS400ES; 615 616 return avail_type; 617 } 618 619 static int mmc_change_freq(struct mmc *mmc) 620 { 621 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 622 char cardtype; 623 u32 avail_type; 624 int err; 625 626 mmc->card_caps = 0; 627 628 if (mmc_host_is_spi(mmc)) 629 return 0; 630 631 /* Only version 4 supports high-speed */ 632 if (mmc->version < MMC_VERSION_4) 633 return 0; 634 635 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT; 636 637 err = mmc_send_ext_csd(mmc, ext_csd); 638 639 if (err) 
640 return err; 641 642 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf; 643 avail_type = mmc_select_card_type(mmc, ext_csd); 644 645 if (avail_type & EXT_CSD_CARD_TYPE_HS) 646 err = mmc_select_hs(mmc); 647 else 648 err = -EINVAL; 649 650 if (err) 651 return err; 652 653 /* Now check to see that it worked */ 654 err = mmc_send_ext_csd(mmc, ext_csd); 655 656 if (err) 657 return err; 658 659 /* No high-speed support */ 660 if (!ext_csd[EXT_CSD_HS_TIMING]) 661 return 0; 662 663 /* High Speed is set, there are two types: 52MHz and 26MHz */ 664 if (cardtype & EXT_CSD_CARD_TYPE_52) { 665 if (cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V) 666 mmc->card_caps |= MMC_MODE_DDR_52MHz; 667 mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS; 668 } else { 669 mmc->card_caps |= MMC_MODE_HS; 670 } 671 672 return 0; 673 } 674 675 static int mmc_set_capacity(struct mmc *mmc, int part_num) 676 { 677 switch (part_num) { 678 case 0: 679 mmc->capacity = mmc->capacity_user; 680 break; 681 case 1: 682 case 2: 683 mmc->capacity = mmc->capacity_boot; 684 break; 685 case 3: 686 mmc->capacity = mmc->capacity_rpmb; 687 break; 688 case 4: 689 case 5: 690 case 6: 691 case 7: 692 mmc->capacity = mmc->capacity_gp[part_num - 4]; 693 break; 694 default: 695 return -1; 696 } 697 698 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len); 699 700 return 0; 701 } 702 703 int mmc_switch_part(struct mmc *mmc, unsigned int part_num) 704 { 705 int ret; 706 707 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF, 708 (mmc->part_config & ~PART_ACCESS_MASK) 709 | (part_num & PART_ACCESS_MASK)); 710 711 /* 712 * Set the capacity if the switch succeeded or was intended 713 * to return to representing the raw device. 
714 */ 715 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) { 716 ret = mmc_set_capacity(mmc, part_num); 717 mmc_get_blk_desc(mmc)->hwpart = part_num; 718 } 719 720 return ret; 721 } 722 723 int mmc_hwpart_config(struct mmc *mmc, 724 const struct mmc_hwpart_conf *conf, 725 enum mmc_hwpart_conf_mode mode) 726 { 727 u8 part_attrs = 0; 728 u32 enh_size_mult; 729 u32 enh_start_addr; 730 u32 gp_size_mult[4]; 731 u32 max_enh_size_mult; 732 u32 tot_enh_size_mult = 0; 733 u8 wr_rel_set; 734 int i, pidx, err; 735 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 736 737 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE) 738 return -EINVAL; 739 740 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) { 741 printf("eMMC >= 4.4 required for enhanced user data area\n"); 742 return -EMEDIUMTYPE; 743 } 744 745 if (!(mmc->part_support & PART_SUPPORT)) { 746 printf("Card does not support partitioning\n"); 747 return -EMEDIUMTYPE; 748 } 749 750 if (!mmc->hc_wp_grp_size) { 751 printf("Card does not define HC WP group size\n"); 752 return -EMEDIUMTYPE; 753 } 754 755 /* check partition alignment and total enhanced size */ 756 if (conf->user.enh_size) { 757 if (conf->user.enh_size % mmc->hc_wp_grp_size || 758 conf->user.enh_start % mmc->hc_wp_grp_size) { 759 printf("User data enhanced area not HC WP group " 760 "size aligned\n"); 761 return -EINVAL; 762 } 763 part_attrs |= EXT_CSD_ENH_USR; 764 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size; 765 if (mmc->high_capacity) { 766 enh_start_addr = conf->user.enh_start; 767 } else { 768 enh_start_addr = (conf->user.enh_start << 9); 769 } 770 } else { 771 enh_size_mult = 0; 772 enh_start_addr = 0; 773 } 774 tot_enh_size_mult += enh_size_mult; 775 776 for (pidx = 0; pidx < 4; pidx++) { 777 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) { 778 printf("GP%i partition not HC WP group size " 779 "aligned\n", pidx+1); 780 return -EINVAL; 781 } 782 gp_size_mult[pidx] = conf->gp_part[pidx].size / 
mmc->hc_wp_grp_size; 783 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) { 784 part_attrs |= EXT_CSD_ENH_GP(pidx); 785 tot_enh_size_mult += gp_size_mult[pidx]; 786 } 787 } 788 789 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) { 790 printf("Card does not support enhanced attribute\n"); 791 return -EMEDIUMTYPE; 792 } 793 794 err = mmc_send_ext_csd(mmc, ext_csd); 795 if (err) 796 return err; 797 798 max_enh_size_mult = 799 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) + 800 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) + 801 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT]; 802 if (tot_enh_size_mult > max_enh_size_mult) { 803 printf("Total enhanced size exceeds maximum (%u > %u)\n", 804 tot_enh_size_mult, max_enh_size_mult); 805 return -EMEDIUMTYPE; 806 } 807 808 /* The default value of EXT_CSD_WR_REL_SET is device 809 * dependent, the values can only be changed if the 810 * EXT_CSD_HS_CTRL_REL bit is set. The values can be 811 * changed only once and before partitioning is completed. 
*/ 812 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 813 if (conf->user.wr_rel_change) { 814 if (conf->user.wr_rel_set) 815 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR; 816 else 817 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR; 818 } 819 for (pidx = 0; pidx < 4; pidx++) { 820 if (conf->gp_part[pidx].wr_rel_change) { 821 if (conf->gp_part[pidx].wr_rel_set) 822 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx); 823 else 824 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx); 825 } 826 } 827 828 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] && 829 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) { 830 puts("Card does not support host controlled partition write " 831 "reliability settings\n"); 832 return -EMEDIUMTYPE; 833 } 834 835 if (ext_csd[EXT_CSD_PARTITION_SETTING] & 836 EXT_CSD_PARTITION_SETTING_COMPLETED) { 837 printf("Card already partitioned\n"); 838 return -EPERM; 839 } 840 841 if (mode == MMC_HWPART_CONF_CHECK) 842 return 0; 843 844 /* Partitioning requires high-capacity size definitions */ 845 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) { 846 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 847 EXT_CSD_ERASE_GROUP_DEF, 1); 848 849 if (err) 850 return err; 851 852 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 853 854 /* update erase group size to be high-capacity */ 855 mmc->erase_grp_size = 856 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 857 858 } 859 860 /* all OK, write the configuration */ 861 for (i = 0; i < 4; i++) { 862 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 863 EXT_CSD_ENH_START_ADDR+i, 864 (enh_start_addr >> (i*8)) & 0xFF); 865 if (err) 866 return err; 867 } 868 for (i = 0; i < 3; i++) { 869 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 870 EXT_CSD_ENH_SIZE_MULT+i, 871 (enh_size_mult >> (i*8)) & 0xFF); 872 if (err) 873 return err; 874 } 875 for (pidx = 0; pidx < 4; pidx++) { 876 for (i = 0; i < 3; i++) { 877 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 878 EXT_CSD_GP_SIZE_MULT+pidx*3+i, 879 (gp_size_mult[pidx] >> (i*8)) & 0xFF); 880 if (err) 881 return err; 882 } 883 } 
884 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 885 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs); 886 if (err) 887 return err; 888 889 if (mode == MMC_HWPART_CONF_SET) 890 return 0; 891 892 /* The WR_REL_SET is a write-once register but shall be 893 * written before setting PART_SETTING_COMPLETED. As it is 894 * write-once we can only write it when completing the 895 * partitioning. */ 896 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) { 897 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 898 EXT_CSD_WR_REL_SET, wr_rel_set); 899 if (err) 900 return err; 901 } 902 903 /* Setting PART_SETTING_COMPLETED confirms the partition 904 * configuration but it only becomes effective after power 905 * cycle, so we do not adjust the partition related settings 906 * in the mmc struct. */ 907 908 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 909 EXT_CSD_PARTITION_SETTING, 910 EXT_CSD_PARTITION_SETTING_COMPLETED); 911 if (err) 912 return err; 913 914 return 0; 915 } 916 917 #if !CONFIG_IS_ENABLED(DM_MMC) 918 int mmc_getcd(struct mmc *mmc) 919 { 920 int cd; 921 922 cd = board_mmc_getcd(mmc); 923 924 if (cd < 0) { 925 if (mmc->cfg->ops->getcd) 926 cd = mmc->cfg->ops->getcd(mmc); 927 else 928 cd = 1; 929 } 930 931 return cd; 932 } 933 #endif 934 935 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp) 936 { 937 struct mmc_cmd cmd; 938 struct mmc_data data; 939 940 /* Switch the frequency */ 941 cmd.cmdidx = SD_CMD_SWITCH_FUNC; 942 cmd.resp_type = MMC_RSP_R1; 943 cmd.cmdarg = (mode << 31) | 0xffffff; 944 cmd.cmdarg &= ~(0xf << (group * 4)); 945 cmd.cmdarg |= value << (group * 4); 946 947 data.dest = (char *)resp; 948 data.blocksize = 64; 949 data.blocks = 1; 950 data.flags = MMC_DATA_READ; 951 952 return mmc_send_cmd(mmc, &cmd, &data); 953 } 954 955 956 static int sd_change_freq(struct mmc *mmc) 957 { 958 int err; 959 struct mmc_cmd cmd; 960 ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2); 961 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16); 962 struct mmc_data data; 
963 int timeout; 964 965 mmc->card_caps = 0; 966 967 if (mmc_host_is_spi(mmc)) 968 return 0; 969 970 /* Read the SCR to find out if this card supports higher speeds */ 971 cmd.cmdidx = MMC_CMD_APP_CMD; 972 cmd.resp_type = MMC_RSP_R1; 973 cmd.cmdarg = mmc->rca << 16; 974 975 err = mmc_send_cmd(mmc, &cmd, NULL); 976 977 if (err) 978 return err; 979 980 cmd.cmdidx = SD_CMD_APP_SEND_SCR; 981 cmd.resp_type = MMC_RSP_R1; 982 cmd.cmdarg = 0; 983 984 timeout = 3; 985 986 retry_scr: 987 data.dest = (char *)scr; 988 data.blocksize = 8; 989 data.blocks = 1; 990 data.flags = MMC_DATA_READ; 991 992 err = mmc_send_cmd(mmc, &cmd, &data); 993 994 if (err) { 995 if (timeout--) 996 goto retry_scr; 997 998 return err; 999 } 1000 1001 mmc->scr[0] = __be32_to_cpu(scr[0]); 1002 mmc->scr[1] = __be32_to_cpu(scr[1]); 1003 1004 switch ((mmc->scr[0] >> 24) & 0xf) { 1005 case 0: 1006 mmc->version = SD_VERSION_1_0; 1007 break; 1008 case 1: 1009 mmc->version = SD_VERSION_1_10; 1010 break; 1011 case 2: 1012 mmc->version = SD_VERSION_2; 1013 if ((mmc->scr[0] >> 15) & 0x1) 1014 mmc->version = SD_VERSION_3; 1015 break; 1016 default: 1017 mmc->version = SD_VERSION_1_0; 1018 break; 1019 } 1020 1021 if (mmc->scr[0] & SD_DATA_4BIT) 1022 mmc->card_caps |= MMC_MODE_4BIT; 1023 1024 /* Version 1.0 doesn't support switching */ 1025 if (mmc->version == SD_VERSION_1_0) 1026 return 0; 1027 1028 timeout = 4; 1029 while (timeout--) { 1030 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1, 1031 (u8 *)switch_status); 1032 1033 if (err) 1034 return err; 1035 1036 /* The high-speed function is busy. Try again */ 1037 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY)) 1038 break; 1039 } 1040 1041 /* If high-speed isn't supported, we return */ 1042 if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)) 1043 return 0; 1044 1045 /* 1046 * If the host doesn't support SD_HIGHSPEED, do not switch card to 1047 * HIGHSPEED mode even if the card support SD_HIGHSPPED. 
1048 * This can avoid furthur problem when the card runs in different 1049 * mode between the host. 1050 */ 1051 if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) && 1052 (mmc->cfg->host_caps & MMC_MODE_HS))) 1053 return 0; 1054 1055 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status); 1056 1057 if (err) 1058 return err; 1059 1060 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000) 1061 mmc->card_caps |= MMC_MODE_HS; 1062 1063 return 0; 1064 } 1065 1066 static int sd_read_ssr(struct mmc *mmc) 1067 { 1068 int err, i; 1069 struct mmc_cmd cmd; 1070 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16); 1071 struct mmc_data data; 1072 int timeout = 3; 1073 unsigned int au, eo, et, es; 1074 1075 cmd.cmdidx = MMC_CMD_APP_CMD; 1076 cmd.resp_type = MMC_RSP_R1; 1077 cmd.cmdarg = mmc->rca << 16; 1078 1079 err = mmc_send_cmd(mmc, &cmd, NULL); 1080 if (err) 1081 return err; 1082 1083 cmd.cmdidx = SD_CMD_APP_SD_STATUS; 1084 cmd.resp_type = MMC_RSP_R1; 1085 cmd.cmdarg = 0; 1086 1087 retry_ssr: 1088 data.dest = (char *)ssr; 1089 data.blocksize = 64; 1090 data.blocks = 1; 1091 data.flags = MMC_DATA_READ; 1092 1093 err = mmc_send_cmd(mmc, &cmd, &data); 1094 if (err) { 1095 if (timeout--) 1096 goto retry_ssr; 1097 1098 return err; 1099 } 1100 1101 for (i = 0; i < 16; i++) 1102 ssr[i] = be32_to_cpu(ssr[i]); 1103 1104 au = (ssr[2] >> 12) & 0xF; 1105 if ((au <= 9) || (mmc->version == SD_VERSION_3)) { 1106 mmc->ssr.au = sd_au_size[au]; 1107 es = (ssr[3] >> 24) & 0xFF; 1108 es |= (ssr[2] & 0xFF) << 8; 1109 et = (ssr[3] >> 18) & 0x3F; 1110 if (es && et) { 1111 eo = (ssr[3] >> 16) & 0x3; 1112 mmc->ssr.erase_timeout = (et * 1000) / es; 1113 mmc->ssr.erase_offset = eo * 1000; 1114 } 1115 } else { 1116 debug("Invalid Allocation Unit Size.\n"); 1117 } 1118 1119 return 0; 1120 } 1121 1122 /* frequency bases */ 1123 /* divided by 10 to be nice to platforms without floating point */ 1124 static const int fbase[] = { 1125 10000, 1126 100000, 1127 1000000, 1128 10000000, 1129 }; 1130 
1131 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1132 * to platforms without floating point. 1133 */ 1134 static const u8 multipliers[] = { 1135 0, /* reserved */ 1136 10, 1137 12, 1138 13, 1139 15, 1140 20, 1141 25, 1142 30, 1143 35, 1144 40, 1145 45, 1146 50, 1147 55, 1148 60, 1149 70, 1150 80, 1151 }; 1152 1153 #if !CONFIG_IS_ENABLED(DM_MMC) 1154 static void mmc_set_ios(struct mmc *mmc) 1155 { 1156 if (mmc->cfg->ops->set_ios) 1157 mmc->cfg->ops->set_ios(mmc); 1158 } 1159 1160 static bool mmc_card_busy(struct mmc *mmc) 1161 { 1162 if (!mmc->cfg->ops->card_busy) 1163 return -ENOSYS; 1164 1165 return mmc->cfg->ops->card_busy(mmc); 1166 } 1167 1168 static bool mmc_can_card_busy(struct mmc *) 1169 { 1170 return !!mmc->cfg->ops->card_busy; 1171 } 1172 #endif 1173 1174 void mmc_set_clock(struct mmc *mmc, uint clock) 1175 { 1176 if (clock > mmc->cfg->f_max) 1177 clock = mmc->cfg->f_max; 1178 1179 if (clock < mmc->cfg->f_min) 1180 clock = mmc->cfg->f_min; 1181 1182 mmc->clock = clock; 1183 1184 mmc_set_ios(mmc); 1185 } 1186 1187 static void mmc_set_bus_width(struct mmc *mmc, uint width) 1188 { 1189 mmc->bus_width = width; 1190 1191 mmc_set_ios(mmc); 1192 } 1193 1194 static int mmc_startup(struct mmc *mmc) 1195 { 1196 int err, i; 1197 uint mult, freq; 1198 u64 cmult, csize, capacity; 1199 struct mmc_cmd cmd; 1200 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1201 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN); 1202 bool has_parts = false; 1203 bool part_completed; 1204 struct blk_desc *bdesc; 1205 1206 #ifdef CONFIG_MMC_SPI_CRC_ON 1207 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1208 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1209 cmd.resp_type = MMC_RSP_R1; 1210 cmd.cmdarg = 1; 1211 err = mmc_send_cmd(mmc, &cmd, NULL); 1212 1213 if (err) 1214 return err; 1215 } 1216 #endif 1217 1218 /* Put the Card in Identify Mode */ 1219 cmd.cmdidx = mmc_host_is_spi(mmc) ? 
MMC_CMD_SEND_CID : 1220 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */ 1221 cmd.resp_type = MMC_RSP_R2; 1222 cmd.cmdarg = 0; 1223 1224 err = mmc_send_cmd(mmc, &cmd, NULL); 1225 1226 if (err) 1227 return err; 1228 1229 memcpy(mmc->cid, cmd.response, 16); 1230 1231 /* 1232 * For MMC cards, set the Relative Address. 1233 * For SD cards, get the Relatvie Address. 1234 * This also puts the cards into Standby State 1235 */ 1236 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1237 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR; 1238 cmd.cmdarg = mmc->rca << 16; 1239 cmd.resp_type = MMC_RSP_R6; 1240 1241 err = mmc_send_cmd(mmc, &cmd, NULL); 1242 1243 if (err) 1244 return err; 1245 1246 if (IS_SD(mmc)) 1247 mmc->rca = (cmd.response[0] >> 16) & 0xffff; 1248 } 1249 1250 /* Get the Card-Specific Data */ 1251 cmd.cmdidx = MMC_CMD_SEND_CSD; 1252 cmd.resp_type = MMC_RSP_R2; 1253 cmd.cmdarg = mmc->rca << 16; 1254 1255 err = mmc_send_cmd(mmc, &cmd, NULL); 1256 1257 if (err) 1258 return err; 1259 1260 mmc->csd[0] = cmd.response[0]; 1261 mmc->csd[1] = cmd.response[1]; 1262 mmc->csd[2] = cmd.response[2]; 1263 mmc->csd[3] = cmd.response[3]; 1264 1265 if (mmc->version == MMC_VERSION_UNKNOWN) { 1266 int version = (cmd.response[0] >> 26) & 0xf; 1267 1268 switch (version) { 1269 case 0: 1270 mmc->version = MMC_VERSION_1_2; 1271 break; 1272 case 1: 1273 mmc->version = MMC_VERSION_1_4; 1274 break; 1275 case 2: 1276 mmc->version = MMC_VERSION_2_2; 1277 break; 1278 case 3: 1279 mmc->version = MMC_VERSION_3; 1280 break; 1281 case 4: 1282 mmc->version = MMC_VERSION_4; 1283 break; 1284 default: 1285 mmc->version = MMC_VERSION_1_2; 1286 break; 1287 } 1288 } 1289 1290 /* divide frequency by 10, since the mults are 10x bigger */ 1291 freq = fbase[(cmd.response[0] & 0x7)]; 1292 mult = multipliers[((cmd.response[0] >> 3) & 0xf)]; 1293 1294 mmc->tran_speed = freq * mult; 1295 1296 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1); 1297 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf); 
1298 1299 if (IS_SD(mmc)) 1300 mmc->write_bl_len = mmc->read_bl_len; 1301 else 1302 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf); 1303 1304 if (mmc->high_capacity) { 1305 csize = (mmc->csd[1] & 0x3f) << 16 1306 | (mmc->csd[2] & 0xffff0000) >> 16; 1307 cmult = 8; 1308 } else { 1309 csize = (mmc->csd[1] & 0x3ff) << 2 1310 | (mmc->csd[2] & 0xc0000000) >> 30; 1311 cmult = (mmc->csd[2] & 0x00038000) >> 15; 1312 } 1313 1314 mmc->capacity_user = (csize + 1) << (cmult + 2); 1315 mmc->capacity_user *= mmc->read_bl_len; 1316 mmc->capacity_boot = 0; 1317 mmc->capacity_rpmb = 0; 1318 for (i = 0; i < 4; i++) 1319 mmc->capacity_gp[i] = 0; 1320 1321 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN) 1322 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1323 1324 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN) 1325 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1326 1327 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) { 1328 cmd.cmdidx = MMC_CMD_SET_DSR; 1329 cmd.cmdarg = (mmc->dsr & 0xffff) << 16; 1330 cmd.resp_type = MMC_RSP_NONE; 1331 if (mmc_send_cmd(mmc, &cmd, NULL)) 1332 printf("MMC: SET_DSR failed\n"); 1333 } 1334 1335 /* Select the card, and put it into Transfer Mode */ 1336 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1337 cmd.cmdidx = MMC_CMD_SELECT_CARD; 1338 cmd.resp_type = MMC_RSP_R1; 1339 cmd.cmdarg = mmc->rca << 16; 1340 err = mmc_send_cmd(mmc, &cmd, NULL); 1341 1342 if (err) 1343 return err; 1344 } 1345 1346 /* 1347 * For SD, its erase group is always one sector 1348 */ 1349 mmc->erase_grp_size = 1; 1350 mmc->part_config = MMCPART_NOAVAILABLE; 1351 if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) { 1352 /* check ext_csd version and capacity */ 1353 err = mmc_send_ext_csd(mmc, ext_csd); 1354 if (err) 1355 return err; 1356 if (ext_csd[EXT_CSD_REV] >= 2) { 1357 /* 1358 * According to the JEDEC Standard, the value of 1359 * ext_csd's capacity is valid if the value is more 1360 * than 2GB 1361 */ 1362 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0 1363 | ext_csd[EXT_CSD_SEC_CNT 
+ 1] << 8 1364 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16 1365 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 1366 capacity *= MMC_MAX_BLOCK_LEN; 1367 if ((capacity >> 20) > 2 * 1024) 1368 mmc->capacity_user = capacity; 1369 } 1370 1371 switch (ext_csd[EXT_CSD_REV]) { 1372 case 1: 1373 mmc->version = MMC_VERSION_4_1; 1374 break; 1375 case 2: 1376 mmc->version = MMC_VERSION_4_2; 1377 break; 1378 case 3: 1379 mmc->version = MMC_VERSION_4_3; 1380 break; 1381 case 5: 1382 mmc->version = MMC_VERSION_4_41; 1383 break; 1384 case 6: 1385 mmc->version = MMC_VERSION_4_5; 1386 break; 1387 case 7: 1388 mmc->version = MMC_VERSION_5_0; 1389 break; 1390 case 8: 1391 mmc->version = MMC_VERSION_5_1; 1392 break; 1393 } 1394 1395 /* The partition data may be non-zero but it is only 1396 * effective if PARTITION_SETTING_COMPLETED is set in 1397 * EXT_CSD, so ignore any data if this bit is not set, 1398 * except for enabling the high-capacity group size 1399 * definition (see below). */ 1400 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 1401 EXT_CSD_PARTITION_SETTING_COMPLETED); 1402 1403 /* store the partition info of emmc */ 1404 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 1405 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 1406 ext_csd[EXT_CSD_BOOT_MULT]) 1407 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 1408 if (part_completed && 1409 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 1410 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 1411 1412 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 1413 1414 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 1415 1416 for (i = 0; i < 4; i++) { 1417 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 1418 uint mult = (ext_csd[idx + 2] << 16) + 1419 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 1420 if (mult) 1421 has_parts = true; 1422 if (!part_completed) 1423 continue; 1424 mmc->capacity_gp[i] = mult; 1425 mmc->capacity_gp[i] *= 1426 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1427 mmc->capacity_gp[i] *= 
ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1428 mmc->capacity_gp[i] <<= 19; 1429 } 1430 1431 if (part_completed) { 1432 mmc->enh_user_size = 1433 (ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) + 1434 (ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) + 1435 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 1436 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1437 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1438 mmc->enh_user_size <<= 19; 1439 mmc->enh_user_start = 1440 (ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) + 1441 (ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) + 1442 (ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) + 1443 ext_csd[EXT_CSD_ENH_START_ADDR]; 1444 if (mmc->high_capacity) 1445 mmc->enh_user_start <<= 9; 1446 } 1447 1448 /* 1449 * Host needs to enable ERASE_GRP_DEF bit if device is 1450 * partitioned. This bit will be lost every time after a reset 1451 * or power off. This will affect erase size. 1452 */ 1453 if (part_completed) 1454 has_parts = true; 1455 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 1456 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 1457 has_parts = true; 1458 if (has_parts) { 1459 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1460 EXT_CSD_ERASE_GROUP_DEF, 1); 1461 1462 if (err) 1463 return err; 1464 else 1465 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1466 } 1467 1468 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 1469 /* Read out group size from ext_csd */ 1470 mmc->erase_grp_size = 1471 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1472 /* 1473 * if high capacity and partition setting completed 1474 * SEC_COUNT is valid even if it is smaller than 2 GiB 1475 * JEDEC Standard JESD84-B45, 6.2.4 1476 */ 1477 if (mmc->high_capacity && part_completed) { 1478 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 1479 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 1480 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 1481 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 1482 capacity *= MMC_MAX_BLOCK_LEN; 1483 mmc->capacity_user = capacity; 1484 } 1485 } else { 1486 /* Calculate the group size 
from the csd value. */ 1487 int erase_gsz, erase_gmul; 1488 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10; 1489 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5; 1490 mmc->erase_grp_size = (erase_gsz + 1) 1491 * (erase_gmul + 1); 1492 } 1493 1494 mmc->hc_wp_grp_size = 1024 1495 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1496 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1497 1498 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 1499 } 1500 1501 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart); 1502 if (err) 1503 return err; 1504 1505 if (IS_SD(mmc)) 1506 err = sd_change_freq(mmc); 1507 else 1508 err = mmc_change_freq(mmc); 1509 1510 if (err) 1511 return err; 1512 1513 /* Restrict card's capabilities by what the host can do */ 1514 mmc->card_caps &= mmc->cfg->host_caps; 1515 1516 if (IS_SD(mmc)) { 1517 if (mmc->card_caps & MMC_MODE_4BIT) { 1518 cmd.cmdidx = MMC_CMD_APP_CMD; 1519 cmd.resp_type = MMC_RSP_R1; 1520 cmd.cmdarg = mmc->rca << 16; 1521 1522 err = mmc_send_cmd(mmc, &cmd, NULL); 1523 if (err) 1524 return err; 1525 1526 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH; 1527 cmd.resp_type = MMC_RSP_R1; 1528 cmd.cmdarg = 2; 1529 err = mmc_send_cmd(mmc, &cmd, NULL); 1530 if (err) 1531 return err; 1532 1533 mmc_set_bus_width(mmc, 4); 1534 } 1535 1536 err = sd_read_ssr(mmc); 1537 if (err) 1538 return err; 1539 1540 if (mmc->card_caps & MMC_MODE_HS) 1541 mmc->tran_speed = 50000000; 1542 else 1543 mmc->tran_speed = 25000000; 1544 } else if (mmc->version >= MMC_VERSION_4) { 1545 /* Only version 4 of MMC supports wider bus widths */ 1546 int idx; 1547 1548 /* An array of possible bus widths in order of preference */ 1549 static unsigned ext_csd_bits[] = { 1550 EXT_CSD_DDR_BUS_WIDTH_8, 1551 EXT_CSD_DDR_BUS_WIDTH_4, 1552 EXT_CSD_BUS_WIDTH_8, 1553 EXT_CSD_BUS_WIDTH_4, 1554 EXT_CSD_BUS_WIDTH_1, 1555 }; 1556 1557 /* An array to map CSD bus widths to host cap bits */ 1558 static unsigned ext_to_hostcaps[] = { 1559 [EXT_CSD_DDR_BUS_WIDTH_4] = 1560 MMC_MODE_DDR_52MHz | MMC_MODE_4BIT, 1561 
[EXT_CSD_DDR_BUS_WIDTH_8] = 1562 MMC_MODE_DDR_52MHz | MMC_MODE_8BIT, 1563 [EXT_CSD_BUS_WIDTH_4] = MMC_MODE_4BIT, 1564 [EXT_CSD_BUS_WIDTH_8] = MMC_MODE_8BIT, 1565 }; 1566 1567 /* An array to map chosen bus width to an integer */ 1568 static unsigned widths[] = { 1569 8, 4, 8, 4, 1, 1570 }; 1571 1572 for (idx=0; idx < ARRAY_SIZE(ext_csd_bits); idx++) { 1573 unsigned int extw = ext_csd_bits[idx]; 1574 unsigned int caps = ext_to_hostcaps[extw]; 1575 1576 /* 1577 * If the bus width is still not changed, 1578 * don't try to set the default again. 1579 * Otherwise, recover from switch attempts 1580 * by switching to 1-bit bus width. 1581 */ 1582 if (extw == EXT_CSD_BUS_WIDTH_1 && 1583 mmc->bus_width == 1) { 1584 err = 0; 1585 break; 1586 } 1587 1588 /* 1589 * Check to make sure the card and controller support 1590 * these capabilities 1591 */ 1592 if ((mmc->card_caps & caps) != caps) 1593 continue; 1594 1595 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1596 EXT_CSD_BUS_WIDTH, extw); 1597 1598 if (err) 1599 continue; 1600 1601 mmc->ddr_mode = (caps & MMC_MODE_DDR_52MHz) ? 
1 : 0; 1602 mmc_set_bus_width(mmc, widths[idx]); 1603 1604 err = mmc_send_ext_csd(mmc, test_csd); 1605 1606 if (err) 1607 continue; 1608 1609 /* Only compare read only fields */ 1610 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] 1611 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] && 1612 ext_csd[EXT_CSD_HC_WP_GRP_SIZE] 1613 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] && 1614 ext_csd[EXT_CSD_REV] 1615 == test_csd[EXT_CSD_REV] && 1616 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1617 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] && 1618 memcmp(&ext_csd[EXT_CSD_SEC_CNT], 1619 &test_csd[EXT_CSD_SEC_CNT], 4) == 0) 1620 break; 1621 else 1622 err = -EBADMSG; 1623 } 1624 1625 if (err) 1626 return err; 1627 1628 if (mmc->card_caps & MMC_MODE_HS) { 1629 if (mmc->card_caps & MMC_MODE_HS_52MHz) 1630 mmc->tran_speed = 52000000; 1631 else 1632 mmc->tran_speed = 26000000; 1633 } 1634 } 1635 1636 mmc_set_clock(mmc, mmc->tran_speed); 1637 1638 /* Fix the block length for DDR mode */ 1639 if (mmc->ddr_mode) { 1640 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1641 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1642 } 1643 1644 /* fill in device description */ 1645 bdesc = mmc_get_blk_desc(mmc); 1646 bdesc->lun = 0; 1647 bdesc->hwpart = 0; 1648 bdesc->type = 0; 1649 bdesc->blksz = mmc->read_bl_len; 1650 bdesc->log2blksz = LOG2(bdesc->blksz); 1651 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len); 1652 #if !defined(CONFIG_SPL_BUILD) || \ 1653 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \ 1654 !defined(CONFIG_USE_TINY_PRINTF)) 1655 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x", 1656 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff), 1657 (mmc->cid[3] >> 16) & 0xffff); 1658 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff, 1659 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, 1660 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff, 1661 (mmc->cid[2] >> 24) & 0xff); 1662 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf, 1663 (mmc->cid[2] >> 16) & 0xf); 1664 #else 1665 bdesc->vendor[0] = 0; 1666 bdesc->product[0] = 0; 1667 
bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

/*
 * mmc_send_if_cond() - probe for an SD version 2.x card (CMD8)
 *
 * Sends SD_CMD_SEND_IF_COND with the 0xAA check pattern; bit 8 of the
 * argument advertises host support for 2.7-3.6V (any bit set in the
 * 0xff8000 voltage mask).  A card that echoes the check pattern back
 * is marked SD_VERSION_2.
 *
 * Return: 0 on success, -EOPNOTSUPP if the check pattern is not echoed
 * (presumably a pre-2.0 SD card or an eMMC - the caller decides), or
 * the error from mmc_send_cmd().
 */
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	/* An SD 2.x card echoes the 0xAA check pattern in the R7 response */
	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
}
#endif

/*
 * mmc_power_init() - enable the card's power supply
 *
 * With driver model and CONFIG_DM_REGULATOR, look up the controller's
 * "vmmc-supply" regulator and enable it; a missing regulator is not an
 * error (debug message only).  Without driver model, defer to the weak
 * board_mmc_power_init() hook instead.
 *
 * Return: 0 on success (including "no regulator described"), or the
 * error from regulator_set_enable().
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* No regulator found: treated as "nothing to do", not fatal */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
*/
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization
 *
 * Checks card presence, powers the card, resets the bus to a 1-bit
 * width / minimal clock / legacy timing, issues CMD0 (GO_IDLE) and
 * starts the SD/MMC operating-condition negotiation.  On success
 * mmc->init_in_progress is set and mmc_complete_init() finishes the
 * job later.
 *
 * Return: 0 on success, -ENOMEDIUM if no card is present, -EOPNOTSUPP
 * if the card answers neither the SD nor the MMC op-cond command, or
 * another negative error code from an intermediate step.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Nothing to do if this device was already brought up */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/*
	 * Identification runs in the safest configuration: 1-bit bus,
	 * lowest clock (1 Hz is requested here - presumably clamped to the
	 * host's minimum frequency inside mmc_set_clock(); confirm there)
	 * and legacy timing.
	 */
	mmc->ddr_mode = 0;
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2.  The return value is deliberately
	 * discarded (err is overwritten just below): failure here only
	 * means the card is not SD 2.0, and sd_send_op_cond() decides
	 * whether it is an SD card at all.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

/*
 * mmc_complete_init() - finish an initialization begun by
 * mmc_start_init(): complete the pending op-cond negotiation (if any)
 * and run mmc_startup(), recording the result in mmc->has_init.
 */
static int mmc_complete_init(struct mmc *mmc)
{
int err = 0;

	mmc->init_in_progress = 0;
	/* Finish the voltage/op-cond negotiation started by mmc_start_init() */
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	/* Record whether the card is now usable */
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

/*
 * mmc_init() - fully initialize an MMC/SD device
 *
 * Runs mmc_start_init() (unless an init is already in progress)
 * followed by mmc_complete_init().  A no-op when the device has
 * already been initialized.  On failure the error code and the
 * elapsed time are printed.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

/*
 * mmc_set_dsr() - store a driver stage register value
 *
 * The value is latched into mmc->dsr; mmc_startup() later sends it to
 * the card with MMC_CMD_SET_DSR, but only if the card advertises DSR
 * support (dsr_imp) and the value differs from the 0xffffffff "unset"
 * marker.
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

/* Mark a device for early initialization from mmc_do_preinit() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* In SPL with driver model, devices are probed lazily on first use */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Probe every MMC controller known to driver model */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
*/
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	/* Probe every controller; a failure is logged but not fatal */
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy (non-DM) path: let the board, then the CPU, register devices */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

/*
 * mmc_initialize() - one-time registration of all MMC devices
 *
 * Initializes the legacy device list (when the BLK/MMC_TINY paths are
 * not used), probes the controllers, prints the device list (non-SPL
 * builds) and pre-initializes any device flagged via mmc_set_preinit().
 * Safe to call repeatedly; only the first call does any work.
 *
 * Return: 0 on success, or the error from mmc_probe().
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * mmc_set_bkops_enable() - enable manual background operations (eMMC)
 *
 * Reads EXT_CSD, verifies that the device supports background
 * operations (bit 0 of BKOPS_SUPPORT) and that they are not already
 * enabled, then sets BKOPS_EN via CMD6/SWITCH.
 *
 * NOTE(review): per JEDEC, BKOPS_EN is a one-time programmable field,
 * so this change cannot be reverted - confirm against the eMMC spec
 * revision in use before calling.
 *
 * Return: 0 on success (or when already enabled), -EMEDIUMTYPE if the
 * device lacks support, or the error from the EXT_CSD read/switch.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif