1 /* 2 * Copyright 2008, Freescale Semiconductor, Inc 3 * Andy Fleming 4 * 5 * Based vaguely on the Linux code 6 * 7 * SPDX-License-Identifier: GPL-2.0+ 8 */ 9 10 #include <config.h> 11 #include <common.h> 12 #include <command.h> 13 #include <dm.h> 14 #include <dm/device-internal.h> 15 #include <errno.h> 16 #include <mmc.h> 17 #include <part.h> 18 #include <power/regulator.h> 19 #include <malloc.h> 20 #include <memalign.h> 21 #include <linux/list.h> 22 #include <div64.h> 23 #include "mmc_private.h" 24 25 static const unsigned int sd_au_size[] = { 26 0, SZ_16K / 512, SZ_32K / 512, 27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, 31 }; 32 33 static char mmc_ext_csd[512]; 34 35 #if CONFIG_IS_ENABLED(MMC_TINY) 36 static struct mmc mmc_static; 37 struct mmc *find_mmc_device(int dev_num) 38 { 39 return &mmc_static; 40 } 41 42 void mmc_do_preinit(void) 43 { 44 struct mmc *m = &mmc_static; 45 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 46 mmc_set_preinit(m, 1); 47 #endif 48 if (m->preinit) 49 mmc_start_init(m); 50 } 51 52 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc) 53 { 54 return &mmc->block_dev; 55 } 56 #endif 57 58 #if !CONFIG_IS_ENABLED(DM_MMC) 59 __weak int board_mmc_getwp(struct mmc *mmc) 60 { 61 return -1; 62 } 63 64 int mmc_getwp(struct mmc *mmc) 65 { 66 int wp; 67 68 wp = board_mmc_getwp(mmc); 69 70 if (wp < 0) { 71 if (mmc->cfg->ops->getwp) 72 wp = mmc->cfg->ops->getwp(mmc); 73 else 74 wp = 0; 75 } 76 77 return wp; 78 } 79 80 __weak int board_mmc_getcd(struct mmc *mmc) 81 { 82 return -1; 83 } 84 #endif 85 86 #ifdef CONFIG_MMC_TRACE 87 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd) 88 { 89 printf("CMD_SEND:%d\n", cmd->cmdidx); 90 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg); 91 } 92 93 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret) 94 { 95 int i; 96 u8 *ptr; 97 98 if (ret) { 99 printf("\t\tRET\t\t\t %d\n", ret); 100 } else { 101 switch (cmd->resp_type) { 102 case MMC_RSP_NONE: 103 printf("\t\tMMC_RSP_NONE\n"); 104 break; 105 case MMC_RSP_R1: 106 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 107 cmd->response[0]); 108 break; 109 case MMC_RSP_R1b: 110 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 111 cmd->response[0]); 112 break; 113 case MMC_RSP_R2: 114 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 115 cmd->response[0]); 116 printf("\t\t \t\t 0x%08X \n", 117 cmd->response[1]); 118 printf("\t\t \t\t 0x%08X \n", 119 cmd->response[2]); 120 printf("\t\t \t\t 0x%08X \n", 121 cmd->response[3]); 122 printf("\n"); 123 printf("\t\t\t\t\tDUMPING DATA\n"); 124 for (i = 0; i < 4; i++) { 125 int j; 126 printf("\t\t\t\t\t%03d - ", i*4); 127 ptr = (u8 *)&cmd->response[i]; 128 ptr += 3; 129 for (j = 0; j < 4; j++) 130 printf("%02X ", *ptr--); 131 printf("\n"); 132 } 133 break; 134 case MMC_RSP_R3: 135 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 136 cmd->response[0]); 137 break; 138 default: 139 printf("\t\tERROR MMC rsp not supported\n"); 140 break; 141 } 142 } 143 } 144 145 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 146 { 147 int status; 148 149 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 150 printf("CURR STATE:%d\n", status); 151 } 152 #endif 153 154 #if !CONFIG_IS_ENABLED(DM_MMC) 155 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 156 { 157 int ret; 158 159 mmmc_trace_before_send(mmc, cmd); 160 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 161 
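        /*
         * When CONFIG_MMC_TRACE is disabled the mmmc_trace_*() helpers are
         * expected to compile to empty stubs, so this wrapper adds no
         * overhead unless command tracing is explicitly enabled.
         */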
mmmc_trace_after_send(mmc, cmd, ret); 162 163 return ret; 164 } 165 #endif 166 167 int mmc_send_status(struct mmc *mmc, int timeout) 168 { 169 struct mmc_cmd cmd; 170 int err, retries = 5; 171 172 cmd.cmdidx = MMC_CMD_SEND_STATUS; 173 cmd.resp_type = MMC_RSP_R1; 174 if (!mmc_host_is_spi(mmc)) 175 cmd.cmdarg = mmc->rca << 16; 176 177 while (1) { 178 err = mmc_send_cmd(mmc, &cmd, NULL); 179 if (!err) { 180 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) && 181 (cmd.response[0] & MMC_STATUS_CURR_STATE) != 182 MMC_STATE_PRG) 183 break; 184 else if (cmd.response[0] & MMC_STATUS_MASK) { 185 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 186 printf("Status Error: 0x%08X\n", 187 cmd.response[0]); 188 #endif 189 return -ECOMM; 190 } 191 } else if (--retries < 0) 192 return err; 193 194 if (timeout-- <= 0) 195 break; 196 197 udelay(1000); 198 } 199 200 mmc_trace_state(mmc, &cmd); 201 if (timeout <= 0) { 202 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 203 printf("Timeout waiting card ready\n"); 204 #endif 205 return -ETIMEDOUT; 206 } 207 208 return 0; 209 } 210 211 int mmc_set_blocklen(struct mmc *mmc, int len) 212 { 213 struct mmc_cmd cmd; 214 215 if (mmc_card_ddr(mmc)) 216 return 0; 217 218 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN; 219 cmd.resp_type = MMC_RSP_R1; 220 cmd.cmdarg = len; 221 222 return mmc_send_cmd(mmc, &cmd, NULL); 223 } 224 225 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start, 226 lbaint_t blkcnt) 227 { 228 struct mmc_cmd cmd; 229 struct mmc_data data; 230 231 if (blkcnt > 1) 232 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 233 else 234 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 235 236 if (mmc->high_capacity) 237 cmd.cmdarg = start; 238 else 239 cmd.cmdarg = start * mmc->read_bl_len; 240 241 cmd.resp_type = MMC_RSP_R1; 242 243 data.dest = dst; 244 data.blocks = blkcnt; 245 data.blocksize = mmc->read_bl_len; 246 data.flags = MMC_DATA_READ; 247 248 if (mmc_send_cmd(mmc, &cmd, &data)) 249 return 0; 250 251 if (blkcnt > 1) { 252 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 253 cmd.cmdarg = 0; 254 cmd.resp_type = MMC_RSP_R1b; 255 if (mmc_send_cmd(mmc, &cmd, NULL)) { 256 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 257 printf("mmc fail to send stop cmd\n"); 258 #endif 259 return 0; 260 } 261 } 262 263 return blkcnt; 264 } 265 266 #ifdef CONFIG_SPL_BLK_READ_PREPARE 267 static int mmc_read_blocks_prepare(struct mmc *mmc, void *dst, lbaint_t start, 268 lbaint_t blkcnt) 269 { 270 struct mmc_cmd cmd; 271 struct mmc_data data; 272 273 if (blkcnt > 1) 274 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 275 else 276 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 277 278 if (mmc->high_capacity) 279 cmd.cmdarg = start; 280 else 281 cmd.cmdarg = start * mmc->read_bl_len; 282 283 cmd.resp_type = MMC_RSP_R1; 284 285 data.dest = dst; 286 data.blocks = blkcnt; 287 data.blocksize = mmc->read_bl_len; 288 data.flags = MMC_DATA_READ; 289 290 if (mmc_send_cmd_prepare(mmc, &cmd, &data)) 291 return 0; 292 293 return blkcnt; 294 } 295 #endif 296 297 #ifdef CONFIG_SPL_BLK_READ_PREPARE 298 #if CONFIG_IS_ENABLED(BLK) 299 ulong mmc_bread_prepare(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst) 300 #else 301 ulong mmc_bread_prepare(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt, 302 void *dst) 303 #endif 304 { 305 #if CONFIG_IS_ENABLED(BLK) 306 struct blk_desc *block_dev = dev_get_uclass_platdata(dev); 307 #endif 308 int dev_num = block_dev->devnum; 309 int timeout = 0; 310 int err; 311 312 if (blkcnt == 0) 313 return 
0; 314 315 struct mmc *mmc = find_mmc_device(dev_num); 316 317 if (!mmc) 318 return 0; 319 320 if (CONFIG_IS_ENABLED(MMC_TINY)) 321 err = mmc_switch_part(mmc, block_dev->hwpart); 322 else 323 err = blk_dselect_hwpart(block_dev, block_dev->hwpart); 324 325 if (err < 0) 326 return 0; 327 328 if ((start + blkcnt) > block_dev->lba) { 329 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 330 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n", 331 start + blkcnt, block_dev->lba); 332 #endif 333 return 0; 334 } 335 336 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) { 337 debug("%s: Failed to set blocklen\n", __func__); 338 return 0; 339 } 340 341 if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) { 342 debug("%s: Failed to read blocks\n", __func__); 343 re_init_retry: 344 timeout++; 345 /* 346 * Try re-init seven times. 347 */ 348 if (timeout > 7) { 349 printf("Re-init retry timeout\n"); 350 return 0; 351 } 352 353 mmc->has_init = 0; 354 if (mmc_init(mmc)) 355 return 0; 356 357 if (mmc_read_blocks_prepare(mmc, dst, start, blkcnt) != blkcnt) { 358 printf("%s: Re-init mmc_read_blocks_prepare error\n", 359 __func__); 360 goto re_init_retry; 361 } 362 } 363 364 return blkcnt; 365 } 366 #endif 367 368 #if CONFIG_IS_ENABLED(BLK) 369 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst) 370 #else 371 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt, 372 void *dst) 373 #endif 374 { 375 #if CONFIG_IS_ENABLED(BLK) 376 struct blk_desc *block_dev = dev_get_uclass_platdata(dev); 377 #endif 378 int dev_num = block_dev->devnum; 379 int err; 380 lbaint_t cur, blocks_todo = blkcnt; 381 382 #ifdef CONFIG_SPL_BLK_READ_PREPARE 383 if (block_dev->op_flag == BLK_PRE_RW) 384 #if CONFIG_IS_ENABLED(BLK) 385 return mmc_bread_prepare(dev, start, blkcnt, dst); 386 #else 387 return mmc_bread_prepare(block_dev, start, blkcnt, dst); 388 #endif 389 #endif 390 if (blkcnt == 0) 391 return 0; 392 393 struct mmc *mmc = find_mmc_device(dev_num); 394 if (!mmc) 395 return 0; 396 397 if (CONFIG_IS_ENABLED(MMC_TINY)) 398 err = mmc_switch_part(mmc, block_dev->hwpart); 399 else 400 err = blk_dselect_hwpart(block_dev, block_dev->hwpart); 401 402 if (err < 0) 403 return 0; 404 405 if ((start + blkcnt) > block_dev->lba) { 406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 407 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n", 408 start + blkcnt, block_dev->lba); 409 #endif 410 return 0; 411 } 412 413 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) { 414 debug("%s: Failed to set blocklen\n", __func__); 415 return 0; 416 } 417 418 do { 419 cur = (blocks_todo > mmc->cfg->b_max) ? 420 mmc->cfg->b_max : blocks_todo; 421 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 422 debug("%s: Failed to read blocks\n", __func__); 423 int timeout = 0; 424 re_init_retry: 425 timeout++; 426 /* 427 * Try re-init seven times. 
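         * A short or failed transfer usually means the controller or the
         * card is in a bad state; clearing has_init and calling mmc_init()
         * below forces a full re-enumeration before the read is retried.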
428 */ 429 if (timeout > 7) { 430 printf("Re-init retry timeout\n"); 431 return 0; 432 } 433 434 mmc->has_init = 0; 435 if (mmc_init(mmc)) 436 return 0; 437 438 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 439 printf("%s: Re-init mmc_read_blocks error\n", 440 __func__); 441 goto re_init_retry; 442 } 443 } 444 blocks_todo -= cur; 445 start += cur; 446 dst += cur * mmc->read_bl_len; 447 } while (blocks_todo > 0); 448 449 return blkcnt; 450 } 451 452 void mmc_set_clock(struct mmc *mmc, uint clock) 453 { 454 if (clock > mmc->cfg->f_max) 455 clock = mmc->cfg->f_max; 456 457 if (clock < mmc->cfg->f_min) 458 clock = mmc->cfg->f_min; 459 460 mmc->clock = clock; 461 462 mmc_set_ios(mmc); 463 } 464 465 static void mmc_set_bus_width(struct mmc *mmc, uint width) 466 { 467 mmc->bus_width = width; 468 469 mmc_set_ios(mmc); 470 } 471 472 static void mmc_set_timing(struct mmc *mmc, uint timing) 473 { 474 mmc->timing = timing; 475 mmc_set_ios(mmc); 476 } 477 478 static int mmc_go_idle(struct mmc *mmc) 479 { 480 struct mmc_cmd cmd; 481 int err; 482 483 udelay(1000); 484 485 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE; 486 cmd.cmdarg = 0; 487 cmd.resp_type = MMC_RSP_NONE; 488 489 err = mmc_send_cmd(mmc, &cmd, NULL); 490 491 if (err) 492 return err; 493 494 udelay(2000); 495 496 return 0; 497 } 498 499 #ifndef CONFIG_MMC_USE_PRE_CONFIG 500 static int sd_send_op_cond(struct mmc *mmc) 501 { 502 int timeout = 1000; 503 int err; 504 struct mmc_cmd cmd; 505 506 while (1) { 507 cmd.cmdidx = MMC_CMD_APP_CMD; 508 cmd.resp_type = MMC_RSP_R1; 509 cmd.cmdarg = 0; 510 511 err = mmc_send_cmd(mmc, &cmd, NULL); 512 513 if (err) 514 return err; 515 516 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND; 517 cmd.resp_type = MMC_RSP_R3; 518 519 /* 520 * Most cards do not answer if some reserved bits 521 * in the ocr are set. However, Some controller 522 * can set bit 7 (reserved for low voltages), but 523 * how to manage low voltages SD card is not yet 524 * specified. 525 */ 526 cmd.cmdarg = mmc_host_is_spi(mmc) ? 
0 : 527 (mmc->cfg->voltages & 0xff8000); 528 529 if (mmc->version == SD_VERSION_2) 530 cmd.cmdarg |= OCR_HCS; 531 532 err = mmc_send_cmd(mmc, &cmd, NULL); 533 534 if (err) 535 return err; 536 537 if (cmd.response[0] & OCR_BUSY) 538 break; 539 540 if (timeout-- <= 0) 541 return -EOPNOTSUPP; 542 543 udelay(1000); 544 } 545 546 if (mmc->version != SD_VERSION_2) 547 mmc->version = SD_VERSION_1_0; 548 549 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 550 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 551 cmd.resp_type = MMC_RSP_R3; 552 cmd.cmdarg = 0; 553 554 err = mmc_send_cmd(mmc, &cmd, NULL); 555 556 if (err) 557 return err; 558 } 559 560 mmc->ocr = cmd.response[0]; 561 562 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 563 mmc->rca = 0; 564 565 return 0; 566 } 567 #endif 568 569 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg) 570 { 571 struct mmc_cmd cmd; 572 int err; 573 574 cmd.cmdidx = MMC_CMD_SEND_OP_COND; 575 cmd.resp_type = MMC_RSP_R3; 576 cmd.cmdarg = 0; 577 if (use_arg && !mmc_host_is_spi(mmc)) 578 cmd.cmdarg = OCR_HCS | 579 (mmc->cfg->voltages & 580 (mmc->ocr & OCR_VOLTAGE_MASK)) | 581 (mmc->ocr & OCR_ACCESS_MODE); 582 583 err = mmc_send_cmd(mmc, &cmd, NULL); 584 if (err) 585 return err; 586 mmc->ocr = cmd.response[0]; 587 return 0; 588 } 589 590 #ifndef CONFIG_MMC_USE_PRE_CONFIG 591 static int mmc_send_op_cond(struct mmc *mmc) 592 { 593 int err, i; 594 595 /* Some cards seem to need this */ 596 mmc_go_idle(mmc); 597 598 /* Asking to the card its capabilities */ 599 for (i = 0; i < 2; i++) { 600 err = mmc_send_op_cond_iter(mmc, i != 0); 601 if (err) 602 return err; 603 604 /* exit if not busy (flag seems to be inverted) */ 605 if (mmc->ocr & OCR_BUSY) 606 break; 607 } 608 mmc->op_cond_pending = 1; 609 return 0; 610 } 611 #endif 612 static int mmc_complete_op_cond(struct mmc *mmc) 613 { 614 struct mmc_cmd cmd; 615 int timeout = 1000; 616 uint start; 617 int err; 618 619 mmc->op_cond_pending = 0; 620 if (!(mmc->ocr & OCR_BUSY)) { 621 /* Some cards seem to need this */ 622 mmc_go_idle(mmc); 623 624 start = get_timer(0); 625 while (1) { 626 err = mmc_send_op_cond_iter(mmc, 1); 627 if (err) 628 return err; 629 if (mmc->ocr & OCR_BUSY) 630 break; 631 if (get_timer(start) > timeout) 632 return -EOPNOTSUPP; 633 udelay(100); 634 } 635 } 636 637 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 638 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 639 cmd.resp_type = MMC_RSP_R3; 640 cmd.cmdarg = 0; 641 642 err = mmc_send_cmd(mmc, &cmd, NULL); 643 644 if (err) 645 return err; 646 647 mmc->ocr = cmd.response[0]; 648 } 649 650 mmc->version = MMC_VERSION_UNKNOWN; 651 652 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 653 mmc->rca = 1; 654 655 return 0; 656 } 657 658 659 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd) 660 { 661 struct mmc_cmd cmd; 662 struct mmc_data data; 663 int err; 664 665 #ifdef CONFIG_MMC_USE_PRE_CONFIG 666 static int initialized; 667 if (initialized) { 668 memcpy(ext_csd, mmc_ext_csd, 512); 669 return 0; 670 } 671 672 initialized = 1; 673 #endif 674 /* Get the Card Status Register */ 675 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD; 676 cmd.resp_type = MMC_RSP_R1; 677 cmd.cmdarg = 0; 678 679 data.dest = (char *)ext_csd; 680 data.blocks = 1; 681 data.blocksize = MMC_MAX_BLOCK_LEN; 682 data.flags = MMC_DATA_READ; 683 684 err = mmc_send_cmd(mmc, &cmd, &data); 685 memcpy(mmc_ext_csd, ext_csd, 512); 686 #if defined(CONFIG_MMC_USE_PRE_CONFIG) && defined(CONFIG_SPL_BUILD) 687 char *mmc_ecsd_base = NULL; 688 ulong mmc_ecsd; 689 690 mmc_ecsd = 
dev_read_u32_default(mmc->dev, "mmc-ecsd", 0); 691 mmc_ecsd_base = (char *)mmc_ecsd; 692 if (mmc_ecsd_base) { 693 memcpy(mmc_ecsd_base, ext_csd, 512); 694 *(unsigned int *)(mmc_ecsd_base + 512) = 0x55aa55aa; 695 } 696 #endif 697 return err; 698 } 699 700 static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status) 701 { 702 struct mmc_cmd cmd; 703 u8 busy = true; 704 uint start; 705 int ret; 706 int timeout = 1000; 707 708 cmd.cmdidx = MMC_CMD_SEND_STATUS; 709 cmd.resp_type = MMC_RSP_R1; 710 cmd.cmdarg = mmc->rca << 16; 711 712 start = get_timer(0); 713 714 if (!send_status && !mmc_can_card_busy(mmc)) { 715 mdelay(timeout); 716 return 0; 717 } 718 719 do { 720 if (!send_status) { 721 busy = mmc_card_busy(mmc); 722 } else { 723 ret = mmc_send_cmd(mmc, &cmd, NULL); 724 725 if (ret) 726 return ret; 727 728 if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR) 729 return -EBADMSG; 730 busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) == 731 MMC_STATE_PRG; 732 } 733 734 if (get_timer(start) > timeout && busy) 735 return -ETIMEDOUT; 736 } while (busy); 737 738 return 0; 739 } 740 741 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value, 742 u8 send_status) 743 { 744 struct mmc_cmd cmd; 745 int retries = 3; 746 int ret; 747 748 cmd.cmdidx = MMC_CMD_SWITCH; 749 cmd.resp_type = MMC_RSP_R1b; 750 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 751 (index << 16) | 752 (value << 8); 753 754 do { 755 ret = mmc_send_cmd(mmc, &cmd, NULL); 756 757 if (!ret) 758 return mmc_poll_for_busy(mmc, send_status); 759 } while (--retries > 0 && ret); 760 761 return ret; 762 } 763 764 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value) 765 { 766 return __mmc_switch(mmc, set, index, value, true); 767 } 768 769 static int mmc_select_bus_width(struct mmc *mmc) 770 { 771 u32 ext_csd_bits[] = { 772 EXT_CSD_BUS_WIDTH_8, 773 EXT_CSD_BUS_WIDTH_4, 774 }; 775 u32 bus_widths[] = { 776 MMC_BUS_WIDTH_8BIT, 777 MMC_BUS_WIDTH_4BIT, 778 }; 779 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 780 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN); 781 u32 idx, bus_width = 0; 782 int err = 0; 783 784 if (mmc->version < MMC_VERSION_4 || 785 !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT))) 786 return 0; 787 788 err = mmc_send_ext_csd(mmc, ext_csd); 789 790 if (err) 791 return err; 792 793 idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1; 794 795 /* 796 * Unlike SD, MMC cards dont have a configuration register to notify 797 * supported bus width. So bus test command should be run to identify 798 * the supported bus width or compare the ext csd values of current 799 * bus width and ext csd values of 1 bit mode read earlier. 800 */ 801 for (; idx < ARRAY_SIZE(bus_widths); idx++) { 802 /* 803 * Host is capable of 8bit transfer, then switch 804 * the device to work in 8bit transfer mode. If the 805 * mmc switch command returns error then switch to 806 * 4bit transfer mode. On success set the corresponding 807 * bus width on the host. 
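         * The EXT_CSD read back at the new width (test_csd) is compared
         * against the copy read before switching; matching read-only fields
         * indicate that the wider bus is actually working.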
808 */ 809 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 810 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]); 811 if (err) 812 continue; 813 814 bus_width = bus_widths[idx]; 815 mmc_set_bus_width(mmc, bus_width); 816 817 err = mmc_send_ext_csd(mmc, test_csd); 818 819 if (err) 820 continue; 821 822 /* Only compare read only fields */ 823 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] == 824 test_csd[EXT_CSD_PARTITIONING_SUPPORT]) && 825 (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] == 826 test_csd[EXT_CSD_HC_WP_GRP_SIZE]) && 827 (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) && 828 (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] == 829 test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && 830 !memcmp(&ext_csd[EXT_CSD_SEC_CNT], 831 &test_csd[EXT_CSD_SEC_CNT], 4)) { 832 err = bus_width; 833 break; 834 } else { 835 err = -EBADMSG; 836 } 837 } 838 839 return err; 840 } 841 842 #ifndef CONFIG_MMC_SIMPLE 843 static const u8 tuning_blk_pattern_4bit[] = { 844 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, 845 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, 846 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, 847 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, 848 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, 849 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, 850 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, 851 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, 852 }; 853 854 static const u8 tuning_blk_pattern_8bit[] = { 855 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 856 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, 857 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, 858 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, 859 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, 860 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, 861 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, 862 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, 863 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 864 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 865 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 866 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 867 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 868 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 869 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 870 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 871 }; 872 873 int mmc_send_tuning(struct mmc *mmc, u32 opcode) 874 { 875 struct mmc_cmd cmd; 876 struct mmc_data data; 877 const u8 *tuning_block_pattern; 878 int size, err = 0; 879 u8 *data_buf; 880 881 if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 882 tuning_block_pattern = tuning_blk_pattern_8bit; 883 size = sizeof(tuning_blk_pattern_8bit); 884 } else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) { 885 tuning_block_pattern = tuning_blk_pattern_4bit; 886 size = sizeof(tuning_blk_pattern_4bit); 887 } else { 888 return -EINVAL; 889 } 890 891 data_buf = calloc(1, size); 892 if (!data_buf) 893 return -ENOMEM; 894 895 cmd.cmdidx = opcode; 896 cmd.resp_type = MMC_RSP_R1; 897 cmd.cmdarg = 0; 898 899 data.dest = (char *)data_buf; 900 data.blocksize = size; 901 data.blocks = 1; 902 data.flags = MMC_DATA_READ; 903 904 err = mmc_send_cmd(mmc, &cmd, &data); 905 if (err) { 906 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 907 cmd.cmdarg = 0; 908 cmd.resp_type = MMC_RSP_R1b; 909 mmc_send_cmd(mmc, &cmd, NULL); 910 goto out; 911 } 912 if (memcmp(data_buf, tuning_block_pattern, size)) 913 err = -EIO; 914 out: 915 free(data_buf); 916 return err; 917 } 918 919 static int mmc_execute_tuning(struct mmc *mmc) 920 { 921 #ifdef CONFIG_DM_MMC 922 struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev); 923 #endif 924 u32 opcode; 925 926 if (IS_SD(mmc)) 927 
opcode = MMC_SEND_TUNING_BLOCK; 928 else 929 opcode = MMC_SEND_TUNING_BLOCK_HS200; 930 931 #ifndef CONFIG_DM_MMC 932 if (mmc->cfg->ops->execute_tuning) { 933 return mmc->cfg->ops->execute_tuning(mmc, opcode); 934 #else 935 if (ops->execute_tuning) { 936 return ops->execute_tuning(mmc->dev, opcode); 937 #endif 938 } else { 939 debug("Tuning feature required for HS200 mode.\n"); 940 return -EIO; 941 } 942 } 943 944 static int mmc_hs200_tuning(struct mmc *mmc) 945 { 946 return mmc_execute_tuning(mmc); 947 } 948 949 #else 950 int mmc_send_tuning(struct mmc *mmc, u32 opcode) { return 0; } 951 int mmc_execute_tuning(struct mmc *mmc) { return 0; } 952 static int mmc_hs200_tuning(struct mmc *mmc) { return 0; } 953 #endif 954 955 static int mmc_select_hs(struct mmc *mmc) 956 { 957 int ret; 958 959 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 960 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS); 961 962 if (!ret) 963 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 964 965 return ret; 966 } 967 968 static int mmc_select_hs_ddr(struct mmc *mmc) 969 { 970 u32 ext_csd_bits; 971 int err = 0; 972 973 if (mmc->bus_width == MMC_BUS_WIDTH_1BIT) 974 return 0; 975 976 ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ? 977 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; 978 979 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 980 EXT_CSD_BUS_WIDTH, ext_csd_bits); 981 if (err) 982 return err; 983 984 mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52); 985 986 return 0; 987 } 988 989 #ifndef CONFIG_MMC_SIMPLE 990 static int mmc_select_hs200(struct mmc *mmc) 991 { 992 int ret; 993 994 /* 995 * Set the bus width(4 or 8) with host's support and 996 * switch to HS200 mode if bus width is set successfully. 997 */ 998 ret = mmc_select_bus_width(mmc); 999 1000 if (ret > 0) { 1001 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1002 EXT_CSD_HS_TIMING, 1003 EXT_CSD_TIMING_HS200, false); 1004 1005 if (ret) 1006 return ret; 1007 1008 mmc_set_timing(mmc, MMC_TIMING_MMC_HS200); 1009 } 1010 1011 return ret; 1012 } 1013 1014 static int mmc_switch_to_hs400(struct mmc *mmc) 1015 { 1016 u8 val, fixed_drv_type, card_drv_type, drive_strength; 1017 1018 fixed_drv_type = mmc->cfg->fixed_drv_type; 1019 card_drv_type = mmc->raw_driver_strength | mmc_driver_type_mask(0); 1020 drive_strength = (card_drv_type & mmc_driver_type_mask(fixed_drv_type)) 1021 ? 
fixed_drv_type : 0; 1022 val = EXT_CSD_TIMING_HS400 | drive_strength << EXT_CSD_DRV_STR_SHIFT; 1023 1024 return __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, val, false); 1025 } 1026 1027 static int mmc_select_hs400(struct mmc *mmc) 1028 { 1029 int ret; 1030 1031 /* Switch card to HS mode */ 1032 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1033 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false); 1034 if (ret) 1035 return ret; 1036 1037 /* Set host controller to HS timing */ 1038 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 1039 1040 /* Reduce frequency to HS frequency */ 1041 mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR); 1042 1043 ret = mmc_send_status(mmc, 1000); 1044 if (ret) 1045 return ret; 1046 1047 /* Switch card to DDR */ 1048 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1049 EXT_CSD_BUS_WIDTH, 1050 EXT_CSD_DDR_BUS_WIDTH_8); 1051 if (ret) 1052 return ret; 1053 1054 /* Switch card to HS400 */ 1055 ret = mmc_switch_to_hs400(mmc); 1056 if (ret) 1057 return ret; 1058 1059 /* Set host controller to HS400 timing and frequency */ 1060 mmc_set_timing(mmc, MMC_TIMING_MMC_HS400); 1061 1062 return ret; 1063 } 1064 1065 static int mmc_select_hs400es(struct mmc *mmc) 1066 { 1067 int err; 1068 1069 /* Switch card to HS mode */ 1070 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1071 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false); 1072 if (err) 1073 return err; 1074 1075 /* Set host controller to HS timing */ 1076 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 1077 1078 err = mmc_send_status(mmc, 1000); 1079 if (err) 1080 return err; 1081 1082 mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR); 1083 1084 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, 1085 EXT_CSD_DDR_BUS_WIDTH_8 | 1086 EXT_CSD_BUS_WIDTH_STROBE); 1087 if (err) { 1088 printf("switch to bus width for hs400 failed\n"); 1089 return err; 1090 } 1091 1092 /* Switch card to HS400 */ 1093 err = mmc_switch_to_hs400(mmc); 1094 if (err) 1095 return err; 1096 1097 /* Set host controller to HS400 timing and frequency */ 1098 mmc_set_timing(mmc, MMC_TIMING_MMC_HS400ES); 1099 1100 return mmc_set_enhanced_strobe(mmc); 1101 } 1102 #else 1103 static int mmc_select_hs200(struct mmc *mmc) { return 0; } 1104 static int mmc_select_hs400(struct mmc *mmc) { return 0; } 1105 static int mmc_select_hs400es(struct mmc *mmc) { return 0; } 1106 #endif 1107 1108 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd) 1109 { 1110 u8 card_type; 1111 u32 host_caps, avail_type = 0; 1112 1113 card_type = ext_csd[EXT_CSD_CARD_TYPE]; 1114 host_caps = mmc->cfg->host_caps; 1115 1116 if ((host_caps & MMC_MODE_HS) && 1117 (card_type & EXT_CSD_CARD_TYPE_26)) 1118 avail_type |= EXT_CSD_CARD_TYPE_26; 1119 1120 if ((host_caps & MMC_MODE_HS) && 1121 (card_type & EXT_CSD_CARD_TYPE_52)) 1122 avail_type |= EXT_CSD_CARD_TYPE_52; 1123 1124 /* 1125 * For the moment, u-boot doesn't support signal voltage 1126 * switch, therefor we assume that host support ddr52 1127 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and 1128 * hs400 are the same). 1129 */ 1130 if ((host_caps & MMC_MODE_DDR_52MHz) && 1131 (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)) 1132 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; 1133 1134 if ((host_caps & MMC_MODE_HS200) && 1135 (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V)) 1136 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; 1137 1138 /* 1139 * If host can support HS400, it means that host can also 1140 * support HS200. 
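         * HS400 is only entered after tuning in HS200 (see mmc_change_freq()),
         * so both card-type bits are reported together below.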
1141 */ 1142 if ((host_caps & MMC_MODE_HS400) && 1143 (host_caps & MMC_MODE_8BIT) && 1144 (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 1145 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 1146 EXT_CSD_CARD_TYPE_HS400_1_8V; 1147 1148 if ((host_caps & MMC_MODE_HS400ES) && 1149 (host_caps & MMC_MODE_8BIT) && 1150 ext_csd[EXT_CSD_STROBE_SUPPORT] && 1151 (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 1152 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 1153 EXT_CSD_CARD_TYPE_HS400_1_8V | 1154 EXT_CSD_CARD_TYPE_HS400ES; 1155 1156 return avail_type; 1157 } 1158 1159 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type) 1160 { 1161 int clock = 0; 1162 1163 if (mmc_card_hs(mmc)) 1164 clock = (avail_type & EXT_CSD_CARD_TYPE_52) ? 1165 MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR; 1166 else if (mmc_card_hs200(mmc) || 1167 mmc_card_hs400(mmc) || 1168 mmc_card_hs400es(mmc)) 1169 clock = MMC_HS200_MAX_DTR; 1170 1171 mmc_set_clock(mmc, clock); 1172 } 1173 1174 static int mmc_change_freq(struct mmc *mmc) 1175 { 1176 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1177 u32 avail_type; 1178 int err; 1179 1180 mmc->card_caps = 0; 1181 1182 if (mmc_host_is_spi(mmc)) 1183 return 0; 1184 1185 /* Only version 4 supports high-speed */ 1186 if (mmc->version < MMC_VERSION_4) 1187 return 0; 1188 1189 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT; 1190 1191 err = mmc_send_ext_csd(mmc, ext_csd); 1192 1193 if (err) 1194 return err; 1195 1196 avail_type = mmc_select_card_type(mmc, ext_csd); 1197 1198 if (avail_type & EXT_CSD_CARD_TYPE_HS400ES) { 1199 err = mmc_select_bus_width(mmc); 1200 if (err > 0 && mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 1201 err = mmc_select_hs400es(mmc); 1202 mmc_set_bus_speed(mmc, avail_type); 1203 if (!err) 1204 return err; 1205 } 1206 } 1207 1208 if (avail_type & EXT_CSD_CARD_TYPE_HS200) 1209 err = mmc_select_hs200(mmc); 1210 else if (avail_type & EXT_CSD_CARD_TYPE_HS) 1211 err = mmc_select_hs(mmc); 1212 else 1213 err = -EINVAL; 1214 1215 if (err) 1216 return err; 1217 1218 mmc_set_bus_speed(mmc, avail_type); 1219 1220 if (mmc_card_hs200(mmc)) { 1221 err = mmc_hs200_tuning(mmc); 1222 if (avail_type & EXT_CSD_CARD_TYPE_HS400 && 1223 mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 1224 err = mmc_select_hs400(mmc); 1225 mmc_set_bus_speed(mmc, avail_type); 1226 } 1227 } else if (!mmc_card_hs400es(mmc)) { 1228 err = mmc_select_bus_width(mmc) > 0 ? 0 : err; 1229 if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52) 1230 err = mmc_select_hs_ddr(mmc); 1231 } 1232 1233 return err; 1234 } 1235 1236 static int mmc_set_capacity(struct mmc *mmc, int part_num) 1237 { 1238 switch (part_num) { 1239 case 0: 1240 mmc->capacity = mmc->capacity_user; 1241 break; 1242 case 1: 1243 case 2: 1244 mmc->capacity = mmc->capacity_boot; 1245 break; 1246 case 3: 1247 mmc->capacity = mmc->capacity_rpmb; 1248 break; 1249 case 4: 1250 case 5: 1251 case 6: 1252 case 7: 1253 mmc->capacity = mmc->capacity_gp[part_num - 4]; 1254 break; 1255 default: 1256 return -1; 1257 } 1258 1259 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len); 1260 1261 return 0; 1262 } 1263 1264 int mmc_switch_part(struct mmc *mmc, unsigned int part_num) 1265 { 1266 int ret; 1267 1268 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF, 1269 (mmc->part_config & ~PART_ACCESS_MASK) 1270 | (part_num & PART_ACCESS_MASK)); 1271 1272 /* 1273 * Set the capacity if the switch succeeded or was intended 1274 * to return to representing the raw device. 
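         * A -ENODEV result when switching back to the user area (part_num 0)
         * is tolerated so that the block descriptor still describes a usable
         * device.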
1275 */ 1276 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) { 1277 ret = mmc_set_capacity(mmc, part_num); 1278 mmc_get_blk_desc(mmc)->hwpart = part_num; 1279 } 1280 1281 return ret; 1282 } 1283 1284 int mmc_hwpart_config(struct mmc *mmc, 1285 const struct mmc_hwpart_conf *conf, 1286 enum mmc_hwpart_conf_mode mode) 1287 { 1288 u8 part_attrs = 0; 1289 u32 enh_size_mult; 1290 u32 enh_start_addr; 1291 u32 gp_size_mult[4]; 1292 u32 max_enh_size_mult; 1293 u32 tot_enh_size_mult = 0; 1294 u8 wr_rel_set; 1295 int i, pidx, err; 1296 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1297 1298 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE) 1299 return -EINVAL; 1300 1301 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) { 1302 printf("eMMC >= 4.4 required for enhanced user data area\n"); 1303 return -EMEDIUMTYPE; 1304 } 1305 1306 if (!(mmc->part_support & PART_SUPPORT)) { 1307 printf("Card does not support partitioning\n"); 1308 return -EMEDIUMTYPE; 1309 } 1310 1311 if (!mmc->hc_wp_grp_size) { 1312 printf("Card does not define HC WP group size\n"); 1313 return -EMEDIUMTYPE; 1314 } 1315 1316 /* check partition alignment and total enhanced size */ 1317 if (conf->user.enh_size) { 1318 if (conf->user.enh_size % mmc->hc_wp_grp_size || 1319 conf->user.enh_start % mmc->hc_wp_grp_size) { 1320 printf("User data enhanced area not HC WP group " 1321 "size aligned\n"); 1322 return -EINVAL; 1323 } 1324 part_attrs |= EXT_CSD_ENH_USR; 1325 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size; 1326 if (mmc->high_capacity) { 1327 enh_start_addr = conf->user.enh_start; 1328 } else { 1329 enh_start_addr = (conf->user.enh_start << 9); 1330 } 1331 } else { 1332 enh_size_mult = 0; 1333 enh_start_addr = 0; 1334 } 1335 tot_enh_size_mult += enh_size_mult; 1336 1337 for (pidx = 0; pidx < 4; pidx++) { 1338 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) { 1339 printf("GP%i partition not HC WP group size " 1340 "aligned\n", pidx+1); 1341 return -EINVAL; 1342 } 1343 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size; 1344 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) { 1345 part_attrs |= EXT_CSD_ENH_GP(pidx); 1346 tot_enh_size_mult += gp_size_mult[pidx]; 1347 } 1348 } 1349 1350 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) { 1351 printf("Card does not support enhanced attribute\n"); 1352 return -EMEDIUMTYPE; 1353 } 1354 1355 err = mmc_send_ext_csd(mmc, ext_csd); 1356 if (err) 1357 return err; 1358 1359 max_enh_size_mult = 1360 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) + 1361 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) + 1362 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT]; 1363 if (tot_enh_size_mult > max_enh_size_mult) { 1364 printf("Total enhanced size exceeds maximum (%u > %u)\n", 1365 tot_enh_size_mult, max_enh_size_mult); 1366 return -EMEDIUMTYPE; 1367 } 1368 1369 /* The default value of EXT_CSD_WR_REL_SET is device 1370 * dependent, the values can only be changed if the 1371 * EXT_CSD_HS_CTRL_REL bit is set. The values can be 1372 * changed only once and before partitioning is completed. 
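         * The desired value is assembled here but only written near the end
         * of this function, immediately before PARTITION_SETTING_COMPLETED.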
*/ 1373 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 1374 if (conf->user.wr_rel_change) { 1375 if (conf->user.wr_rel_set) 1376 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR; 1377 else 1378 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR; 1379 } 1380 for (pidx = 0; pidx < 4; pidx++) { 1381 if (conf->gp_part[pidx].wr_rel_change) { 1382 if (conf->gp_part[pidx].wr_rel_set) 1383 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx); 1384 else 1385 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx); 1386 } 1387 } 1388 1389 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] && 1390 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) { 1391 puts("Card does not support host controlled partition write " 1392 "reliability settings\n"); 1393 return -EMEDIUMTYPE; 1394 } 1395 1396 if (ext_csd[EXT_CSD_PARTITION_SETTING] & 1397 EXT_CSD_PARTITION_SETTING_COMPLETED) { 1398 printf("Card already partitioned\n"); 1399 return -EPERM; 1400 } 1401 1402 if (mode == MMC_HWPART_CONF_CHECK) 1403 return 0; 1404 1405 /* Partitioning requires high-capacity size definitions */ 1406 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) { 1407 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1408 EXT_CSD_ERASE_GROUP_DEF, 1); 1409 1410 if (err) 1411 return err; 1412 1413 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1414 1415 /* update erase group size to be high-capacity */ 1416 mmc->erase_grp_size = 1417 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1418 1419 } 1420 1421 /* all OK, write the configuration */ 1422 for (i = 0; i < 4; i++) { 1423 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1424 EXT_CSD_ENH_START_ADDR+i, 1425 (enh_start_addr >> (i*8)) & 0xFF); 1426 if (err) 1427 return err; 1428 } 1429 for (i = 0; i < 3; i++) { 1430 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1431 EXT_CSD_ENH_SIZE_MULT+i, 1432 (enh_size_mult >> (i*8)) & 0xFF); 1433 if (err) 1434 return err; 1435 } 1436 for (pidx = 0; pidx < 4; pidx++) { 1437 for (i = 0; i < 3; i++) { 1438 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1439 EXT_CSD_GP_SIZE_MULT+pidx*3+i, 1440 (gp_size_mult[pidx] >> (i*8)) & 0xFF); 1441 if (err) 1442 return err; 1443 } 1444 } 1445 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1446 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs); 1447 if (err) 1448 return err; 1449 1450 if (mode == MMC_HWPART_CONF_SET) 1451 return 0; 1452 1453 /* The WR_REL_SET is a write-once register but shall be 1454 * written before setting PART_SETTING_COMPLETED. As it is 1455 * write-once we can only write it when completing the 1456 * partitioning. */ 1457 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) { 1458 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1459 EXT_CSD_WR_REL_SET, wr_rel_set); 1460 if (err) 1461 return err; 1462 } 1463 1464 /* Setting PART_SETTING_COMPLETED confirms the partition 1465 * configuration but it only becomes effective after power 1466 * cycle, so we do not adjust the partition related settings 1467 * in the mmc struct. 
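         * The new layout is therefore only observed once the card is
         * re-initialized after that power cycle.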
 */

        err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_PARTITION_SETTING,
                         EXT_CSD_PARTITION_SETTING_COMPLETED);
        if (err)
                return err;

        return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
int mmc_getcd(struct mmc *mmc)
{
        int cd;

        cd = board_mmc_getcd(mmc);

        if (cd < 0) {
                if (mmc->cfg->ops->getcd)
                        cd = mmc->cfg->ops->getcd(mmc);
                else
                        cd = 1;
        }

        return cd;
}
#endif

static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
        struct mmc_cmd cmd;
        struct mmc_data data;

        /* Switch the frequency */
        cmd.cmdidx = SD_CMD_SWITCH_FUNC;
        cmd.resp_type = MMC_RSP_R1;
        cmd.cmdarg = (mode << 31) | 0xffffff;
        cmd.cmdarg &= ~(0xf << (group * 4));
        cmd.cmdarg |= value << (group * 4);

        data.dest = (char *)resp;
        data.blocksize = 64;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;

        return mmc_send_cmd(mmc, &cmd, &data);
}

static int sd_change_freq(struct mmc *mmc)
{
        int err;
        struct mmc_cmd cmd;
        ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
        ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
        struct mmc_data data;
        int timeout;

        mmc->card_caps = 0;

        if (mmc_host_is_spi(mmc))
                return 0;

        /* Read the SCR to find out if this card supports higher speeds */
        cmd.cmdidx = MMC_CMD_APP_CMD;
        cmd.resp_type = MMC_RSP_R1;
        cmd.cmdarg = mmc->rca << 16;

        err = mmc_send_cmd(mmc, &cmd, NULL);

        if (err)
                return err;

        cmd.cmdidx = SD_CMD_APP_SEND_SCR;
        cmd.resp_type = MMC_RSP_R1;
        cmd.cmdarg = 0;

        timeout = 3;

retry_scr:
        data.dest = (char *)scr;
        data.blocksize = 8;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;

        err = mmc_send_cmd(mmc, &cmd, &data);

        if (err) {
                if (timeout--)
                        goto retry_scr;

                return err;
        }

        mmc->scr[0] = __be32_to_cpu(scr[0]);
        mmc->scr[1] = __be32_to_cpu(scr[1]);

        switch ((mmc->scr[0] >> 24) & 0xf) {
        case 0:
                mmc->version = SD_VERSION_1_0;
                break;
        case 1:
                mmc->version = SD_VERSION_1_10;
                break;
        case 2:
                mmc->version = SD_VERSION_2;
                if ((mmc->scr[0] >> 15) & 0x1)
                        mmc->version = SD_VERSION_3;
                break;
        default:
                mmc->version = SD_VERSION_1_0;
                break;
        }

        if (mmc->scr[0] & SD_DATA_4BIT)
                mmc->card_caps |= MMC_MODE_4BIT;

        /* Version 1.0 doesn't support switching */
        if (mmc->version == SD_VERSION_1_0)
                return 0;

        timeout = 4;
        while (timeout--) {
                err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
                                (u8 *)switch_status);

                if (err)
                        return err;

                /* The high-speed function is busy. Try again */
                if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
                        break;
        }

        /* If high-speed isn't supported, we return */
        if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
                return 0;

        /*
         * If the host doesn't support SD_HIGHSPEED, do not switch the card
         * to HIGHSPEED mode even if the card supports SD_HIGHSPEED.
         * This avoids further problems when the card and the host end up
         * running in different modes.
         */
        if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
              (mmc->cfg->host_caps & MMC_MODE_HS)))
                return 0;

        err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

        if (err)
                return err;

        if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
                mmc->card_caps |= MMC_MODE_HS;

        return 0;
}

static int sd_read_ssr(struct mmc *mmc)
{
        int err, i;
        struct mmc_cmd cmd;
        ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
        struct mmc_data data;
        int timeout = 3;
        unsigned int au, eo, et, es;

        cmd.cmdidx = MMC_CMD_APP_CMD;
        cmd.resp_type = MMC_RSP_R1;
        cmd.cmdarg = mmc->rca << 16;

        err = mmc_send_cmd(mmc, &cmd, NULL);
        if (err)
                return err;

        cmd.cmdidx = SD_CMD_APP_SD_STATUS;
        cmd.resp_type = MMC_RSP_R1;
        cmd.cmdarg = 0;

retry_ssr:
        data.dest = (char *)ssr;
        data.blocksize = 64;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;

        err = mmc_send_cmd(mmc, &cmd, &data);
        if (err) {
                if (timeout--)
                        goto retry_ssr;

                return err;
        }

        for (i = 0; i < 16; i++)
                ssr[i] = be32_to_cpu(ssr[i]);

        au = (ssr[2] >> 12) & 0xF;
        if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
                mmc->ssr.au = sd_au_size[au];
                es = (ssr[3] >> 24) & 0xFF;
                es |= (ssr[2] & 0xFF) << 8;
                et = (ssr[3] >> 18) & 0x3F;
                if (es && et) {
                        eo = (ssr[3] >> 16) & 0x3;
                        mmc->ssr.erase_timeout = (et * 1000) / es;
                        mmc->ssr.erase_offset = eo * 1000;
                }
        } else {
                debug("Invalid Allocation Unit Size.\n");
        }

        return 0;
}

/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
        10000,
        100000,
        1000000,
        10000000,
};

/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
        0,      /* reserved */
        10,
        12,
        13,
        15,
        20,
        25,
        30,
        35,
        40,
        45,
        50,
        55,
        60,
        70,
        80,
};

#if !CONFIG_IS_ENABLED(DM_MMC)
static void mmc_set_ios(struct mmc *mmc)
{
        if (mmc->cfg->ops->set_ios)
                mmc->cfg->ops->set_ios(mmc);
}

static bool mmc_card_busy(struct mmc *mmc)
{
        if (!mmc->cfg->ops->card_busy)
                return -ENOSYS;

        return mmc->cfg->ops->card_busy(mmc);
}

static bool mmc_can_card_busy(struct mmc *mmc)
{
        return !!mmc->cfg->ops->card_busy;
}
#endif

static int mmc_startup(struct mmc *mmc)
{
        int err, i;
        uint mult, freq, tran_speed;
        u64 cmult, csize, capacity;
        struct mmc_cmd cmd;
        ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
        bool has_parts = false;
        bool part_completed;
        struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
        if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
                cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
                cmd.resp_type = MMC_RSP_R1;
                cmd.cmdarg = 1;
                err = mmc_send_cmd(mmc, &cmd, NULL);

                if (err)
                        return err;
        }
#endif
#ifndef CONFIG_MMC_USE_PRE_CONFIG
        /* Put the Card in Identify Mode */
        cmd.cmdidx = mmc_host_is_spi(mmc) ?
MMC_CMD_SEND_CID : 1760 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */ 1761 cmd.resp_type = MMC_RSP_R2; 1762 cmd.cmdarg = 0; 1763 1764 err = mmc_send_cmd(mmc, &cmd, NULL); 1765 1766 if (err) 1767 return err; 1768 1769 memcpy(mmc->cid, cmd.response, 16); 1770 1771 /* 1772 * For MMC cards, set the Relative Address. 1773 * For SD cards, get the Relatvie Address. 1774 * This also puts the cards into Standby State 1775 */ 1776 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1777 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR; 1778 cmd.cmdarg = mmc->rca << 16; 1779 cmd.resp_type = MMC_RSP_R6; 1780 1781 err = mmc_send_cmd(mmc, &cmd, NULL); 1782 1783 if (err) 1784 return err; 1785 1786 if (IS_SD(mmc)) 1787 mmc->rca = (cmd.response[0] >> 16) & 0xffff; 1788 } 1789 #endif 1790 /* Get the Card-Specific Data */ 1791 cmd.cmdidx = MMC_CMD_SEND_CSD; 1792 cmd.resp_type = MMC_RSP_R2; 1793 cmd.cmdarg = mmc->rca << 16; 1794 1795 err = mmc_send_cmd(mmc, &cmd, NULL); 1796 1797 if (err) 1798 return err; 1799 1800 mmc->csd[0] = cmd.response[0]; 1801 mmc->csd[1] = cmd.response[1]; 1802 mmc->csd[2] = cmd.response[2]; 1803 mmc->csd[3] = cmd.response[3]; 1804 1805 if (mmc->version == MMC_VERSION_UNKNOWN) { 1806 int version = (cmd.response[0] >> 26) & 0xf; 1807 1808 switch (version) { 1809 case 0: 1810 mmc->version = MMC_VERSION_1_2; 1811 break; 1812 case 1: 1813 mmc->version = MMC_VERSION_1_4; 1814 break; 1815 case 2: 1816 mmc->version = MMC_VERSION_2_2; 1817 break; 1818 case 3: 1819 mmc->version = MMC_VERSION_3; 1820 break; 1821 case 4: 1822 mmc->version = MMC_VERSION_4; 1823 break; 1824 default: 1825 mmc->version = MMC_VERSION_1_2; 1826 break; 1827 } 1828 } 1829 1830 /* divide frequency by 10, since the mults are 10x bigger */ 1831 freq = fbase[(cmd.response[0] & 0x7)]; 1832 mult = multipliers[((cmd.response[0] >> 3) & 0xf)]; 1833 1834 tran_speed = freq * mult; 1835 1836 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1); 1837 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf); 1838 1839 if (IS_SD(mmc)) 1840 mmc->write_bl_len = mmc->read_bl_len; 1841 else 1842 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf); 1843 1844 if (mmc->high_capacity) { 1845 csize = (mmc->csd[1] & 0x3f) << 16 1846 | (mmc->csd[2] & 0xffff0000) >> 16; 1847 cmult = 8; 1848 } else { 1849 csize = (mmc->csd[1] & 0x3ff) << 2 1850 | (mmc->csd[2] & 0xc0000000) >> 30; 1851 cmult = (mmc->csd[2] & 0x00038000) >> 15; 1852 } 1853 1854 mmc->capacity_user = (csize + 1) << (cmult + 2); 1855 mmc->capacity_user *= mmc->read_bl_len; 1856 mmc->capacity_boot = 0; 1857 mmc->capacity_rpmb = 0; 1858 for (i = 0; i < 4; i++) 1859 mmc->capacity_gp[i] = 0; 1860 1861 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN) 1862 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1863 1864 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN) 1865 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1866 1867 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) { 1868 cmd.cmdidx = MMC_CMD_SET_DSR; 1869 cmd.cmdarg = (mmc->dsr & 0xffff) << 16; 1870 cmd.resp_type = MMC_RSP_NONE; 1871 if (mmc_send_cmd(mmc, &cmd, NULL)) 1872 printf("MMC: SET_DSR failed\n"); 1873 } 1874 1875 /* Select the card, and put it into Transfer Mode */ 1876 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1877 cmd.cmdidx = MMC_CMD_SELECT_CARD; 1878 cmd.resp_type = MMC_RSP_R1; 1879 cmd.cmdarg = mmc->rca << 16; 1880 err = mmc_send_cmd(mmc, &cmd, NULL); 1881 1882 if (err) 1883 return err; 1884 } 1885 1886 /* 1887 * For SD, its erase group is always one sector 1888 */ 1889 mmc->erase_grp_size = 1; 1890 mmc->part_config = 
MMCPART_NOAVAILABLE; 1891 if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) { 1892 /* select high speed to reduce initialization time */ 1893 mmc_select_hs(mmc); 1894 mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR); 1895 1896 /* check ext_csd version and capacity */ 1897 err = mmc_send_ext_csd(mmc, ext_csd); 1898 if (err) 1899 return err; 1900 if (ext_csd[EXT_CSD_REV] >= 2) { 1901 /* 1902 * According to the JEDEC Standard, the value of 1903 * ext_csd's capacity is valid if the value is more 1904 * than 2GB 1905 */ 1906 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0 1907 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8 1908 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16 1909 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 1910 capacity *= MMC_MAX_BLOCK_LEN; 1911 if ((capacity >> 20) > 2 * 1024) 1912 mmc->capacity_user = capacity; 1913 } 1914 1915 switch (ext_csd[EXT_CSD_REV]) { 1916 case 1: 1917 mmc->version = MMC_VERSION_4_1; 1918 break; 1919 case 2: 1920 mmc->version = MMC_VERSION_4_2; 1921 break; 1922 case 3: 1923 mmc->version = MMC_VERSION_4_3; 1924 break; 1925 case 5: 1926 mmc->version = MMC_VERSION_4_41; 1927 break; 1928 case 6: 1929 mmc->version = MMC_VERSION_4_5; 1930 break; 1931 case 7: 1932 mmc->version = MMC_VERSION_5_0; 1933 break; 1934 case 8: 1935 mmc->version = MMC_VERSION_5_1; 1936 break; 1937 } 1938 1939 /* The partition data may be non-zero but it is only 1940 * effective if PARTITION_SETTING_COMPLETED is set in 1941 * EXT_CSD, so ignore any data if this bit is not set, 1942 * except for enabling the high-capacity group size 1943 * definition (see below). */ 1944 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 1945 EXT_CSD_PARTITION_SETTING_COMPLETED); 1946 1947 /* store the partition info of emmc */ 1948 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 1949 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 1950 ext_csd[EXT_CSD_BOOT_MULT]) 1951 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 1952 if (part_completed && 1953 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 1954 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 1955 if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN) 1956 mmc->esr.mmc_can_trim = 1; 1957 1958 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 1959 1960 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 1961 1962 for (i = 0; i < 4; i++) { 1963 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 1964 uint mult = (ext_csd[idx + 2] << 16) + 1965 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 1966 if (mult) 1967 has_parts = true; 1968 if (!part_completed) 1969 continue; 1970 mmc->capacity_gp[i] = mult; 1971 mmc->capacity_gp[i] *= 1972 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1973 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1974 mmc->capacity_gp[i] <<= 19; 1975 } 1976 1977 if (part_completed) { 1978 mmc->enh_user_size = 1979 (ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) + 1980 (ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) + 1981 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 1982 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1983 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1984 mmc->enh_user_size <<= 19; 1985 mmc->enh_user_start = 1986 (ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) + 1987 (ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) + 1988 (ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) + 1989 ext_csd[EXT_CSD_ENH_START_ADDR]; 1990 if (mmc->high_capacity) 1991 mmc->enh_user_start <<= 9; 1992 } 1993 1994 /* 1995 * Host needs to enable ERASE_GRP_DEF bit if device is 1996 * partitioned. This bit will be lost every time after a reset 1997 * or power off. 
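         * It is therefore re-enabled below whenever any partition data is
         * present.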
This will affect erase size. 1998 */ 1999 if (part_completed) 2000 has_parts = true; 2001 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 2002 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 2003 has_parts = true; 2004 if (has_parts) { 2005 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 2006 EXT_CSD_ERASE_GROUP_DEF, 1); 2007 2008 if (err) 2009 return err; 2010 else 2011 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 2012 } 2013 2014 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 2015 /* Read out group size from ext_csd */ 2016 mmc->erase_grp_size = 2017 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 2018 /* 2019 * if high capacity and partition setting completed 2020 * SEC_COUNT is valid even if it is smaller than 2 GiB 2021 * JEDEC Standard JESD84-B45, 6.2.4 2022 */ 2023 if (mmc->high_capacity && part_completed) { 2024 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 2025 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 2026 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 2027 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 2028 capacity *= MMC_MAX_BLOCK_LEN; 2029 mmc->capacity_user = capacity; 2030 } 2031 } else { 2032 /* Calculate the group size from the csd value. */ 2033 int erase_gsz, erase_gmul; 2034 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10; 2035 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5; 2036 mmc->erase_grp_size = (erase_gsz + 1) 2037 * (erase_gmul + 1); 2038 } 2039 2040 mmc->hc_wp_grp_size = 1024 2041 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 2042 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 2043 2044 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 2045 2046 mmc->raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH]; 2047 } 2048 2049 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart); 2050 if (err) 2051 return err; 2052 2053 if (IS_SD(mmc)) 2054 err = sd_change_freq(mmc); 2055 else 2056 err = mmc_change_freq(mmc); 2057 2058 if (err) 2059 return err; 2060 2061 /* Restrict card's capabilities by what the host can do */ 2062 mmc->card_caps &= mmc->cfg->host_caps; 2063 2064 if (IS_SD(mmc)) { 2065 if (mmc->card_caps & MMC_MODE_4BIT) { 2066 cmd.cmdidx = MMC_CMD_APP_CMD; 2067 cmd.resp_type = MMC_RSP_R1; 2068 cmd.cmdarg = mmc->rca << 16; 2069 2070 err = mmc_send_cmd(mmc, &cmd, NULL); 2071 if (err) 2072 return err; 2073 2074 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH; 2075 cmd.resp_type = MMC_RSP_R1; 2076 cmd.cmdarg = 2; 2077 err = mmc_send_cmd(mmc, &cmd, NULL); 2078 if (err) 2079 return err; 2080 2081 mmc_set_bus_width(mmc, 4); 2082 } 2083 2084 err = sd_read_ssr(mmc); 2085 if (err) 2086 return err; 2087 2088 if (mmc->card_caps & MMC_MODE_HS) 2089 tran_speed = MMC_HIGH_52_MAX_DTR; 2090 else 2091 tran_speed = MMC_HIGH_26_MAX_DTR; 2092 2093 mmc_set_clock(mmc, tran_speed); 2094 } 2095 2096 /* Fix the block length for DDR mode */ 2097 if (mmc_card_ddr(mmc)) { 2098 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 2099 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 2100 } 2101 2102 /* fill in device description */ 2103 bdesc = mmc_get_blk_desc(mmc); 2104 bdesc->lun = 0; 2105 bdesc->hwpart = 0; 2106 bdesc->type = 0; 2107 bdesc->blksz = mmc->read_bl_len; 2108 bdesc->log2blksz = LOG2(bdesc->blksz); 2109 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len); 2110 #if !defined(CONFIG_SPL_BUILD) || \ 2111 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \ 2112 !defined(CONFIG_USE_TINY_PRINTF)) 2113 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x", 2114 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff), 2115 (mmc->cid[3] >> 16) & 0xffff); 2116 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff, 2117 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, 2118 
(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff, 2119 (mmc->cid[2] >> 24) & 0xff); 2120 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf, 2121 (mmc->cid[2] >> 16) & 0xf); 2122 #else 2123 bdesc->vendor[0] = 0; 2124 bdesc->product[0] = 0; 2125 bdesc->revision[0] = 0; 2126 #endif 2127 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT) 2128 part_init(bdesc); 2129 #endif 2130 2131 return 0; 2132 } 2133 2134 #ifndef CONFIG_MMC_USE_PRE_CONFIG 2135 static int mmc_send_if_cond(struct mmc *mmc) 2136 { 2137 struct mmc_cmd cmd; 2138 int err; 2139 2140 cmd.cmdidx = SD_CMD_SEND_IF_COND; 2141 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */ 2142 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa; 2143 cmd.resp_type = MMC_RSP_R7; 2144 2145 err = mmc_send_cmd(mmc, &cmd, NULL); 2146 2147 if (err) 2148 return err; 2149 2150 if ((cmd.response[0] & 0xff) != 0xaa) 2151 return -EOPNOTSUPP; 2152 else 2153 mmc->version = SD_VERSION_2; 2154 2155 return 0; 2156 } 2157 #endif 2158 2159 #if !CONFIG_IS_ENABLED(DM_MMC) 2160 /* board-specific MMC power initializations. */ 2161 __weak void board_mmc_power_init(void) 2162 { 2163 } 2164 #endif 2165 2166 #ifndef CONFIG_MMC_USE_PRE_CONFIG 2167 static int mmc_power_init(struct mmc *mmc) 2168 { 2169 #if CONFIG_IS_ENABLED(DM_MMC) 2170 #if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD) 2171 struct udevice *vmmc_supply; 2172 int ret; 2173 2174 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply", 2175 &vmmc_supply); 2176 if (ret) { 2177 debug("%s: No vmmc supply\n", mmc->dev->name); 2178 return 0; 2179 } 2180 2181 ret = regulator_set_enable(vmmc_supply, true); 2182 if (ret) { 2183 puts("Error enabling VMMC supply\n"); 2184 return ret; 2185 } 2186 #endif 2187 #else /* !CONFIG_DM_MMC */ 2188 /* 2189 * Driver model should use a regulator, as above, rather than calling 2190 * out to board code. 2191 */ 2192 board_mmc_power_init(); 2193 #endif 2194 return 0; 2195 } 2196 #endif 2197 #ifdef CONFIG_MMC_USE_PRE_CONFIG 2198 static int mmc_select_card(struct mmc *mmc, int n) 2199 { 2200 struct mmc_cmd cmd; 2201 int err = 0; 2202 2203 memset(&cmd, 0, sizeof(struct mmc_cmd)); 2204 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 2205 mmc->rca = n; 2206 cmd.cmdidx = MMC_CMD_SELECT_CARD; 2207 cmd.resp_type = MMC_RSP_R1; 2208 cmd.cmdarg = mmc->rca << 16; 2209 err = mmc_send_cmd(mmc, &cmd, NULL); 2210 } 2211 2212 return err; 2213 } 2214 2215 int mmc_start_init(struct mmc *mmc) 2216 { 2217 int bus_width = 1; 2218 /* 2219 * We use the MMC config set by the bootrom. 2220 * So it is no need to reset the eMMC device. 2221 */ 2222 if (mmc->cfg->host_caps & MMC_MODE_8BIT) 2223 bus_width = 8; 2224 else if (mmc->cfg->host_caps & MMC_MODE_4BIT) 2225 bus_width = 4; 2226 mmc_set_bus_width(mmc, bus_width); 2227 2228 mmc_set_clock(mmc, 1); 2229 mmc_set_timing(mmc, MMC_TIMING_LEGACY); 2230 /* Send cmd7 to return stand-by state*/ 2231 mmc_select_card(mmc, 0); 2232 mmc->version = MMC_VERSION_UNKNOWN; 2233 mmc->high_capacity = 1; 2234 /* 2235 * The RCA is set to 2 by rockchip bootrom, use the default 2236 * value here. 
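         * Reusing the RCA the bootrom already assigned lets CMD7/CMD13
         * address the card without repeating the identification sequence.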
2237 */ 2238 #ifdef CONFIG_ARCH_ROCKCHIP 2239 mmc->rca = 2; 2240 #else 2241 mmc->rca = 1; 2242 #endif 2243 return 0; 2244 } 2245 #else 2246 int mmc_start_init(struct mmc *mmc) 2247 { 2248 bool no_card; 2249 int err; 2250 2251 /* we pretend there's no card when init is NULL */ 2252 no_card = mmc_getcd(mmc) == 0; 2253 #if !CONFIG_IS_ENABLED(DM_MMC) 2254 no_card = no_card || (mmc->cfg->ops->init == NULL); 2255 #endif 2256 if (no_card) { 2257 mmc->has_init = 0; 2258 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 2259 printf("MMC: no card present\n"); 2260 #endif 2261 return -ENOMEDIUM; 2262 } 2263 2264 if (mmc->has_init) 2265 return 0; 2266 2267 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 2268 mmc_adapter_card_type_ident(); 2269 #endif 2270 err = mmc_power_init(mmc); 2271 if (err) 2272 return err; 2273 2274 #if CONFIG_IS_ENABLED(DM_MMC) 2275 /* The device has already been probed ready for use */ 2276 #else 2277 /* made sure it's not NULL earlier */ 2278 err = mmc->cfg->ops->init(mmc); 2279 if (err) 2280 return err; 2281 #endif 2282 mmc_set_bus_width(mmc, 1); 2283 mmc_set_clock(mmc, 1); 2284 mmc_set_timing(mmc, MMC_TIMING_LEGACY); 2285 2286 /* Reset the Card */ 2287 err = mmc_go_idle(mmc); 2288 2289 if (err) 2290 return err; 2291 2292 /* The internal partition reset to user partition(0) at every CMD0*/ 2293 mmc_get_blk_desc(mmc)->hwpart = 0; 2294 2295 /* Test for SD version 2 */ 2296 err = mmc_send_if_cond(mmc); 2297 2298 /* Now try to get the SD card's operating condition */ 2299 err = sd_send_op_cond(mmc); 2300 2301 /* If the command timed out, we check for an MMC card */ 2302 if (err == -ETIMEDOUT) { 2303 err = mmc_send_op_cond(mmc); 2304 2305 if (err) { 2306 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 2307 printf("Card did not respond to voltage select!\n"); 2308 #endif 2309 return -EOPNOTSUPP; 2310 } 2311 } 2312 2313 if (!err) 2314 mmc->init_in_progress = 1; 2315 2316 return err; 2317 } 2318 #endif 2319 2320 static int mmc_complete_init(struct mmc *mmc) 2321 { 2322 int err = 0; 2323 2324 mmc->init_in_progress = 0; 2325 if (mmc->op_cond_pending) 2326 err = mmc_complete_op_cond(mmc); 2327 2328 if (!err) 2329 err = mmc_startup(mmc); 2330 if (err) 2331 mmc->has_init = 0; 2332 else 2333 mmc->has_init = 1; 2334 return err; 2335 } 2336 2337 int mmc_init(struct mmc *mmc) 2338 { 2339 int err = 0; 2340 __maybe_unused unsigned start; 2341 #if CONFIG_IS_ENABLED(DM_MMC) 2342 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev); 2343 2344 upriv->mmc = mmc; 2345 #endif 2346 if (mmc->has_init) 2347 return 0; 2348 2349 start = get_timer(0); 2350 2351 if (!mmc->init_in_progress) 2352 err = mmc_start_init(mmc); 2353 2354 if (!err) 2355 err = mmc_complete_init(mmc); 2356 if (err) 2357 printf("%s: %d, time %lu\n", __func__, err, get_timer(start)); 2358 2359 return err; 2360 } 2361 2362 int mmc_set_dsr(struct mmc *mmc, u16 val) 2363 { 2364 mmc->dsr = val; 2365 return 0; 2366 } 2367 2368 /* CPU-specific MMC initializations */ 2369 __weak int cpu_mmc_init(bd_t *bis) 2370 { 2371 return -1; 2372 } 2373 2374 /* board-specific MMC initializations. 
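 * Legacy (non driver model) boards override this weak stub to register
 * their controllers. A hypothetical board file (not part of this driver;
 * my_board_register_mmc() is an assumed helper) might look like:
 *
 *   int board_mmc_init(bd_t *bis)
 *   {
 *           return my_board_register_mmc(bis);
 *   }
 *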
*/ 2375 __weak int board_mmc_init(bd_t *bis) 2376 { 2377 return -1; 2378 } 2379 2380 void mmc_set_preinit(struct mmc *mmc, int preinit) 2381 { 2382 mmc->preinit = preinit; 2383 } 2384 2385 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD) 2386 static int mmc_probe(bd_t *bis) 2387 { 2388 return 0; 2389 } 2390 #elif CONFIG_IS_ENABLED(DM_MMC) 2391 static int mmc_probe(bd_t *bis) 2392 { 2393 int ret, i; 2394 struct uclass *uc; 2395 struct udevice *dev; 2396 2397 ret = uclass_get(UCLASS_MMC, &uc); 2398 if (ret) 2399 return ret; 2400 2401 /* 2402 * Try to add them in sequence order. Really with driver model we 2403 * should allow holes, but the current MMC list does not allow that. 2404 * So if we request 0, 1, 3 we will get 0, 1, 2. 2405 */ 2406 for (i = 0; ; i++) { 2407 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev); 2408 if (ret == -ENODEV) 2409 break; 2410 } 2411 uclass_foreach_dev(dev, uc) { 2412 ret = device_probe(dev); 2413 if (ret) 2414 printf("%s - probe failed: %d\n", dev->name, ret); 2415 } 2416 2417 return 0; 2418 } 2419 #else 2420 static int mmc_probe(bd_t *bis) 2421 { 2422 if (board_mmc_init(bis) < 0) 2423 cpu_mmc_init(bis); 2424 2425 return 0; 2426 } 2427 #endif 2428 2429 int mmc_initialize(bd_t *bis) 2430 { 2431 static int initialized = 0; 2432 int ret; 2433 if (initialized) /* Avoid initializing mmc multiple times */ 2434 return 0; 2435 initialized = 1; 2436 2437 #if !CONFIG_IS_ENABLED(BLK) 2438 #if !CONFIG_IS_ENABLED(MMC_TINY) 2439 mmc_list_init(); 2440 #endif 2441 #endif 2442 ret = mmc_probe(bis); 2443 if (ret) 2444 return ret; 2445 2446 #ifndef CONFIG_SPL_BUILD 2447 print_mmc_devices(','); 2448 #endif 2449 2450 mmc_do_preinit(); 2451 return 0; 2452 } 2453 2454 #ifdef CONFIG_CMD_BKOPS_ENABLE 2455 int mmc_set_bkops_enable(struct mmc *mmc) 2456 { 2457 int err; 2458 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 2459 2460 err = mmc_send_ext_csd(mmc, ext_csd); 2461 if (err) { 2462 puts("Could not get ext_csd register values\n"); 2463 return err; 2464 } 2465 2466 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) { 2467 puts("Background operations not supported on device\n"); 2468 return -EMEDIUMTYPE; 2469 } 2470 2471 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) { 2472 puts("Background operations already enabled\n"); 2473 return 0; 2474 } 2475 2476 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1); 2477 if (err) { 2478 puts("Failed to enable manual background operations\n"); 2479 return err; 2480 } 2481 2482 puts("Enabled manual background operations\n"); 2483 2484 return 0; 2485 } 2486 #endif 2487