/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <command.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

/*
 * SD allocation-unit sizes in 512-byte sectors, indexed by the 4-bit
 * AU_SIZE field of the SD Status register (consumed by sd_read_ssr()).
 */
static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};

#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY supports exactly one, statically allocated, MMC device. */
static struct mmc mmc_static;

/* Return the single static device regardless of @dev_num. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Start initialization early for the static device if marked for preinit. */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* Block descriptor is embedded directly in struct mmc for MMC_TINY. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
/* Board hook: return write-protect state, or -1 for "not implemented". */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Query write protection: board hook first, then the host driver's
 * getwp op; defaults to "not protected" when neither answers.
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Board hook: return card-detect state, or -1 for "not implemented". */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif

#ifdef CONFIG_MMC_TRACE
/* Log a command index and argument before it is sent to the card. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/* Log the result, or the decoded response, of a just-sent command. */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* Dump the 16 response bytes, each word MSB first. */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Print the card's current state as decoded from an R1 status response. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
/* Send a command via the host driver's send_cmd op, with optional tracing. */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif

/*
 * Poll CMD13 (SEND_STATUS) until the card reports ready-for-data and has
 * left the programming state, or until @timeout milliseconds elapse.
 * Returns 0 on success, -ECOMM on a card status error, -ETIMEDOUT on
 * timeout, or the transport error after 5 failed send attempts.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	/*
	 * NOTE(review): cmdarg is only assigned for non-SPI hosts; in SPI
	 * mode it is left uninitialized (SPI drivers presumably ignore the
	 * RCA argument) -- confirm before relying on it.
	 */
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			    MMC_STATE_PRG)
				break;
			else if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				printf("Status Error: 0x%08X\n",
					cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Set the card block length with CMD16. Skipped when the card runs a DDR
 * mode, where SET_BLOCKLEN is not permitted.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;

	if (mmc_card_ddr(mmc))
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	return mmc_send_cmd(mmc, &cmd, NULL);
}

/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18.
 * High-capacity cards are addressed by block number, others by byte
 * offset. A multi-block read is terminated with CMD12. Returns the
 * number of blocks read, or 0 on any error.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}

/*
 * Block-device read entry point. Selects the hardware partition, range
 * checks against the device size, then reads in chunks of at most
 * cfg->b_max blocks. On a failed chunk the card is re-initialized and
 * the chunk retried, up to seven times. Returns the number of blocks
 * read, or 0 on error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			int timeout = 0;
re_init_retry:
			timeout++;
			/*
			 * Try re-init seven times.
			 */
			if (timeout > 7) {
				printf("Re-init retry timeout\n");
				return 0;
			}

			/* Force a full re-initialization of the card. */
			mmc->has_init = 0;
			if (mmc_init(mmc))
				return 0;

			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
				printf("%s: Re-init mmc_read_blocks error\n",
				       __func__);
				goto re_init_retry;
			}
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}

/* Set the bus clock, clamped to the host's [f_min, f_max] range. */
void mmc_set_clock(struct mmc *mmc, uint clock)
{
	if (clock > mmc->cfg->f_max)
		clock = mmc->cfg->f_max;

	if (clock < mmc->cfg->f_min)
		clock = mmc->cfg->f_min;

	mmc->clock = clock;

	mmc_set_ios(mmc);
}

/* Program a new bus width into the host controller. */
static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}

/* Program a new bus timing mode into the host controller. */
static void mmc_set_timing(struct mmc *mmc, uint timing)
{
	mmc->timing = timing;
	mmc_set_ios(mmc);
}

/* Send CMD0 to reset the card to the idle state. */
static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

/*
 * SD power-up: loop ACMD41 until the card leaves busy, negotiating the
 * operating voltage window and (for SD 2.0) host high-capacity support.
 * On success records the OCR, the high_capacity flag, and resets the RCA.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ?
			0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}

/*
 * Send one CMD1 (SEND_OP_COND) iteration. With @use_arg set (and not in
 * SPI mode) the argument advertises HCS plus the voltage window common
 * to host and card. The resulting OCR is stored in mmc->ocr.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

/*
 * Kick off eMMC power-up negotiation (CMD1). Completion is deferred to
 * mmc_complete_op_cond() so other init work can overlap the card's busy
 * period; op_cond_pending records that deferred state.
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Asking to the card its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}

/*
 * Finish the CMD1 negotiation started by mmc_send_op_cond(): poll until
 * the card reports ready (OCR_BUSY set in the response), read the OCR
 * over SPI if needed, then record high-capacity support and the default
 * RCA of 1.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* Exact version is determined later from the CSD/EXT_CSD. */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}


/* Read the 512-byte EXT_CSD register (CMD8) into @ext_csd. */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	/* Get the Card Status Register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	return err;
}

/*
 * Wait for the card to leave the programming state after a SWITCH.
 * With @send_status set, poll CMD13 (failing on SWITCH_ERROR); otherwise
 * use the host's card-busy detection, or simply wait the full timeout
 * when the host cannot report busy at all.
 */
static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	if (!send_status && !mmc_can_card_busy(mmc)) {
		/* No way to observe busy: wait the worst case and hope. */
		mdelay(timeout);
		return 0;
	}

	do {
		if (!send_status) {
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
				MMC_STATE_PRG;
		}

		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}

static int
/*
 * Issue CMD6 (SWITCH) to write one EXT_CSD byte, retrying up to three
 * times, then wait for completion via mmc_poll_for_busy().
 * NOTE(review): the @set argument is not encoded into the command --
 * the access mode is hard-wired to WRITE_BYTE; confirm this is intended.
 */
__mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
	     u8 send_status)
{
	struct mmc_cmd cmd;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(index << 16) |
		(value << 8);

	do {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		if (!ret)
			return mmc_poll_for_busy(mmc, send_status);
	} while (--retries > 0 && ret);

	return ret;
}

/* Write one EXT_CSD byte and poll CMD13 until the switch completes. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}

/*
 * Find the widest bus width (8-bit, then 4-bit) both card and host
 * support by switching and re-reading EXT_CSD, comparing read-only
 * fields against the earlier 1-bit read to verify the bus works.
 * Returns the selected MMC_BUS_WIDTH_* value, 0 when only 1-bit is
 * possible, or a negative error.
 */
static int mmc_select_bus_width(struct mmc *mmc)
{
	u32 ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	u32 bus_widths[] = {
		MMC_BUS_WIDTH_8BIT,
		MMC_BUS_WIDTH_4BIT,
	};
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
	u32 idx, bus_width = 0;
	int err = 0;

	if (mmc->version < MMC_VERSION_4 ||
	    !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT)))
		return 0;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards don't have a configuration register to
	 * notify supported bus width. So bus test command should be run
	 * to identify the supported bus width or compare the ext csd
	 * values of current bus width and ext csd values of 1 bit mode
	 * read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(mmc, bus_width);

		err = mmc_send_ext_csd(mmc, test_csd);

		if (err)
			continue;

		/* Only compare read only fields */
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] ==
		     test_csd[EXT_CSD_PARTITIONING_SUPPORT]) &&
		    (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
		     test_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		    (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) &&
		    (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
		     test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		    !memcmp(&ext_csd[EXT_CSD_SEC_CNT],
			    &test_csd[EXT_CSD_SEC_CNT], 4)) {
			err = bus_width;
			break;
		} else {
			err = -EBADMSG;
		}
	}

	return err;
}

/* Tuning block pattern read back by CMD19/CMD21 on a 4-bit bus. */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Tuning block pattern read back by CMD21 on an 8-bit bus. */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * Read one tuning block with @opcode (CMD19/CMD21) and compare it with
 * the expected pattern for the current bus width. Returns 0 on match,
 * -EIO on mismatch, -EINVAL for a 1-bit bus, -ENOMEM on allocation
 * failure, or the transfer error.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	data_buf = calloc(1, size);
	if (!data_buf)
		return -ENOMEM;

	cmd.cmdidx = opcode;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)data_buf;
	data.blocksize = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		goto out;

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;
out:
	free(data_buf);
	return err;
}

/*
 * Run the host driver's execute_tuning op with the tuning opcode
 * appropriate for the card: CMD19 for SD, CMD21 for eMMC HS200.
 */
static int mmc_execute_tuning(struct mmc *mmc)
{
#ifdef CONFIG_DM_MMC
	struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev);
#endif
	u32 opcode;

	if (IS_SD(mmc))
		opcode = MMC_SEND_TUNING_BLOCK;
	else
		opcode = MMC_SEND_TUNING_BLOCK_HS200;

#ifndef CONFIG_DM_MMC
	if (mmc->cfg->ops->execute_tuning) {
		return mmc->cfg->ops->execute_tuning(mmc, opcode);
#else
	if (ops->execute_tuning) {
		return ops->execute_tuning(mmc->dev, opcode);
#endif
	} else {
		debug("Tuning feature required for HS200 mode.\n");
		return -EIO;
	}
}

/* Tune the sampling point after switching to HS200. */
static int mmc_hs200_tuning(struct mmc *mmc)
{
	return mmc_execute_tuning(mmc);
}

/* Switch the card to High Speed timing and mirror it on the host. */
static int mmc_select_hs(struct mmc *mmc)
{
	int ret;

	ret =
	      mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS);

	if (!ret)
		mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	return ret;
}

/*
 * Switch an HS card to DDR52: program the DDR bus-width byte matching
 * the current width, then update the host timing. No-op on a 1-bit bus.
 */
static int mmc_select_hs_ddr(struct mmc *mmc)
{
	u32 ext_csd_bits;
	int err = 0;

	if (mmc->bus_width == MMC_BUS_WIDTH_1BIT)
		return 0;

	ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ?
		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH, ext_csd_bits);
	if (err)
		return err;

	mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52);

	return 0;
}

#ifndef CONFIG_SPL_BUILD
/*
 * Switch to HS200: negotiate a 4/8-bit bus first, then write the HS200
 * timing byte without CMD13 polling (send_status=false), since the link
 * is not reliable until tuning. Returns the bus width (> 0) on success.
 */
static int mmc_select_hs200(struct mmc *mmc)
{
	int ret;

	/*
	 * Set the bus width(4 or 8) with host's support and
	 * switch to HS200 mode if bus width is set successfully.
	 */
	ret = mmc_select_bus_width(mmc);

	if (ret > 0) {
		ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING,
				   EXT_CSD_TIMING_HS200, false);

		if (ret)
			return ret;

		mmc_set_timing(mmc, MMC_TIMING_MMC_HS200);
	}

	return ret;
}
#endif

/*
 * Move from HS200 to HS400 per the JEDEC sequence: drop to HS timing and
 * a <= 52 MHz clock, switch the bus to 8-bit DDR, then set HS400 timing
 * and the final frequency.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int ret;

	/* Switch card to HS mode */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false);
	if (ret)
		return ret;

	/* Set host controller to HS timing */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS);

	/* Reduce frequency to HS frequency */
	mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR);

	ret = mmc_send_status(mmc, 1000);
	if (ret)
		return ret;

	/* Switch card to DDR */
	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8);
	if (ret)
		return ret;

	/* Switch card to HS400 */
	ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false);
	if (ret)
		return ret;

	/* Set host controller to HS400 timing and frequency */
	mmc_set_timing(mmc, MMC_TIMING_MMC_HS400);

	return ret;
}

/*
 * Intersect the card's EXT_CSD CARD_TYPE capabilities with the host's
 * caps and return the usable EXT_CSD_CARD_TYPE_* bit mask.
 */
static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd)
{
	u8 card_type;
	u32 host_caps, avail_type = 0;

	card_type = ext_csd[EXT_CSD_CARD_TYPE];
	host_caps = mmc->cfg->host_caps;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_26))
		avail_type |= EXT_CSD_CARD_TYPE_26;

	if ((host_caps & MMC_MODE_HS) &&
	    (card_type & EXT_CSD_CARD_TYPE_52))
		avail_type |= EXT_CSD_CARD_TYPE_52;

	/*
	 * For the moment, u-boot doesn't support signal voltage
	 * switch, therefore we assume that the host supports ddr52
	 * at 1.8v or 3.3v I/O (1.2v I/O not supported; hs200 and
	 * hs400 are the same).
	 */
	if ((host_caps & MMC_MODE_DDR_52MHz) &&
	    (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;

	if ((host_caps & MMC_MODE_HS200) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;

	/*
	 * If host can support HS400, it means that host can also
	 * support HS200.
	 */
	if ((host_caps & MMC_MODE_HS400) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V;

	if ((host_caps & MMC_MODE_HS400ES) &&
	    (host_caps & MMC_MODE_8BIT) &&
	    ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V))
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V |
			      EXT_CSD_CARD_TYPE_HS400_1_8V |
			      EXT_CSD_CARD_TYPE_HS400ES;

	return avail_type;
}

/* Pick the bus clock that matches the currently selected timing mode. */
static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type)
{
	int clock = 0;

	if (mmc_card_hs(mmc))
		clock = (avail_type & EXT_CSD_CARD_TYPE_52) ?
			MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR;
	else if (mmc_card_hs200(mmc) ||
		 mmc_card_hs400(mmc) ||
		 mmc_card_hs400es(mmc))
		clock = MMC_HS200_MAX_DTR;

	mmc_set_clock(mmc, clock);
}

/*
 * Negotiate the fastest supported eMMC mode: read EXT_CSD, try HS200
 * (then possibly HS400) before falling back to HS, set the bus clock,
 * and finally negotiate bus width / DDR for the plain HS modes.
 */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	u32 avail_type;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	avail_type = mmc_select_card_type(mmc, ext_csd);

#ifndef CONFIG_SPL_BUILD
	if (avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(mmc);
	else
#endif
	if (avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(mmc);
	else
		err = -EINVAL;

	if (err)
		return err;

	mmc_set_bus_speed(mmc, avail_type);

	if (mmc_card_hs200(mmc)) {
		/*
		 * NOTE(review): the tuning result is overwritten by the
		 * HS400 switch below without being checked -- confirm
		 * this is intentional.
		 */
		err = mmc_hs200_tuning(mmc);
		if (avail_type & EXT_CSD_CARD_TYPE_HS400 &&
		    mmc->bus_width == MMC_BUS_WIDTH_8BIT) {
			err = mmc_select_hs400(mmc);
			mmc_set_bus_speed(mmc, avail_type);
		}
	} else if (!mmc_card_hs400es(mmc)) {
		err = mmc_select_bus_width(mmc) > 0 ?
			0 : err;
		if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52)
			err = mmc_select_hs_ddr(mmc);
	}

	return err;
}

/*
 * Update mmc->capacity (and the block device's lba count) for the given
 * hardware partition: 0 = user area, 1/2 = boot, 3 = RPMB, 4-7 = GP1-4.
 */
static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}

/*
 * Select a hardware partition via the PARTITION_ACCESS bits of
 * EXT_CSD_PART_CONF and refresh the visible capacity on success.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}

/*
 * Validate and (optionally) program an eMMC hardware-partition layout:
 * enhanced user-data area, GP partitions and write-reliability bits.
 * @mode selects check-only, set, or set-and-complete. Partitioning is
 * one-shot: once PARTITION_SETTING_COMPLETED is written the layout is
 * final, and only becomes effective after a power cycle.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed.
	 */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
(gp_size_mult[pidx] >> (i*8)) & 0xFF); 1234 if (err) 1235 return err; 1236 } 1237 } 1238 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1239 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs); 1240 if (err) 1241 return err; 1242 1243 if (mode == MMC_HWPART_CONF_SET) 1244 return 0; 1245 1246 /* The WR_REL_SET is a write-once register but shall be 1247 * written before setting PART_SETTING_COMPLETED. As it is 1248 * write-once we can only write it when completing the 1249 * partitioning. */ 1250 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) { 1251 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1252 EXT_CSD_WR_REL_SET, wr_rel_set); 1253 if (err) 1254 return err; 1255 } 1256 1257 /* Setting PART_SETTING_COMPLETED confirms the partition 1258 * configuration but it only becomes effective after power 1259 * cycle, so we do not adjust the partition related settings 1260 * in the mmc struct. */ 1261 1262 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1263 EXT_CSD_PARTITION_SETTING, 1264 EXT_CSD_PARTITION_SETTING_COMPLETED); 1265 if (err) 1266 return err; 1267 1268 return 0; 1269 } 1270 1271 #if !CONFIG_IS_ENABLED(DM_MMC) 1272 int mmc_getcd(struct mmc *mmc) 1273 { 1274 int cd; 1275 1276 cd = board_mmc_getcd(mmc); 1277 1278 if (cd < 0) { 1279 if (mmc->cfg->ops->getcd) 1280 cd = mmc->cfg->ops->getcd(mmc); 1281 else 1282 cd = 1; 1283 } 1284 1285 return cd; 1286 } 1287 #endif 1288 1289 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp) 1290 { 1291 struct mmc_cmd cmd; 1292 struct mmc_data data; 1293 1294 /* Switch the frequency */ 1295 cmd.cmdidx = SD_CMD_SWITCH_FUNC; 1296 cmd.resp_type = MMC_RSP_R1; 1297 cmd.cmdarg = (mode << 31) | 0xffffff; 1298 cmd.cmdarg &= ~(0xf << (group * 4)); 1299 cmd.cmdarg |= value << (group * 4); 1300 1301 data.dest = (char *)resp; 1302 data.blocksize = 64; 1303 data.blocks = 1; 1304 data.flags = MMC_DATA_READ; 1305 1306 return mmc_send_cmd(mmc, &cmd, &data); 1307 } 1308 1309 1310 static int sd_change_freq(struct mmc *mmc) 1311 { 1312 
	int err;
	struct mmc_cmd cmd;
	/* SCR is 8 bytes; buffer must be cache-line aligned for DMA */
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	/* CMD6 status block is 64 bytes */
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	/* SPI mode: no bus-width/high-speed negotiation to do */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		/* retry up to 3 times; some cards need a moment after ACMD */
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR arrives big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SCR[59:56] = SD_SPEC, SCR bit 47 (here bit 15) = SD_SPEC3 */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch the card
	 * to HIGHSPEED mode even if the card supports SD_HIGHSPEED.
	 * This avoids further problems when the card and host run in
	 * different modes.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	/* Commit the switch to the high-speed function (group 1, fn 1) */
	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* Status byte 16 low nibble reports the selected group-1 function */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}

/*
 * Read the 64-byte SD Status (ACMD13) and derive the allocation-unit
 * size and erase timing hints used by the erase code.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* SSR is big-endian; normalize once so field extraction is simple */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE field; codes > 9 are only defined for SD 3.0+ cards */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
mmc->ssr.erase_timeout = (et * 1000) / es; 1467 mmc->ssr.erase_offset = eo * 1000; 1468 } 1469 } else { 1470 debug("Invalid Allocation Unit Size.\n"); 1471 } 1472 1473 return 0; 1474 } 1475 1476 /* frequency bases */ 1477 /* divided by 10 to be nice to platforms without floating point */ 1478 static const int fbase[] = { 1479 10000, 1480 100000, 1481 1000000, 1482 10000000, 1483 }; 1484 1485 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1486 * to platforms without floating point. 1487 */ 1488 static const u8 multipliers[] = { 1489 0, /* reserved */ 1490 10, 1491 12, 1492 13, 1493 15, 1494 20, 1495 25, 1496 30, 1497 35, 1498 40, 1499 45, 1500 50, 1501 55, 1502 60, 1503 70, 1504 80, 1505 }; 1506 1507 #if !CONFIG_IS_ENABLED(DM_MMC) 1508 static void mmc_set_ios(struct mmc *mmc) 1509 { 1510 if (mmc->cfg->ops->set_ios) 1511 mmc->cfg->ops->set_ios(mmc); 1512 } 1513 1514 static bool mmc_card_busy(struct mmc *mmc) 1515 { 1516 if (!mmc->cfg->ops->card_busy) 1517 return -ENOSYS; 1518 1519 return mmc->cfg->ops->card_busy(mmc); 1520 } 1521 1522 static bool mmc_can_card_busy(struct mmc *) 1523 { 1524 return !!mmc->cfg->ops->card_busy; 1525 } 1526 #endif 1527 1528 static int mmc_startup(struct mmc *mmc) 1529 { 1530 int err, i; 1531 uint mult, freq, tran_speed; 1532 u64 cmult, csize, capacity; 1533 struct mmc_cmd cmd; 1534 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1535 bool has_parts = false; 1536 bool part_completed; 1537 struct blk_desc *bdesc; 1538 1539 #ifdef CONFIG_MMC_SPI_CRC_ON 1540 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1541 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1542 cmd.resp_type = MMC_RSP_R1; 1543 cmd.cmdarg = 1; 1544 err = mmc_send_cmd(mmc, &cmd, NULL); 1545 1546 if (err) 1547 return err; 1548 } 1549 #endif 1550 1551 /* Put the Card in Identify Mode */ 1552 cmd.cmdidx = mmc_host_is_spi(mmc) ? 
MMC_CMD_SEND_CID : 1553 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */ 1554 cmd.resp_type = MMC_RSP_R2; 1555 cmd.cmdarg = 0; 1556 1557 err = mmc_send_cmd(mmc, &cmd, NULL); 1558 1559 if (err) 1560 return err; 1561 1562 memcpy(mmc->cid, cmd.response, 16); 1563 1564 /* 1565 * For MMC cards, set the Relative Address. 1566 * For SD cards, get the Relatvie Address. 1567 * This also puts the cards into Standby State 1568 */ 1569 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1570 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR; 1571 cmd.cmdarg = mmc->rca << 16; 1572 cmd.resp_type = MMC_RSP_R6; 1573 1574 err = mmc_send_cmd(mmc, &cmd, NULL); 1575 1576 if (err) 1577 return err; 1578 1579 if (IS_SD(mmc)) 1580 mmc->rca = (cmd.response[0] >> 16) & 0xffff; 1581 } 1582 1583 /* Get the Card-Specific Data */ 1584 cmd.cmdidx = MMC_CMD_SEND_CSD; 1585 cmd.resp_type = MMC_RSP_R2; 1586 cmd.cmdarg = mmc->rca << 16; 1587 1588 err = mmc_send_cmd(mmc, &cmd, NULL); 1589 1590 if (err) 1591 return err; 1592 1593 mmc->csd[0] = cmd.response[0]; 1594 mmc->csd[1] = cmd.response[1]; 1595 mmc->csd[2] = cmd.response[2]; 1596 mmc->csd[3] = cmd.response[3]; 1597 1598 if (mmc->version == MMC_VERSION_UNKNOWN) { 1599 int version = (cmd.response[0] >> 26) & 0xf; 1600 1601 switch (version) { 1602 case 0: 1603 mmc->version = MMC_VERSION_1_2; 1604 break; 1605 case 1: 1606 mmc->version = MMC_VERSION_1_4; 1607 break; 1608 case 2: 1609 mmc->version = MMC_VERSION_2_2; 1610 break; 1611 case 3: 1612 mmc->version = MMC_VERSION_3; 1613 break; 1614 case 4: 1615 mmc->version = MMC_VERSION_4; 1616 break; 1617 default: 1618 mmc->version = MMC_VERSION_1_2; 1619 break; 1620 } 1621 } 1622 1623 /* divide frequency by 10, since the mults are 10x bigger */ 1624 freq = fbase[(cmd.response[0] & 0x7)]; 1625 mult = multipliers[((cmd.response[0] >> 3) & 0xf)]; 1626 1627 tran_speed = freq * mult; 1628 1629 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1); 1630 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf); 1631 
1632 if (IS_SD(mmc)) 1633 mmc->write_bl_len = mmc->read_bl_len; 1634 else 1635 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf); 1636 1637 if (mmc->high_capacity) { 1638 csize = (mmc->csd[1] & 0x3f) << 16 1639 | (mmc->csd[2] & 0xffff0000) >> 16; 1640 cmult = 8; 1641 } else { 1642 csize = (mmc->csd[1] & 0x3ff) << 2 1643 | (mmc->csd[2] & 0xc0000000) >> 30; 1644 cmult = (mmc->csd[2] & 0x00038000) >> 15; 1645 } 1646 1647 mmc->capacity_user = (csize + 1) << (cmult + 2); 1648 mmc->capacity_user *= mmc->read_bl_len; 1649 mmc->capacity_boot = 0; 1650 mmc->capacity_rpmb = 0; 1651 for (i = 0; i < 4; i++) 1652 mmc->capacity_gp[i] = 0; 1653 1654 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN) 1655 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1656 1657 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN) 1658 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1659 1660 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) { 1661 cmd.cmdidx = MMC_CMD_SET_DSR; 1662 cmd.cmdarg = (mmc->dsr & 0xffff) << 16; 1663 cmd.resp_type = MMC_RSP_NONE; 1664 if (mmc_send_cmd(mmc, &cmd, NULL)) 1665 printf("MMC: SET_DSR failed\n"); 1666 } 1667 1668 /* Select the card, and put it into Transfer Mode */ 1669 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1670 cmd.cmdidx = MMC_CMD_SELECT_CARD; 1671 cmd.resp_type = MMC_RSP_R1; 1672 cmd.cmdarg = mmc->rca << 16; 1673 err = mmc_send_cmd(mmc, &cmd, NULL); 1674 1675 if (err) 1676 return err; 1677 } 1678 1679 /* 1680 * For SD, its erase group is always one sector 1681 */ 1682 mmc->erase_grp_size = 1; 1683 mmc->part_config = MMCPART_NOAVAILABLE; 1684 if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) { 1685 /* check ext_csd version and capacity */ 1686 err = mmc_send_ext_csd(mmc, ext_csd); 1687 if (err) 1688 return err; 1689 if (ext_csd[EXT_CSD_REV] >= 2) { 1690 /* 1691 * According to the JEDEC Standard, the value of 1692 * ext_csd's capacity is valid if the value is more 1693 * than 2GB 1694 */ 1695 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0 1696 | ext_csd[EXT_CSD_SEC_CNT + 1] 
<< 8 1697 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16 1698 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 1699 capacity *= MMC_MAX_BLOCK_LEN; 1700 if ((capacity >> 20) > 2 * 1024) 1701 mmc->capacity_user = capacity; 1702 } 1703 1704 switch (ext_csd[EXT_CSD_REV]) { 1705 case 1: 1706 mmc->version = MMC_VERSION_4_1; 1707 break; 1708 case 2: 1709 mmc->version = MMC_VERSION_4_2; 1710 break; 1711 case 3: 1712 mmc->version = MMC_VERSION_4_3; 1713 break; 1714 case 5: 1715 mmc->version = MMC_VERSION_4_41; 1716 break; 1717 case 6: 1718 mmc->version = MMC_VERSION_4_5; 1719 break; 1720 case 7: 1721 mmc->version = MMC_VERSION_5_0; 1722 break; 1723 case 8: 1724 mmc->version = MMC_VERSION_5_1; 1725 break; 1726 } 1727 1728 /* The partition data may be non-zero but it is only 1729 * effective if PARTITION_SETTING_COMPLETED is set in 1730 * EXT_CSD, so ignore any data if this bit is not set, 1731 * except for enabling the high-capacity group size 1732 * definition (see below). */ 1733 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 1734 EXT_CSD_PARTITION_SETTING_COMPLETED); 1735 1736 /* store the partition info of emmc */ 1737 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 1738 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 1739 ext_csd[EXT_CSD_BOOT_MULT]) 1740 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 1741 if (part_completed && 1742 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 1743 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 1744 if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN) 1745 mmc->esr.mmc_can_trim = 1; 1746 1747 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 1748 1749 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 1750 1751 for (i = 0; i < 4; i++) { 1752 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 1753 uint mult = (ext_csd[idx + 2] << 16) + 1754 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 1755 if (mult) 1756 has_parts = true; 1757 if (!part_completed) 1758 continue; 1759 mmc->capacity_gp[i] = mult; 
1760 mmc->capacity_gp[i] *= 1761 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1762 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1763 mmc->capacity_gp[i] <<= 19; 1764 } 1765 1766 if (part_completed) { 1767 mmc->enh_user_size = 1768 (ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) + 1769 (ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) + 1770 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 1771 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1772 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1773 mmc->enh_user_size <<= 19; 1774 mmc->enh_user_start = 1775 (ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) + 1776 (ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) + 1777 (ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) + 1778 ext_csd[EXT_CSD_ENH_START_ADDR]; 1779 if (mmc->high_capacity) 1780 mmc->enh_user_start <<= 9; 1781 } 1782 1783 /* 1784 * Host needs to enable ERASE_GRP_DEF bit if device is 1785 * partitioned. This bit will be lost every time after a reset 1786 * or power off. This will affect erase size. 1787 */ 1788 if (part_completed) 1789 has_parts = true; 1790 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 1791 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 1792 has_parts = true; 1793 if (has_parts) { 1794 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1795 EXT_CSD_ERASE_GROUP_DEF, 1); 1796 1797 if (err) 1798 return err; 1799 else 1800 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1801 } 1802 1803 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 1804 /* Read out group size from ext_csd */ 1805 mmc->erase_grp_size = 1806 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1807 /* 1808 * if high capacity and partition setting completed 1809 * SEC_COUNT is valid even if it is smaller than 2 GiB 1810 * JEDEC Standard JESD84-B45, 6.2.4 1811 */ 1812 if (mmc->high_capacity && part_completed) { 1813 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 1814 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 1815 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 1816 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 1817 capacity *= 
					MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		/* High-capacity write-protect group size, in 512-byte units */
		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	/* Set mmc->capacity for the currently selected hardware partition */
	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	/* Negotiate high-speed / wide-bus capabilities with the card */
	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		/* ACMD6 switches the SD card to 4-bit bus width */
		if (mmc->card_caps & MMC_MODE_4BIT) {
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = 50000000;
		else
			tran_speed = 25000000;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	/* Decode manufacturer/serial/name/revision from the raw CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

/*
 * Send CMD8 (SEND_IF_COND).  A valid echo of the 0xaa check pattern
 * identifies an SD version 2 (or later) card.
 */
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	/* Card must echo the check pattern back */
	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
}
#endif

/*
 * Power up the card: with driver model, enable the vmmc-supply
 * regulator (missing supply is not an error); otherwise defer to the
 * legacy board hook.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * First half of card init: power, host init, CMD0 reset and the
 * (possibly asynchronous) operating-condition negotiation.  Completed
 * later by mmc_complete_init().
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* Start in the lowest-common-denominator mode: 1 bit, slow clock */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to
	   user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition
	 * (the CMD8 result above is intentionally only used implicitly:
	 * a v2 card has already recorded its version) */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

/*
 * Second half of card init: finish a pending OCR negotiation, then run
 * mmc_startup() to bring the card to Transfer State.
 */
static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

/* Full (synchronous) card initialization; no-op if already done. */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

/* Store the Driver Stage Register value to program during startup. */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Bind and probe all MMC devices known to driver model. */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

/* One-time registration/probing of all MMC controllers at boot. */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Permanently enable manual background operations (BKOPS_EN is a
 * one-time-programmable EXT_CSD field).  Returns 0 on success or if
 * already enabled, -EMEDIUMTYPE when the card lacks BKOPS support.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif