1 /* 2 * Copyright 2008, Freescale Semiconductor, Inc 3 * Andy Fleming 4 * 5 * Based vaguely on the Linux code 6 * 7 * SPDX-License-Identifier: GPL-2.0+ 8 */ 9 10 #include <config.h> 11 #include <common.h> 12 #include <command.h> 13 #include <dm.h> 14 #include <dm/device-internal.h> 15 #include <errno.h> 16 #include <mmc.h> 17 #include <part.h> 18 #include <power/regulator.h> 19 #include <malloc.h> 20 #include <memalign.h> 21 #include <linux/list.h> 22 #include <div64.h> 23 #include "mmc_private.h" 24 25 static const unsigned int sd_au_size[] = { 26 0, SZ_16K / 512, SZ_32K / 512, 27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, 31 }; 32 33 #if CONFIG_IS_ENABLED(MMC_TINY) 34 static struct mmc mmc_static; 35 struct mmc *find_mmc_device(int dev_num) 36 { 37 return &mmc_static; 38 } 39 40 void mmc_do_preinit(void) 41 { 42 struct mmc *m = &mmc_static; 43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 44 mmc_set_preinit(m, 1); 45 #endif 46 if (m->preinit) 47 mmc_start_init(m); 48 } 49 50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc) 51 { 52 return &mmc->block_dev; 53 } 54 #endif 55 56 #if !CONFIG_IS_ENABLED(DM_MMC) 57 __weak int board_mmc_getwp(struct mmc *mmc) 58 { 59 return -1; 60 } 61 62 int mmc_getwp(struct mmc *mmc) 63 { 64 int wp; 65 66 wp = board_mmc_getwp(mmc); 67 68 if (wp < 0) { 69 if (mmc->cfg->ops->getwp) 70 wp = mmc->cfg->ops->getwp(mmc); 71 else 72 wp = 0; 73 } 74 75 return wp; 76 } 77 78 __weak int board_mmc_getcd(struct mmc *mmc) 79 { 80 return -1; 81 } 82 #endif 83 84 #ifdef CONFIG_MMC_TRACE 85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd) 86 { 87 printf("CMD_SEND:%d\n", cmd->cmdidx); 88 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg); 89 } 90 91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret) 92 { 93 int i; 94 u8 *ptr; 95 96 if (ret) { 97 
printf("\t\tRET\t\t\t %d\n", ret); 98 } else { 99 switch (cmd->resp_type) { 100 case MMC_RSP_NONE: 101 printf("\t\tMMC_RSP_NONE\n"); 102 break; 103 case MMC_RSP_R1: 104 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 105 cmd->response[0]); 106 break; 107 case MMC_RSP_R1b: 108 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 109 cmd->response[0]); 110 break; 111 case MMC_RSP_R2: 112 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 113 cmd->response[0]); 114 printf("\t\t \t\t 0x%08X \n", 115 cmd->response[1]); 116 printf("\t\t \t\t 0x%08X \n", 117 cmd->response[2]); 118 printf("\t\t \t\t 0x%08X \n", 119 cmd->response[3]); 120 printf("\n"); 121 printf("\t\t\t\t\tDUMPING DATA\n"); 122 for (i = 0; i < 4; i++) { 123 int j; 124 printf("\t\t\t\t\t%03d - ", i*4); 125 ptr = (u8 *)&cmd->response[i]; 126 ptr += 3; 127 for (j = 0; j < 4; j++) 128 printf("%02X ", *ptr--); 129 printf("\n"); 130 } 131 break; 132 case MMC_RSP_R3: 133 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 134 cmd->response[0]); 135 break; 136 default: 137 printf("\t\tERROR MMC rsp not supported\n"); 138 break; 139 } 140 } 141 } 142 143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 144 { 145 int status; 146 147 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 148 printf("CURR STATE:%d\n", status); 149 } 150 #endif 151 152 #if !CONFIG_IS_ENABLED(DM_MMC) 153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 154 { 155 int ret; 156 157 mmmc_trace_before_send(mmc, cmd); 158 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 159 mmmc_trace_after_send(mmc, cmd, ret); 160 161 return ret; 162 } 163 #endif 164 165 int mmc_send_status(struct mmc *mmc, int timeout) 166 { 167 struct mmc_cmd cmd; 168 int err, retries = 5; 169 170 cmd.cmdidx = MMC_CMD_SEND_STATUS; 171 cmd.resp_type = MMC_RSP_R1; 172 if (!mmc_host_is_spi(mmc)) 173 cmd.cmdarg = mmc->rca << 16; 174 175 while (1) { 176 err = mmc_send_cmd(mmc, &cmd, NULL); 177 if (!err) { 178 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) && 179 
(cmd.response[0] & MMC_STATUS_CURR_STATE) != 180 MMC_STATE_PRG) 181 break; 182 else if (cmd.response[0] & MMC_STATUS_MASK) { 183 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 184 printf("Status Error: 0x%08X\n", 185 cmd.response[0]); 186 #endif 187 return -ECOMM; 188 } 189 } else if (--retries < 0) 190 return err; 191 192 if (timeout-- <= 0) 193 break; 194 195 udelay(1000); 196 } 197 198 mmc_trace_state(mmc, &cmd); 199 if (timeout <= 0) { 200 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 201 printf("Timeout waiting card ready\n"); 202 #endif 203 return -ETIMEDOUT; 204 } 205 206 return 0; 207 } 208 209 int mmc_set_blocklen(struct mmc *mmc, int len) 210 { 211 struct mmc_cmd cmd; 212 213 if (mmc_card_ddr(mmc)) 214 return 0; 215 216 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN; 217 cmd.resp_type = MMC_RSP_R1; 218 cmd.cmdarg = len; 219 220 return mmc_send_cmd(mmc, &cmd, NULL); 221 } 222 223 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start, 224 lbaint_t blkcnt) 225 { 226 struct mmc_cmd cmd; 227 struct mmc_data data; 228 229 if (blkcnt > 1) 230 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 231 else 232 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 233 234 if (mmc->high_capacity) 235 cmd.cmdarg = start; 236 else 237 cmd.cmdarg = start * mmc->read_bl_len; 238 239 cmd.resp_type = MMC_RSP_R1; 240 241 data.dest = dst; 242 data.blocks = blkcnt; 243 data.blocksize = mmc->read_bl_len; 244 data.flags = MMC_DATA_READ; 245 246 if (mmc_send_cmd(mmc, &cmd, &data)) 247 return 0; 248 249 if (blkcnt > 1) { 250 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 251 cmd.cmdarg = 0; 252 cmd.resp_type = MMC_RSP_R1b; 253 if (mmc_send_cmd(mmc, &cmd, NULL)) { 254 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 255 printf("mmc fail to send stop cmd\n"); 256 #endif 257 return 0; 258 } 259 } 260 261 return blkcnt; 262 } 263 264 #if CONFIG_IS_ENABLED(BLK) 265 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void 
*dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* Make sure the requested hardware partition is selected first */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* Reject reads that would run past the end of the device */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* Read in chunks no larger than the host's b_max limit */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			int timeout = 0;
re_init_retry:
			timeout++;
			/*
			 * Vendor fallback: on a read failure, fully
			 * re-initialize the card and retry the same chunk,
			 * up to seven times, before giving up.
			 */
			if (timeout > 7) {
				printf("Re-init retry timeout\n");
				return 0;
			}

			mmc->has_init = 0;
			if (mmc_init(mmc))
				return 0;

			if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
				printf("%s: Re-init mmc_read_blocks error\n",
				       __func__);
				goto re_init_retry;
			}
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}

/*
 * Set the bus clock, clamped to the host's [f_min, f_max] range, and
 * push the new setting to the host controller via mmc_set_ios().
 */
void mmc_set_clock(struct mmc *mmc, uint clock)
{
	if (clock > mmc->cfg->f_max)
		clock = mmc->cfg->f_max;

	if (clock < mmc->cfg->f_min)
		clock = mmc->cfg->f_min;

	mmc->clock = clock;

	mmc_set_ios(mmc);
}

/* Record the new bus width and apply it to the host controller */
static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}

/* Record the new timing mode (HS/DDR/HS200/...) and apply it to the host */
static void mmc_set_timing(struct mmc *mmc, uint timing)
{
	mmc->timing = timing;
	mmc_set_ios(mmc);
}

/*
 * Reset the card to idle state with CMD0 (GO_IDLE_STATE).
 * The surrounding delays give the card time to settle before and after.
 */
static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

/*
 * SD voltage/capacity negotiation: repeatedly issue ACMD41
 * (APP_SEND_OP_COND) until the card reports power-up complete (OCR_BUSY
 * set in the response) or ~1000 iterations elapse.  On success the OCR,
 * high-capacity flag and RCA (0 for SD at this point) are recorded.
 */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ?
			0 :
			(mmc->cfg->voltages & 0xff8000);

		/* SDHC/SDXC support is only advertised to SD 2.0+ cards */
		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* OCR_BUSY set means the card finished its power-up */
		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}

/*
 * Issue one CMD1 (SEND_OP_COND) iteration and latch the returned OCR
 * into mmc->ocr.
 *
 * @use_arg: when non-zero (and not SPI), advertise the host's supported
 *           voltages and sector-addressing back to the card, using the
 *           OCR obtained from a previous argument-less probe.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

/*
 * Start eMMC operating-condition negotiation: probe the card's
 * capabilities with CMD1, then leave completion (busy polling) to
 * mmc_complete_op_cond(), flagged via op_cond_pending.  This split lets
 * mmc_start_init()/mmc_init() overlap card power-up with other work.
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Asking to the card its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}

/*
 * Finish the CMD1 negotiation started by mmc_send_op_cond(): poll until
 * the card reports power-up complete (OCR_BUSY), read the OCR over SPI
 * if needed, and record the capacity mode and the default RCA (1).
 * Returns 0 on success, -EOPNOTSUPP on timeout, or a transport error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		/* Poll CMD1 for up to ~1000 ms of timer time */
		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* The real version is filled in later from CSD/EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}


/*
 * Read the 512-byte EXT_CSD register into @ext_csd via CMD8
 * (SEND_EXT_CSD).  @ext_csd must be cache-aligned and at least
 * MMC_MAX_BLOCK_LEN bytes.
 */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	/* Get the Card Status Register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	return err;
}

/*
 * Wait for the card to leave the programming state after a SWITCH.
 *
 * @send_status: non-zero to poll with CMD13; zero to use the host's
 *               busy-detection hook (mmc_card_busy), falling back to a
 *               fixed 1000 ms delay when the host cannot detect busy.
 *
 * Returns 0 when idle, -EBADMSG on a SWITCH error status bit,
 * -ETIMEDOUT after ~1000 ms, or a transport error.
 */
static int mmc_poll_for_busy(struct mmc *mmc, u8 send_status)
{
	struct mmc_cmd cmd;
	u8 busy = true;
	uint start;
	int ret;
	int timeout = 1000;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	start = get_timer(0);

	if (!send_status && !mmc_can_card_busy(mmc)) {
		mdelay(timeout);
		return 0;
	}

	do {
		if (!send_status) {
			busy = mmc_card_busy(mmc);
		} else {
			ret = mmc_send_cmd(mmc, &cmd, NULL);

			if (ret)
				return ret;

			if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR)
				return -EBADMSG;
			busy = (cmd.response[0] & MMC_STATUS_CURR_STATE) ==
			       MMC_STATE_PRG;
		}

		if (get_timer(start) > timeout && busy)
			return -ETIMEDOUT;
	} while (busy);

	return 0;
}

static int
__mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value, 607 u8 send_status) 608 { 609 struct mmc_cmd cmd; 610 int retries = 3; 611 int ret; 612 613 cmd.cmdidx = MMC_CMD_SWITCH; 614 cmd.resp_type = MMC_RSP_R1b; 615 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 616 (index << 16) | 617 (value << 8); 618 619 do { 620 ret = mmc_send_cmd(mmc, &cmd, NULL); 621 622 if (!ret) 623 return mmc_poll_for_busy(mmc, send_status); 624 } while (--retries > 0 && ret); 625 626 return ret; 627 } 628 629 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value) 630 { 631 return __mmc_switch(mmc, set, index, value, true); 632 } 633 634 static int mmc_select_bus_width(struct mmc *mmc) 635 { 636 u32 ext_csd_bits[] = { 637 EXT_CSD_BUS_WIDTH_8, 638 EXT_CSD_BUS_WIDTH_4, 639 }; 640 u32 bus_widths[] = { 641 MMC_BUS_WIDTH_8BIT, 642 MMC_BUS_WIDTH_4BIT, 643 }; 644 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 645 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN); 646 u32 idx, bus_width = 0; 647 int err = 0; 648 649 if (mmc->version < MMC_VERSION_4 || 650 !(mmc->cfg->host_caps & (MMC_MODE_4BIT | MMC_MODE_8BIT))) 651 return 0; 652 653 err = mmc_send_ext_csd(mmc, ext_csd); 654 655 if (err) 656 return err; 657 658 idx = (mmc->cfg->host_caps & MMC_MODE_8BIT) ? 0 : 1; 659 660 /* 661 * Unlike SD, MMC cards dont have a configuration register to notify 662 * supported bus width. So bus test command should be run to identify 663 * the supported bus width or compare the ext csd values of current 664 * bus width and ext csd values of 1 bit mode read earlier. 665 */ 666 for (; idx < ARRAY_SIZE(bus_widths); idx++) { 667 /* 668 * Host is capable of 8bit transfer, then switch 669 * the device to work in 8bit transfer mode. If the 670 * mmc switch command returns error then switch to 671 * 4bit transfer mode. On success set the corresponding 672 * bus width on the host. 
673 */ 674 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 675 EXT_CSD_BUS_WIDTH, ext_csd_bits[idx]); 676 if (err) 677 continue; 678 679 bus_width = bus_widths[idx]; 680 mmc_set_bus_width(mmc, bus_width); 681 682 err = mmc_send_ext_csd(mmc, test_csd); 683 684 if (err) 685 continue; 686 687 /* Only compare read only fields */ 688 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] == 689 test_csd[EXT_CSD_PARTITIONING_SUPPORT]) && 690 (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] == 691 test_csd[EXT_CSD_HC_WP_GRP_SIZE]) && 692 (ext_csd[EXT_CSD_REV] == test_csd[EXT_CSD_REV]) && 693 (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] == 694 test_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && 695 !memcmp(&ext_csd[EXT_CSD_SEC_CNT], 696 &test_csd[EXT_CSD_SEC_CNT], 4)) { 697 err = bus_width; 698 break; 699 } else { 700 err = -EBADMSG; 701 } 702 } 703 704 return err; 705 } 706 707 static const u8 tuning_blk_pattern_4bit[] = { 708 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, 709 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, 710 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, 711 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, 712 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, 713 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, 714 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, 715 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, 716 }; 717 718 static const u8 tuning_blk_pattern_8bit[] = { 719 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 720 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, 721 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, 722 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, 723 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, 724 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, 725 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, 726 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, 727 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 728 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 729 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 730 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 731 0xff, 0xff, 
0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 732 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 733 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 734 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 735 }; 736 737 int mmc_send_tuning(struct mmc *mmc, u32 opcode) 738 { 739 struct mmc_cmd cmd; 740 struct mmc_data data; 741 const u8 *tuning_block_pattern; 742 int size, err = 0; 743 u8 *data_buf; 744 745 if (mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 746 tuning_block_pattern = tuning_blk_pattern_8bit; 747 size = sizeof(tuning_blk_pattern_8bit); 748 } else if (mmc->bus_width == MMC_BUS_WIDTH_4BIT) { 749 tuning_block_pattern = tuning_blk_pattern_4bit; 750 size = sizeof(tuning_blk_pattern_4bit); 751 } else { 752 return -EINVAL; 753 } 754 755 data_buf = calloc(1, size); 756 if (!data_buf) 757 return -ENOMEM; 758 759 cmd.cmdidx = opcode; 760 cmd.resp_type = MMC_RSP_R1; 761 cmd.cmdarg = 0; 762 763 data.dest = (char *)data_buf; 764 data.blocksize = size; 765 data.blocks = 1; 766 data.flags = MMC_DATA_READ; 767 768 err = mmc_send_cmd(mmc, &cmd, &data); 769 if (err) 770 goto out; 771 772 if (memcmp(data_buf, tuning_block_pattern, size)) 773 err = -EIO; 774 out: 775 free(data_buf); 776 return err; 777 } 778 779 static int mmc_execute_tuning(struct mmc *mmc) 780 { 781 #ifdef CONFIG_DM_MMC 782 struct dm_mmc_ops *ops = mmc_get_ops(mmc->dev); 783 #endif 784 u32 opcode; 785 786 if (IS_SD(mmc)) 787 opcode = MMC_SEND_TUNING_BLOCK; 788 else 789 opcode = MMC_SEND_TUNING_BLOCK_HS200; 790 791 #ifndef CONFIG_DM_MMC 792 if (mmc->cfg->ops->execute_tuning) { 793 return mmc->cfg->ops->execute_tuning(mmc, opcode); 794 #else 795 if (ops->execute_tuning) { 796 return ops->execute_tuning(mmc->dev, opcode); 797 #endif 798 } else { 799 debug("Tuning feature required for HS200 mode.\n"); 800 return -EIO; 801 } 802 } 803 804 static int mmc_hs200_tuning(struct mmc *mmc) 805 { 806 return mmc_execute_tuning(mmc); 807 } 808 809 static int mmc_select_hs(struct mmc *mmc) 810 { 811 int ret; 812 813 ret = 
mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 814 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS); 815 816 if (!ret) 817 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 818 819 return ret; 820 } 821 822 static int mmc_select_hs_ddr(struct mmc *mmc) 823 { 824 u32 ext_csd_bits; 825 int err = 0; 826 827 if (mmc->bus_width == MMC_BUS_WIDTH_1BIT) 828 return 0; 829 830 ext_csd_bits = (mmc->bus_width == MMC_BUS_WIDTH_8BIT) ? 831 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; 832 833 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 834 EXT_CSD_BUS_WIDTH, ext_csd_bits); 835 if (err) 836 return err; 837 838 mmc_set_timing(mmc, MMC_TIMING_MMC_DDR52); 839 840 return 0; 841 } 842 843 #ifndef CONFIG_SPL_BUILD 844 static int mmc_select_hs200(struct mmc *mmc) 845 { 846 int ret; 847 struct mmc_cmd cmd; 848 849 /* 850 * Set the bus width(4 or 8) with host's support and 851 * switch to HS200 mode if bus width is set successfully. 852 */ 853 ret = mmc_select_bus_width(mmc); 854 855 if (ret > 0) { 856 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 857 EXT_CSD_HS_TIMING, 858 EXT_CSD_TIMING_HS200, false); 859 860 if (ret) 861 return ret; 862 863 mmc_set_timing(mmc, MMC_TIMING_MMC_HS200); 864 865 cmd.cmdidx = MMC_CMD_SEND_STATUS; 866 cmd.resp_type = MMC_RSP_R1; 867 cmd.cmdarg = mmc->rca << 16; 868 869 ret = mmc_send_cmd(mmc, &cmd, NULL); 870 871 if (ret) 872 return ret; 873 874 if (cmd.response[0] & MMC_STATUS_SWITCH_ERROR) 875 return -EBADMSG; 876 } 877 878 return ret; 879 } 880 #endif 881 882 static int mmc_select_hs400(struct mmc *mmc) 883 { 884 int ret; 885 886 /* Switch card to HS mode */ 887 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 888 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, false); 889 if (ret) 890 return ret; 891 892 /* Set host controller to HS timing */ 893 mmc_set_timing(mmc, MMC_TIMING_MMC_HS); 894 895 /* Reduce frequency to HS frequency */ 896 mmc_set_clock(mmc, MMC_HIGH_52_MAX_DTR); 897 898 ret = mmc_send_status(mmc, 1000); 899 if (ret) 900 return ret; 901 902 /* Switch card to DDR */ 903 
ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 904 EXT_CSD_BUS_WIDTH, 905 EXT_CSD_DDR_BUS_WIDTH_8); 906 if (ret) 907 return ret; 908 909 /* Switch card to HS400 */ 910 ret = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 911 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, false); 912 if (ret) 913 return ret; 914 915 /* Set host controller to HS400 timing and frequency */ 916 mmc_set_timing(mmc, MMC_TIMING_MMC_HS400); 917 918 return ret; 919 } 920 921 static u32 mmc_select_card_type(struct mmc *mmc, u8 *ext_csd) 922 { 923 u8 card_type; 924 u32 host_caps, avail_type = 0; 925 926 card_type = ext_csd[EXT_CSD_CARD_TYPE]; 927 host_caps = mmc->cfg->host_caps; 928 929 if ((host_caps & MMC_MODE_HS) && 930 (card_type & EXT_CSD_CARD_TYPE_26)) 931 avail_type |= EXT_CSD_CARD_TYPE_26; 932 933 if ((host_caps & MMC_MODE_HS) && 934 (card_type & EXT_CSD_CARD_TYPE_52)) 935 avail_type |= EXT_CSD_CARD_TYPE_52; 936 937 /* 938 * For the moment, u-boot doesn't support signal voltage 939 * switch, therefor we assume that host support ddr52 940 * at 1.8v or 3.3v I/O(1.2v I/O not supported, hs200 and 941 * hs400 are the same). 942 */ 943 if ((host_caps & MMC_MODE_DDR_52MHz) && 944 (card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)) 945 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; 946 947 if ((host_caps & MMC_MODE_HS200) && 948 (card_type & EXT_CSD_CARD_TYPE_HS200_1_8V)) 949 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; 950 951 /* 952 * If host can support HS400, it means that host can also 953 * support HS200. 
954 */ 955 if ((host_caps & MMC_MODE_HS400) && 956 (host_caps & MMC_MODE_8BIT) && 957 (card_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 958 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 959 EXT_CSD_CARD_TYPE_HS400_1_8V; 960 961 if ((host_caps & MMC_MODE_HS400ES) && 962 (host_caps & MMC_MODE_8BIT) && 963 ext_csd[EXT_CSD_STROBE_SUPPORT] && 964 (avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)) 965 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V | 966 EXT_CSD_CARD_TYPE_HS400_1_8V | 967 EXT_CSD_CARD_TYPE_HS400ES; 968 969 return avail_type; 970 } 971 972 static void mmc_set_bus_speed(struct mmc *mmc, u8 avail_type) 973 { 974 int clock = 0; 975 976 if (mmc_card_hs(mmc)) 977 clock = (avail_type & EXT_CSD_CARD_TYPE_52) ? 978 MMC_HIGH_52_MAX_DTR : MMC_HIGH_26_MAX_DTR; 979 else if (mmc_card_hs200(mmc) || 980 mmc_card_hs400(mmc) || 981 mmc_card_hs400es(mmc)) 982 clock = MMC_HS200_MAX_DTR; 983 984 mmc_set_clock(mmc, clock); 985 } 986 987 static int mmc_change_freq(struct mmc *mmc) 988 { 989 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 990 u32 avail_type; 991 int err; 992 993 mmc->card_caps = 0; 994 995 if (mmc_host_is_spi(mmc)) 996 return 0; 997 998 /* Only version 4 supports high-speed */ 999 if (mmc->version < MMC_VERSION_4) 1000 return 0; 1001 1002 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT; 1003 1004 err = mmc_send_ext_csd(mmc, ext_csd); 1005 1006 if (err) 1007 return err; 1008 1009 avail_type = mmc_select_card_type(mmc, ext_csd); 1010 1011 #ifndef CONFIG_SPL_BUILD 1012 if (avail_type & EXT_CSD_CARD_TYPE_HS200) 1013 err = mmc_select_hs200(mmc); 1014 else 1015 #endif 1016 if (avail_type & EXT_CSD_CARD_TYPE_HS) 1017 err = mmc_select_hs(mmc); 1018 else 1019 err = -EINVAL; 1020 1021 if (err) 1022 return err; 1023 1024 mmc_set_bus_speed(mmc, avail_type); 1025 1026 if (mmc_card_hs200(mmc)) { 1027 err = mmc_hs200_tuning(mmc); 1028 if (avail_type & EXT_CSD_CARD_TYPE_HS400 && 1029 mmc->bus_width == MMC_BUS_WIDTH_8BIT) { 1030 err = mmc_select_hs400(mmc); 1031 mmc_set_bus_speed(mmc, 
avail_type); 1032 } 1033 } else if (!mmc_card_hs400es(mmc)) { 1034 err = mmc_select_bus_width(mmc) > 0 ? 0 : err; 1035 if (!err && avail_type & EXT_CSD_CARD_TYPE_DDR_52) 1036 err = mmc_select_hs_ddr(mmc); 1037 } 1038 1039 return err; 1040 } 1041 1042 static int mmc_set_capacity(struct mmc *mmc, int part_num) 1043 { 1044 switch (part_num) { 1045 case 0: 1046 mmc->capacity = mmc->capacity_user; 1047 break; 1048 case 1: 1049 case 2: 1050 mmc->capacity = mmc->capacity_boot; 1051 break; 1052 case 3: 1053 mmc->capacity = mmc->capacity_rpmb; 1054 break; 1055 case 4: 1056 case 5: 1057 case 6: 1058 case 7: 1059 mmc->capacity = mmc->capacity_gp[part_num - 4]; 1060 break; 1061 default: 1062 return -1; 1063 } 1064 1065 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len); 1066 1067 return 0; 1068 } 1069 1070 int mmc_switch_part(struct mmc *mmc, unsigned int part_num) 1071 { 1072 int ret; 1073 1074 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF, 1075 (mmc->part_config & ~PART_ACCESS_MASK) 1076 | (part_num & PART_ACCESS_MASK)); 1077 1078 /* 1079 * Set the capacity if the switch succeeded or was intended 1080 * to return to representing the raw device. 
1081 */ 1082 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) { 1083 ret = mmc_set_capacity(mmc, part_num); 1084 mmc_get_blk_desc(mmc)->hwpart = part_num; 1085 } 1086 1087 return ret; 1088 } 1089 1090 int mmc_hwpart_config(struct mmc *mmc, 1091 const struct mmc_hwpart_conf *conf, 1092 enum mmc_hwpart_conf_mode mode) 1093 { 1094 u8 part_attrs = 0; 1095 u32 enh_size_mult; 1096 u32 enh_start_addr; 1097 u32 gp_size_mult[4]; 1098 u32 max_enh_size_mult; 1099 u32 tot_enh_size_mult = 0; 1100 u8 wr_rel_set; 1101 int i, pidx, err; 1102 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1103 1104 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE) 1105 return -EINVAL; 1106 1107 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) { 1108 printf("eMMC >= 4.4 required for enhanced user data area\n"); 1109 return -EMEDIUMTYPE; 1110 } 1111 1112 if (!(mmc->part_support & PART_SUPPORT)) { 1113 printf("Card does not support partitioning\n"); 1114 return -EMEDIUMTYPE; 1115 } 1116 1117 if (!mmc->hc_wp_grp_size) { 1118 printf("Card does not define HC WP group size\n"); 1119 return -EMEDIUMTYPE; 1120 } 1121 1122 /* check partition alignment and total enhanced size */ 1123 if (conf->user.enh_size) { 1124 if (conf->user.enh_size % mmc->hc_wp_grp_size || 1125 conf->user.enh_start % mmc->hc_wp_grp_size) { 1126 printf("User data enhanced area not HC WP group " 1127 "size aligned\n"); 1128 return -EINVAL; 1129 } 1130 part_attrs |= EXT_CSD_ENH_USR; 1131 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size; 1132 if (mmc->high_capacity) { 1133 enh_start_addr = conf->user.enh_start; 1134 } else { 1135 enh_start_addr = (conf->user.enh_start << 9); 1136 } 1137 } else { 1138 enh_size_mult = 0; 1139 enh_start_addr = 0; 1140 } 1141 tot_enh_size_mult += enh_size_mult; 1142 1143 for (pidx = 0; pidx < 4; pidx++) { 1144 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) { 1145 printf("GP%i partition not HC WP group size " 1146 "aligned\n", pidx+1); 1147 return -EINVAL; 
1148 } 1149 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size; 1150 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) { 1151 part_attrs |= EXT_CSD_ENH_GP(pidx); 1152 tot_enh_size_mult += gp_size_mult[pidx]; 1153 } 1154 } 1155 1156 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) { 1157 printf("Card does not support enhanced attribute\n"); 1158 return -EMEDIUMTYPE; 1159 } 1160 1161 err = mmc_send_ext_csd(mmc, ext_csd); 1162 if (err) 1163 return err; 1164 1165 max_enh_size_mult = 1166 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) + 1167 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) + 1168 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT]; 1169 if (tot_enh_size_mult > max_enh_size_mult) { 1170 printf("Total enhanced size exceeds maximum (%u > %u)\n", 1171 tot_enh_size_mult, max_enh_size_mult); 1172 return -EMEDIUMTYPE; 1173 } 1174 1175 /* The default value of EXT_CSD_WR_REL_SET is device 1176 * dependent, the values can only be changed if the 1177 * EXT_CSD_HS_CTRL_REL bit is set. The values can be 1178 * changed only once and before partitioning is completed. 
*/ 1179 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 1180 if (conf->user.wr_rel_change) { 1181 if (conf->user.wr_rel_set) 1182 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR; 1183 else 1184 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR; 1185 } 1186 for (pidx = 0; pidx < 4; pidx++) { 1187 if (conf->gp_part[pidx].wr_rel_change) { 1188 if (conf->gp_part[pidx].wr_rel_set) 1189 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx); 1190 else 1191 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx); 1192 } 1193 } 1194 1195 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] && 1196 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) { 1197 puts("Card does not support host controlled partition write " 1198 "reliability settings\n"); 1199 return -EMEDIUMTYPE; 1200 } 1201 1202 if (ext_csd[EXT_CSD_PARTITION_SETTING] & 1203 EXT_CSD_PARTITION_SETTING_COMPLETED) { 1204 printf("Card already partitioned\n"); 1205 return -EPERM; 1206 } 1207 1208 if (mode == MMC_HWPART_CONF_CHECK) 1209 return 0; 1210 1211 /* Partitioning requires high-capacity size definitions */ 1212 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) { 1213 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1214 EXT_CSD_ERASE_GROUP_DEF, 1); 1215 1216 if (err) 1217 return err; 1218 1219 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1220 1221 /* update erase group size to be high-capacity */ 1222 mmc->erase_grp_size = 1223 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1224 1225 } 1226 1227 /* all OK, write the configuration */ 1228 for (i = 0; i < 4; i++) { 1229 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1230 EXT_CSD_ENH_START_ADDR+i, 1231 (enh_start_addr >> (i*8)) & 0xFF); 1232 if (err) 1233 return err; 1234 } 1235 for (i = 0; i < 3; i++) { 1236 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1237 EXT_CSD_ENH_SIZE_MULT+i, 1238 (enh_size_mult >> (i*8)) & 0xFF); 1239 if (err) 1240 return err; 1241 } 1242 for (pidx = 0; pidx < 4; pidx++) { 1243 for (i = 0; i < 3; i++) { 1244 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1245 EXT_CSD_GP_SIZE_MULT+pidx*3+i, 1246 
(gp_size_mult[pidx] >> (i*8)) & 0xFF); 1247 if (err) 1248 return err; 1249 } 1250 } 1251 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1252 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs); 1253 if (err) 1254 return err; 1255 1256 if (mode == MMC_HWPART_CONF_SET) 1257 return 0; 1258 1259 /* The WR_REL_SET is a write-once register but shall be 1260 * written before setting PART_SETTING_COMPLETED. As it is 1261 * write-once we can only write it when completing the 1262 * partitioning. */ 1263 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) { 1264 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1265 EXT_CSD_WR_REL_SET, wr_rel_set); 1266 if (err) 1267 return err; 1268 } 1269 1270 /* Setting PART_SETTING_COMPLETED confirms the partition 1271 * configuration but it only becomes effective after power 1272 * cycle, so we do not adjust the partition related settings 1273 * in the mmc struct. */ 1274 1275 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1276 EXT_CSD_PARTITION_SETTING, 1277 EXT_CSD_PARTITION_SETTING_COMPLETED); 1278 if (err) 1279 return err; 1280 1281 return 0; 1282 } 1283 1284 #if !CONFIG_IS_ENABLED(DM_MMC) 1285 int mmc_getcd(struct mmc *mmc) 1286 { 1287 int cd; 1288 1289 cd = board_mmc_getcd(mmc); 1290 1291 if (cd < 0) { 1292 if (mmc->cfg->ops->getcd) 1293 cd = mmc->cfg->ops->getcd(mmc); 1294 else 1295 cd = 1; 1296 } 1297 1298 return cd; 1299 } 1300 #endif 1301 1302 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp) 1303 { 1304 struct mmc_cmd cmd; 1305 struct mmc_data data; 1306 1307 /* Switch the frequency */ 1308 cmd.cmdidx = SD_CMD_SWITCH_FUNC; 1309 cmd.resp_type = MMC_RSP_R1; 1310 cmd.cmdarg = (mode << 31) | 0xffffff; 1311 cmd.cmdarg &= ~(0xf << (group * 4)); 1312 cmd.cmdarg |= value << (group * 4); 1313 1314 data.dest = (char *)resp; 1315 data.blocksize = 64; 1316 data.blocks = 1; 1317 data.flags = MMC_DATA_READ; 1318 1319 return mmc_send_cmd(mmc, &cmd, &data); 1320 } 1321 1322 1323 static int sd_change_freq(struct mmc *mmc) 1324 { 1325 
	int err;
	struct mmc_cmd cmd;
	/* SCR is 8 bytes, switch status 64 bytes; DMA-safe aligned buffers */
	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	struct mmc_data data;
	int timeout;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR is big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC / SD_SPEC3 fields select the physical-layer version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.
		 * Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card supports SD_HIGHSPEED.
	 * This avoids further problems when the card runs in a different
	 * mode than the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	/* group 1 status nibble == 1 means high-speed was actually selected */
	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}

/* Read the SD Status register (ACMD13) to get AU size and erase timing. */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* SSR is big-endian on the wire */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE field; values above 9 are only valid for SD 3.0 cards */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
mmc->ssr.erase_timeout = (et * 1000) / es; 1480 mmc->ssr.erase_offset = eo * 1000; 1481 } 1482 } else { 1483 debug("Invalid Allocation Unit Size.\n"); 1484 } 1485 1486 return 0; 1487 } 1488 1489 /* frequency bases */ 1490 /* divided by 10 to be nice to platforms without floating point */ 1491 static const int fbase[] = { 1492 10000, 1493 100000, 1494 1000000, 1495 10000000, 1496 }; 1497 1498 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1499 * to platforms without floating point. 1500 */ 1501 static const u8 multipliers[] = { 1502 0, /* reserved */ 1503 10, 1504 12, 1505 13, 1506 15, 1507 20, 1508 25, 1509 30, 1510 35, 1511 40, 1512 45, 1513 50, 1514 55, 1515 60, 1516 70, 1517 80, 1518 }; 1519 1520 #if !CONFIG_IS_ENABLED(DM_MMC) 1521 static void mmc_set_ios(struct mmc *mmc) 1522 { 1523 if (mmc->cfg->ops->set_ios) 1524 mmc->cfg->ops->set_ios(mmc); 1525 } 1526 1527 static bool mmc_card_busy(struct mmc *mmc) 1528 { 1529 if (!mmc->cfg->ops->card_busy) 1530 return -ENOSYS; 1531 1532 return mmc->cfg->ops->card_busy(mmc); 1533 } 1534 1535 static bool mmc_can_card_busy(struct mmc *) 1536 { 1537 return !!mmc->cfg->ops->card_busy; 1538 } 1539 #endif 1540 1541 static int mmc_startup(struct mmc *mmc) 1542 { 1543 int err, i; 1544 uint mult, freq, tran_speed; 1545 u64 cmult, csize, capacity; 1546 struct mmc_cmd cmd; 1547 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 1548 bool has_parts = false; 1549 bool part_completed; 1550 struct blk_desc *bdesc; 1551 1552 #ifdef CONFIG_MMC_SPI_CRC_ON 1553 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1554 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1555 cmd.resp_type = MMC_RSP_R1; 1556 cmd.cmdarg = 1; 1557 err = mmc_send_cmd(mmc, &cmd, NULL); 1558 1559 if (err) 1560 return err; 1561 } 1562 #endif 1563 1564 /* Put the Card in Identify Mode */ 1565 cmd.cmdidx = mmc_host_is_spi(mmc) ? 
		MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* SPEC_VERS field of the CSD selects the MMC spec version */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	tran_speed = freq * mult;

	/* DSR_IMP and READ_BL_LEN from the CSD */
	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* C_SIZE/C_SIZE_MULT from the CSD; high-capacity uses a fixed mult */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR (CMD4) if the card implements it and one is set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;
	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
		/* check ext_csd version and capacity */
		err = mmc_send_ext_csd(mmc, ext_csd);
		if (err)
			return err;
		if (ext_csd[EXT_CSD_REV] >= 2) {
			/*
			 * According to the JEDEC Standard, the value of
			 * ext_csd's capacity is valid if the value is more
			 * than 2GB
			 */
			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
				| ext_csd[EXT_CSD_SEC_CNT + 1]
				<< 8
				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
			capacity *= MMC_MAX_BLOCK_LEN;
			if ((capacity >> 20) > 2 * 1024)
				mmc->capacity_user = capacity;
		}

		/* EXT_CSD_REV maps directly onto the eMMC spec version */
		switch (ext_csd[EXT_CSD_REV]) {
		case 1:
			mmc->version = MMC_VERSION_4_1;
			break;
		case 2:
			mmc->version = MMC_VERSION_4_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_4_3;
			break;
		case 5:
			mmc->version = MMC_VERSION_4_41;
			break;
		case 6:
			mmc->version = MMC_VERSION_4_5;
			break;
		case 7:
			mmc->version = MMC_VERSION_5_0;
			break;
		case 8:
			mmc->version = MMC_VERSION_5_1;
			break;
		}

		/* The partition data may be non-zero but it is only
		 * effective if PARTITION_SETTING_COMPLETED is set in
		 * EXT_CSD, so ignore any data if this bit is not set,
		 * except for enabling the high-capacity group size
		 * definition (see below). */
		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
				    EXT_CSD_PARTITION_SETTING_COMPLETED);

		/* store the partition info of emmc */
		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
		    ext_csd[EXT_CSD_BOOT_MULT])
			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
		if (part_completed &&
		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
		/* NOTE(review): 'esr.mmc_can_trim' is a non-mainline field --
		 * confirm the struct member against this tree's mmc.h */
		if (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] & EXT_CSD_SEC_GB_CL_EN)
			mmc->esr.mmc_can_trim = 1;

		/* boot and RPMB partition sizes are MULT * 128 KiB */
		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

		/* general-purpose partition sizes (3-byte multipliers) */
		for (i = 0; i < 4; i++) {
			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
			uint mult = (ext_csd[idx + 2] << 16) +
				(ext_csd[idx + 1] << 8) + ext_csd[idx];
			if (mult)
				has_parts = true;
			if (!part_completed)
				continue;
			mmc->capacity_gp[i] = mult;
			mmc->capacity_gp[i] *=
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			/* <<= 19: groups of 512 KiB expressed in bytes */
			mmc->capacity_gp[i] <<= 19;
		}

		if (part_completed) {
			/* enhanced user-area size/start (3- and 4-byte fields) */
			mmc->enh_user_size =
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
				ext_csd[EXT_CSD_ENH_SIZE_MULT];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
			mmc->enh_user_size <<= 19;
			mmc->enh_user_start =
				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
				ext_csd[EXT_CSD_ENH_START_ADDR];
			/* high-capacity devices address in 512-byte sectors */
			if (mmc->high_capacity)
				mmc->enh_user_start <<= 9;
		}

		/*
		 * Host needs to enable ERASE_GRP_DEF bit if device is
		 * partitioned. This bit will be lost every time after a reset
		 * or power off. This will affect erase size.
		 */
		if (part_completed)
			has_parts = true;
		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
			has_parts = true;
		if (has_parts) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_ERASE_GROUP_DEF, 1);

			if (err)
				return err;
			else
				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
		}

		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
			/* Read out group size from ext_csd */
			mmc->erase_grp_size =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
			/*
			 * if high capacity and partition setting completed
			 * SEC_COUNT is valid even if it is smaller than 2 GiB
			 * JEDEC Standard JESD84-B45, 6.2.4
			 */
			if (mmc->high_capacity && part_completed) {
				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
					   (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
					   (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
					   (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
				capacity *=
					MMC_MAX_BLOCK_LEN;
				mmc->capacity_user = capacity;
			}
		} else {
			/* Calculate the group size from the csd value. */
			int erase_gsz, erase_gmul;
			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
			mmc->erase_grp_size = (erase_gsz + 1)
				* (erase_gmul + 1);
		}

		/* write-protect group size (in 512-byte sectors) */
		mmc->hc_wp_grp_size = 1024
			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	}

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	if (IS_SD(mmc))
		err = sd_change_freq(mmc);
	else
		err = mmc_change_freq(mmc);

	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (IS_SD(mmc)) {
		if (mmc->card_caps & MMC_MODE_4BIT) {
			cmd.cmdidx = MMC_CMD_APP_CMD;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = mmc->rca << 16;

			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
			cmd.resp_type = MMC_RSP_R1;
			cmd.cmdarg = 2;	/* ACMD6 argument 2 = 4-bit bus */
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (err)
				return err;

			mmc_set_bus_width(mmc, 4);
		}

		err = sd_read_ssr(mmc);
		if (err)
			return err;

		/* 50 MHz for high-speed cards, 25 MHz default-speed */
		if (mmc->card_caps & MMC_MODE_HS)
			tran_speed = 50000000;
		else
			tran_speed = 25000000;

		mmc_set_clock(mmc, tran_speed);
	}

	/* Fix the block length for DDR mode */
	if (mmc_card_ddr(mmc)) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba =
		lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
	(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
	!defined(CONFIG_USE_TINY_PRINTF))
	/* decode vendor/product/revision strings from the CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

/* Send CMD8 (SEND_IF_COND) to detect an SD v2 card and verify the echo. */
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	/* the card must echo back the 0xaa check pattern */
	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations.
 */
__weak void board_mmc_power_init(void)
{
}
#endif

/* Enable the card's supply regulator (DM) or invoke the board hook. */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if defined(CONFIG_DM_REGULATOR) && !defined(CONFIG_SPL_BUILD)
	struct udevice *vmmc_supply;
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &vmmc_supply);
	if (ret) {
		/* no regulator described in the device tree: not an error */
		debug("%s: No vmmc supply\n", mmc->dev->name);
		return 0;
	}

	ret = regulator_set_enable(vmmc_supply, true);
	if (ret) {
		puts("Error enabling VMMC supply\n");
		return ret;
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * First phase of card initialization: power, controller init, CMD0,
 * and operating-condition negotiation (SD CMD8/ACMD41 or MMC CMD1).
 * The init is finished later by mmc_complete_init().
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* start identification at 1-bit width and minimum clock */
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to
	   user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 (result is only used via the card's state) */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

/* Second init phase: finish OCR negotiation, then run mmc_startup(). */
static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

/* Full card init (resumes a pending one); logs elapsed time on failure. */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

/* Record the DSR value to be programmed (CMD4) during the next init. */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* board-specific MMC initializations.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

/* Arrange for mmc_do_preinit() to initialize this device up front. */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with DM: devices are probed on demand; nothing to do here. */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Probe every MMC device known to driver model. */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Non-DM: fall back to the board hook, then the CPU hook. */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

/* One-time subsystem setup: probe controllers, pre-init marked cards. */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device.
 * BKOPS_EN is one-time programmable; returns 0 on success or if already
 * enabled, a negative error code otherwise.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	/* the card must advertise BKOPS support before we try to enable it */
	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif