1 // SPDX-License-Identifier: GPL-2.0+ 2 /** 3 * ufs.c - Universal Flash Subsystem (UFS) driver 4 * 5 * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported 6 * to u-boot. 7 * 8 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com 9 */ 10 #include <charset.h> 11 #include <common.h> 12 #include <dm.h> 13 #include <log.h> 14 #include <dm/lists.h> 15 #include <dm/device-internal.h> 16 #include <malloc.h> 17 #include <hexdump.h> 18 #include <scsi.h> 19 #include <asm/io.h> 20 #include <asm/dma-mapping.h> 21 #include <linux/bitops.h> 22 #include <linux/delay.h> 23 24 #if defined(CONFIG_SUPPORT_USBPLUG) 25 #include "ufs-rockchip-usbplug.h" 26 #endif 27 28 #include "ufs.h" 29 30 #if defined(CONFIG_ROCKCHIP_UFS_RPMB) 31 #include "ufs-rockchip-rpmb.h" 32 #endif 33 34 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ 35 UTP_TASK_REQ_COMPL |\ 36 UFSHCD_ERROR_MASK) 37 /* maximum number of link-startup retries */ 38 #define DME_LINKSTARTUP_RETRIES 3 39 40 /* maximum number of retries for a general UIC command */ 41 #define UFS_UIC_COMMAND_RETRIES 3 42 43 /* Query request retries */ 44 #define QUERY_REQ_RETRIES 3 45 /* Query request timeout */ 46 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ 47 48 /* maximum timeout in ms for a general UIC command */ 49 #define UFS_UIC_CMD_TIMEOUT 1000 50 /* NOP OUT retries waiting for NOP IN response */ 51 /* Polling time to wait for fDeviceInit */ 52 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */ 53 54 #define NOP_OUT_RETRIES 10 55 /* Timeout after 30 msecs if NOP OUT hangs without response */ 56 #define NOP_OUT_TIMEOUT 30 /* msecs */ 57 58 /* Only use one Task Tag for all requests */ 59 #define TASK_TAG 0 60 61 /* Expose the flag value from utp_upiu_query.value */ 62 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF 63 64 #define MAX_PRDT_ENTRY 262144 65 66 /* maximum bytes per request */ 67 #define UFS_MAX_BYTES (128 * 256 * 1024) 68 69 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba); 70 
static inline void ufshcd_hba_stop(struct ufs_hba *hba); 71 static int ufshcd_hba_enable(struct ufs_hba *hba); 72 73 /* 74 * ufshcd_wait_for_register - wait for register value to change 75 */ 76 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, 77 u32 val, unsigned long timeout_ms) 78 { 79 int err = 0; 80 unsigned long start = get_timer(0); 81 82 /* ignore bits that we don't intend to wait on */ 83 val = val & mask; 84 85 while ((ufshcd_readl(hba, reg) & mask) != val) { 86 if (get_timer(start) > timeout_ms) { 87 if ((ufshcd_readl(hba, reg) & mask) != val) 88 err = -ETIMEDOUT; 89 break; 90 } 91 } 92 93 return err; 94 } 95 96 /** 97 * ufshcd_init_pwr_info - setting the POR (power on reset) 98 * values in hba power info 99 */ 100 static void ufshcd_init_pwr_info(struct ufs_hba *hba) 101 { 102 hba->pwr_info.gear_rx = UFS_PWM_G1; 103 hba->pwr_info.gear_tx = UFS_PWM_G1; 104 hba->pwr_info.lane_rx = 1; 105 hba->pwr_info.lane_tx = 1; 106 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; 107 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; 108 hba->pwr_info.hs_rate = 0; 109 } 110 111 /** 112 * ufshcd_print_pwr_info - print power params as saved in hba 113 * power info 114 */ 115 static void ufshcd_print_pwr_info(struct ufs_hba *hba) 116 { 117 static const char * const names[] = { 118 "INVALID MODE", 119 "FAST MODE", 120 "SLOW_MODE", 121 "INVALID MODE", 122 "FASTAUTO_MODE", 123 "SLOWAUTO_MODE", 124 "INVALID MODE", 125 }; 126 127 dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", 128 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, 129 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, 130 names[hba->pwr_info.pwr_rx], 131 names[hba->pwr_info.pwr_tx], 132 hba->pwr_info.hs_rate); 133 } 134 135 /** 136 * ufshcd_ready_for_uic_cmd - Check if controller is ready 137 * to accept UIC commands 138 */ 139 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) 140 { 141 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) 142 return 
true; 143 else 144 return false; 145 } 146 147 /** 148 * ufshcd_get_uic_cmd_result - Get the UIC command result 149 */ 150 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) 151 { 152 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & 153 MASK_UIC_COMMAND_RESULT; 154 } 155 156 /** 157 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command 158 */ 159 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) 160 { 161 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); 162 } 163 164 /** 165 * ufshcd_is_device_present - Check if any device connected to 166 * the host controller 167 */ 168 static inline bool ufshcd_is_device_present(struct ufs_hba *hba) 169 { 170 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & 171 DEVICE_PRESENT) ? true : false; 172 } 173 174 /** 175 * ufshcd_send_uic_cmd - UFS Interconnect layer command API 176 * 177 */ 178 static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) 179 { 180 unsigned long start = 0; 181 u32 intr_status; 182 u32 enabled_intr_status; 183 184 if (!ufshcd_ready_for_uic_cmd(hba)) { 185 dev_err(hba->dev, 186 "Controller not ready to accept UIC commands\n"); 187 return -EIO; 188 } 189 190 debug("sending uic command:%d\n", uic_cmd->command); 191 192 /* Write Args */ 193 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); 194 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); 195 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); 196 197 /* Write UIC Cmd */ 198 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, 199 REG_UIC_COMMAND); 200 201 start = get_timer(0); 202 do { 203 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 204 enabled_intr_status = intr_status & hba->intr_mask; 205 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); 206 207 if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { 208 dev_err(hba->dev, 209 "Timedout waiting for UIC response\n"); 210 211 return -ETIMEDOUT; 212 } 213 214 if 
(enabled_intr_status & UFSHCD_ERROR_MASK) { 215 dev_err(hba->dev, "Error in status:%08x\n", 216 enabled_intr_status); 217 218 return -1; 219 } 220 } while (!(enabled_intr_status & UFSHCD_UIC_MASK)); 221 222 uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba); 223 uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); 224 225 debug("Sent successfully\n"); 226 227 return 0; 228 } 229 230 /** 231 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET 232 * 233 */ 234 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, 235 u32 mib_val, u8 peer) 236 { 237 struct uic_command uic_cmd = {0}; 238 static const char *const action[] = { 239 "dme-set", 240 "dme-peer-set" 241 }; 242 const char *set = action[!!peer]; 243 int ret; 244 int retries = UFS_UIC_COMMAND_RETRIES; 245 246 uic_cmd.command = peer ? 247 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; 248 uic_cmd.argument1 = attr_sel; 249 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); 250 uic_cmd.argument3 = mib_val; 251 252 do { 253 /* for peer attributes we retry upon failure */ 254 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 255 if (ret) 256 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", 257 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); 258 } while (ret && peer && --retries); 259 260 if (ret) 261 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", 262 set, UIC_GET_ATTR_ID(attr_sel), mib_val, 263 UFS_UIC_COMMAND_RETRIES - retries); 264 265 return ret; 266 } 267 268 /** 269 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET 270 * 271 */ 272 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 273 u32 *mib_val, u8 peer) 274 { 275 struct uic_command uic_cmd = {0}; 276 static const char *const action[] = { 277 "dme-get", 278 "dme-peer-get" 279 }; 280 const char *get = action[!!peer]; 281 int ret; 282 int retries = UFS_UIC_COMMAND_RETRIES; 283 284 uic_cmd.command = peer ? 
285 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; 286 uic_cmd.argument1 = attr_sel; 287 288 do { 289 /* for peer attributes we retry upon failure */ 290 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 291 if (ret) 292 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", 293 get, UIC_GET_ATTR_ID(attr_sel), ret); 294 } while (ret && peer && --retries); 295 296 if (ret) 297 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", 298 get, UIC_GET_ATTR_ID(attr_sel), 299 UFS_UIC_COMMAND_RETRIES - retries); 300 301 if (mib_val && !ret) 302 *mib_val = uic_cmd.argument3; 303 304 return ret; 305 } 306 307 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) 308 { 309 u32 tx_lanes, i, err = 0; 310 311 if (!peer) 312 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 313 &tx_lanes); 314 else 315 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 316 &tx_lanes); 317 for (i = 0; i < tx_lanes; i++) { 318 if (!peer) 319 err = ufshcd_dme_set(hba, 320 UIC_ARG_MIB_SEL(TX_LCC_ENABLE, 321 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 322 0); 323 else 324 err = ufshcd_dme_peer_set(hba, 325 UIC_ARG_MIB_SEL(TX_LCC_ENABLE, 326 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 327 0); 328 if (err) { 329 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n", 330 __func__, peer, i, err); 331 break; 332 } 333 } 334 335 return err; 336 } 337 338 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) 339 { 340 return ufshcd_disable_tx_lcc(hba, true); 341 } 342 343 /** 344 * ufshcd_dme_link_startup - Notify Unipro to perform link startup 345 * 346 */ 347 static int ufshcd_dme_link_startup(struct ufs_hba *hba) 348 { 349 struct uic_command uic_cmd = {0}; 350 int ret; 351 352 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; 353 354 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 355 if (ret) 356 dev_dbg(hba->dev, 357 "dme-link-startup: error code %d\n", ret); 358 return ret; 359 } 360 361 int ufshcd_dme_enable(struct ufs_hba *hba) 362 { 363 struct uic_command uic_cmd = {0}; 
364 int ret; 365 366 uic_cmd.command = UIC_CMD_DME_ENABLE; 367 368 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 369 if (ret) 370 dev_err(hba->dev, 371 "dme-enable: error code %d\n", ret); 372 return ret; 373 } 374 375 int ufshcd_dme_reset(struct ufs_hba *hba) 376 { 377 struct uic_command uic_cmd = {0}; 378 int ret; 379 380 uic_cmd.command = UIC_CMD_DME_RESET; 381 382 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 383 if (ret) 384 dev_err(hba->dev, 385 "dme-reset: error code %d\n", ret); 386 return ret; 387 } 388 389 /** 390 * ufshcd_disable_intr_aggr - Disables interrupt aggregation. 391 * 392 */ 393 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) 394 { 395 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 396 } 397 398 /** 399 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY 400 */ 401 static inline int ufshcd_get_lists_status(u32 reg) 402 { 403 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); 404 } 405 406 /** 407 * ufshcd_enable_run_stop_reg - Enable run-stop registers, 408 * When run-stop registers are set to 1, it indicates the 409 * host controller that it can process the requests 410 */ 411 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) 412 { 413 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, 414 REG_UTP_TASK_REQ_LIST_RUN_STOP); 415 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, 416 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); 417 } 418 419 /** 420 * ufshcd_enable_intr - enable interrupts 421 */ 422 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) 423 { 424 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); 425 u32 rw; 426 427 if (hba->version == UFSHCI_VERSION_10) { 428 rw = set & INTERRUPT_MASK_RW_VER_10; 429 set = rw | ((set ^ intrs) & intrs); 430 } else { 431 set |= intrs; 432 } 433 434 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); 435 436 hba->intr_mask = set; 437 } 438 439 /** 440 * ufshcd_make_hba_operational - Make UFS controller operational 441 * 442 * To bring UFS host 
controller to operational state, 443 * 1. Enable required interrupts 444 * 2. Configure interrupt aggregation 445 * 3. Program UTRL and UTMRL base address 446 * 4. Configure run-stop-registers 447 * 448 */ 449 static int ufshcd_make_hba_operational(struct ufs_hba *hba) 450 { 451 int err = 0; 452 u32 reg; 453 454 /* Enable required interrupts */ 455 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); 456 457 /* Disable interrupt aggregation */ 458 ufshcd_disable_intr_aggr(hba); 459 460 /* Configure UTRL and UTMRL base address registers */ 461 ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl), 462 REG_UTP_TRANSFER_REQ_LIST_BASE_L); 463 ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl), 464 REG_UTP_TRANSFER_REQ_LIST_BASE_H); 465 ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl), 466 REG_UTP_TASK_REQ_LIST_BASE_L); 467 ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl), 468 REG_UTP_TASK_REQ_LIST_BASE_H); 469 470 /* 471 * UCRDY, UTMRLDY and UTRLRDY bits must be 1 472 */ 473 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); 474 if (!(ufshcd_get_lists_status(reg))) { 475 ufshcd_enable_run_stop_reg(hba); 476 } else { 477 dev_err(hba->dev, 478 "Host controller not ready to process requests\n"); 479 err = -EIO; 480 goto out; 481 } 482 483 out: 484 return err; 485 } 486 487 /** 488 * ufshcd_link_startup - Initialize unipro link startup 489 */ 490 static int ufshcd_link_startup(struct ufs_hba *hba) 491 { 492 int ret; 493 int retries = DME_LINKSTARTUP_RETRIES; 494 bool link_startup_again = true; 495 496 link_startup: 497 do { 498 ufshcd_ops_link_startup_notify(hba, PRE_CHANGE); 499 500 ret = ufshcd_dme_link_startup(hba); 501 502 /* check if device is detected by inter-connect layer */ 503 if (!ret && !ufshcd_is_device_present(hba)) { 504 dev_err(hba->dev, "%s: Device not present\n", __func__); 505 ret = -ENXIO; 506 goto out; 507 } 508 509 /* 510 * DME link lost indication is only received when link is up, 511 * but we can't be sure if the link is up until 
link startup 512 * succeeds. So reset the local Uni-Pro and try again. 513 */ 514 if (ret && ufshcd_hba_enable(hba)) 515 goto out; 516 } while (ret && retries--); 517 518 if (ret) 519 /* failed to get the link up... retire */ 520 goto out; 521 522 if (link_startup_again) { 523 link_startup_again = false; 524 retries = DME_LINKSTARTUP_RETRIES; 525 goto link_startup; 526 } 527 528 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ 529 ufshcd_init_pwr_info(hba); 530 531 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { 532 ret = ufshcd_disable_device_tx_lcc(hba); 533 if (ret) 534 goto out; 535 } 536 537 /* Include any host controller configuration via UIC commands */ 538 ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE); 539 if (ret) 540 goto out; 541 542 ret = ufshcd_make_hba_operational(hba); 543 out: 544 if (ret) 545 dev_err(hba->dev, "link startup failed %d\n", ret); 546 547 return ret; 548 } 549 550 /** 551 * ufshcd_hba_stop - Send controller to reset state 552 */ 553 static inline void ufshcd_hba_stop(struct ufs_hba *hba) 554 { 555 int err; 556 557 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); 558 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, 559 CONTROLLER_ENABLE, CONTROLLER_DISABLE, 560 10); 561 if (err) 562 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); 563 } 564 565 /** 566 * ufshcd_is_hba_active - Get controller state 567 */ 568 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) 569 { 570 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) 571 ? 
false : true; 572 } 573 574 /** 575 * ufshcd_hba_start - Start controller initialization sequence 576 */ 577 static inline void ufshcd_hba_start(struct ufs_hba *hba) 578 { 579 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); 580 } 581 582 /** 583 * ufshcd_hba_enable - initialize the controller 584 */ 585 static int ufshcd_hba_enable(struct ufs_hba *hba) 586 { 587 int retry; 588 589 if (!ufshcd_is_hba_active(hba)) 590 /* change controller state to "reset state" */ 591 ufshcd_hba_stop(hba); 592 593 ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE); 594 595 /* start controller initialization sequence */ 596 ufshcd_hba_start(hba); 597 598 /* 599 * To initialize a UFS host controller HCE bit must be set to 1. 600 * During initialization the HCE bit value changes from 1->0->1. 601 * When the host controller completes initialization sequence 602 * it sets the value of HCE bit to 1. The same HCE bit is read back 603 * to check if the controller has completed initialization sequence. 604 * So without this delay the value HCE = 1, set in the previous 605 * instruction might be read back. 606 * This delay can be changed based on the controller. 
607 */ 608 mdelay(1); 609 610 /* wait for the host controller to complete initialization */ 611 retry = 10; 612 while (ufshcd_is_hba_active(hba)) { 613 if (retry) { 614 retry--; 615 } else { 616 dev_err(hba->dev, "Controller enable failed\n"); 617 return -EIO; 618 } 619 mdelay(5); 620 } 621 622 /* enable UIC related interrupts */ 623 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); 624 625 if (ufshcd_ops_hce_enable_notify(hba, POST_CHANGE)) 626 return -EIO; 627 628 return 0; 629 } 630 631 /** 632 * ufshcd_host_memory_configure - configure local reference block with 633 * memory offsets 634 */ 635 static void ufshcd_host_memory_configure(struct ufs_hba *hba) 636 { 637 struct utp_transfer_req_desc *utrdlp; 638 dma_addr_t cmd_desc_dma_addr; 639 u16 response_offset; 640 u16 prdt_offset; 641 642 utrdlp = hba->utrdl; 643 cmd_desc_dma_addr = (dma_addr_t)hba->ucdl; 644 645 utrdlp->command_desc_base_addr_lo = 646 cpu_to_le32(lower_32_bits(cmd_desc_dma_addr)); 647 utrdlp->command_desc_base_addr_hi = 648 cpu_to_le32(upper_32_bits(cmd_desc_dma_addr)); 649 650 response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu); 651 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); 652 653 utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2); 654 utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2); 655 utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); 656 657 hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl; 658 hba->ucd_rsp_ptr = 659 (struct utp_upiu_rsp *)&hba->ucdl->response_upiu; 660 hba->ucd_prdt_ptr = 661 (struct ufshcd_sg_entry *)&hba->ucdl->prd_table; 662 } 663 664 /** 665 * ufshcd_memory_alloc - allocate memory for host memory space data structures 666 */ 667 static int ufshcd_memory_alloc(struct ufs_hba *hba) 668 { 669 /* Allocate one Transfer Request Descriptor 670 * Should be aligned to 1k boundary. 
671 */ 672 hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc)); 673 if (!hba->utrdl) { 674 dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n"); 675 return -ENOMEM; 676 } 677 678 /* Allocate one Command Descriptor 679 * Should be aligned to 1k boundary. 680 */ 681 hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc)); 682 if (!hba->ucdl) { 683 dev_err(hba->dev, "Command descriptor memory allocation failed\n"); 684 return -ENOMEM; 685 } 686 687 hba->dev_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_device_descriptor)); 688 if (!hba->dev_desc) { 689 dev_err(hba->dev, "memory allocation failed\n"); 690 return -ENOMEM; 691 } 692 693 #if defined(CONFIG_SUPPORT_USBPLUG) 694 hba->rc_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_configuration_descriptor)); 695 hba->wc_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_configuration_descriptor)); 696 hba->geo_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_geometry_descriptor)); 697 if (!hba->rc_desc || !hba->wc_desc || !hba->geo_desc) { 698 dev_err(hba->dev, "memory allocation failed\n"); 699 return -ENOMEM; 700 } 701 #endif 702 return 0; 703 } 704 705 /** 706 * ufshcd_get_intr_mask - Get the interrupt bit mask 707 */ 708 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) 709 { 710 u32 intr_mask = 0; 711 712 switch (hba->version) { 713 case UFSHCI_VERSION_10: 714 intr_mask = INTERRUPT_MASK_ALL_VER_10; 715 break; 716 case UFSHCI_VERSION_11: 717 case UFSHCI_VERSION_20: 718 intr_mask = INTERRUPT_MASK_ALL_VER_11; 719 break; 720 case UFSHCI_VERSION_21: 721 default: 722 intr_mask = INTERRUPT_MASK_ALL_VER_21; 723 break; 724 } 725 726 return intr_mask; 727 } 728 729 /** 730 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA 731 */ 732 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) 733 { 734 return ufshcd_readl(hba, REG_UFS_VERSION); 735 } 736 737 /** 738 * ufshcd_get_upmcrs - Get the power mode change request status 739 */ 740 
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) 741 { 742 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; 743 } 744 745 /** 746 * ufshcd_cache_flush_and_invalidate - Flush and invalidate cache 747 * 748 * Flush and invalidate cache in aligned address..address+size range. 749 * The invalidation is in place to avoid stale data in cache. 750 */ 751 static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size) 752 { 753 uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1); 754 unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN); 755 756 flush_dcache_range(aaddr, aaddr + asize); 757 invalidate_dcache_range(aaddr, aaddr + asize); 758 } 759 760 /** 761 * ufshcd_prepare_req_desc_hdr() - Fills the requests header 762 * descriptor according to request 763 */ 764 static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc, 765 u32 *upiu_flags, 766 enum dma_data_direction cmd_dir) 767 { 768 u32 data_direction; 769 u32 dword_0; 770 771 if (cmd_dir == DMA_FROM_DEVICE) { 772 data_direction = UTP_DEVICE_TO_HOST; 773 *upiu_flags = UPIU_CMD_FLAGS_READ; 774 } else if (cmd_dir == DMA_TO_DEVICE) { 775 data_direction = UTP_HOST_TO_DEVICE; 776 *upiu_flags = UPIU_CMD_FLAGS_WRITE; 777 } else { 778 data_direction = UTP_NO_DATA_TRANSFER; 779 *upiu_flags = UPIU_CMD_FLAGS_NONE; 780 } 781 782 dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET); 783 784 /* Enable Interrupt for command */ 785 dword_0 |= UTP_REQ_DESC_INT_CMD; 786 787 /* Transfer request descriptor header fields */ 788 req_desc->header.dword_0 = cpu_to_le32(dword_0); 789 /* dword_1 is reserved, hence it is set to 0 */ 790 req_desc->header.dword_1 = 0; 791 /* 792 * assigning invalid value for command status. 
Controller 793 * updates OCS on command completion, with the command 794 * status 795 */ 796 req_desc->header.dword_2 = 797 cpu_to_le32(OCS_INVALID_COMMAND_STATUS); 798 /* dword_3 is reserved, hence it is set to 0 */ 799 req_desc->header.dword_3 = 0; 800 801 req_desc->prd_table_length = 0; 802 803 ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc)); 804 } 805 806 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, 807 u32 upiu_flags) 808 { 809 struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; 810 struct ufs_query *query = &hba->dev_cmd.query; 811 u16 len = be16_to_cpu(query->request.upiu_req.length); 812 813 /* Query request header */ 814 ucd_req_ptr->header.dword_0 = 815 UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ, 816 upiu_flags, 0, TASK_TAG); 817 ucd_req_ptr->header.dword_1 = 818 UPIU_HEADER_DWORD(0, query->request.query_func, 819 0, 0); 820 821 /* Data segment length only need for WRITE_DESC */ 822 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) 823 ucd_req_ptr->header.dword_2 = 824 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len); 825 else 826 ucd_req_ptr->header.dword_2 = 0; 827 828 /* Copy the Query Request buffer as is */ 829 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE); 830 831 /* Copy the Descriptor */ 832 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) { 833 memcpy(ucd_req_ptr + 1, query->descriptor, len); 834 ufshcd_cache_flush_and_invalidate(ucd_req_ptr, 835 ALIGN(sizeof(*ucd_req_ptr) + len, ARCH_DMA_MINALIGN)); 836 } else { 837 ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr)); 838 } 839 840 memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 841 ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr)); 842 } 843 844 static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba) 845 { 846 struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr; 847 848 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); 849 850 /* 
command descriptor fields */ 851 ucd_req_ptr->header.dword_0 = 852 UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG); 853 /* clear rest of the fields of basic header */ 854 ucd_req_ptr->header.dword_1 = 0; 855 ucd_req_ptr->header.dword_2 = 0; 856 857 memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 858 859 ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr)); 860 ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr)); 861 } 862 863 /** 864 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU) 865 * for Device Management Purposes 866 */ 867 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, 868 enum dev_cmd_type cmd_type) 869 { 870 u32 upiu_flags; 871 int ret = 0; 872 struct utp_transfer_req_desc *req_desc = hba->utrdl; 873 874 hba->dev_cmd.type = cmd_type; 875 876 ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE); 877 switch (cmd_type) { 878 case DEV_CMD_TYPE_QUERY: 879 ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags); 880 break; 881 case DEV_CMD_TYPE_NOP: 882 ufshcd_prepare_utp_nop_upiu(hba); 883 break; 884 default: 885 ret = -EINVAL; 886 } 887 888 return ret; 889 } 890 891 static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) 892 { 893 unsigned long start; 894 u32 intr_status; 895 u32 enabled_intr_status; 896 897 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); 898 899 start = get_timer(0); 900 do { 901 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 902 enabled_intr_status = intr_status & hba->intr_mask; 903 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); 904 905 if (get_timer(start) > QUERY_REQ_TIMEOUT) { 906 dev_err(hba->dev, 907 "Timedout waiting for UTP response\n"); 908 909 return -ETIMEDOUT; 910 } 911 912 if (enabled_intr_status & UFSHCD_ERROR_MASK) { 913 dev_err(hba->dev, "Error in status:%08x\n", 914 enabled_intr_status); 915 916 return -1; 917 } 918 } while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL)); 919 920 
return 0; 921 } 922 923 /** 924 * ufshcd_get_req_rsp - returns the TR response transaction type 925 */ 926 static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) 927 { 928 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; 929 } 930 931 /** 932 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status 933 * 934 */ 935 static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba) 936 { 937 return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS; 938 } 939 940 static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) 941 { 942 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; 943 } 944 945 static int ufshcd_check_query_response(struct ufs_hba *hba) 946 { 947 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; 948 949 /* Get the UPIU response */ 950 query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >> 951 UPIU_RSP_CODE_OFFSET; 952 return query_res->response; 953 } 954 955 /** 956 * ufshcd_copy_query_response() - Copy the Query Response and the data 957 * descriptor 958 */ 959 static int ufshcd_copy_query_response(struct ufs_hba *hba) 960 { 961 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; 962 963 memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); 964 965 /* Get the descriptor */ 966 if (hba->dev_cmd.query.descriptor && 967 hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { 968 u8 *descp = (u8 *)hba->ucd_rsp_ptr + 969 GENERAL_UPIU_REQUEST_SIZE; 970 u16 resp_len; 971 u16 buf_len; 972 973 /* data segment length */ 974 resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) & 975 MASK_QUERY_DATA_SEG_LEN; 976 buf_len = 977 be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length); 978 if (likely(buf_len >= resp_len)) { 979 int size = ALIGN(GENERAL_UPIU_REQUEST_SIZE + resp_len, ARCH_DMA_MINALIGN); 980 981 invalidate_dcache_range((uintptr_t)hba->ucd_rsp_ptr, (uintptr_t)hba->ucd_rsp_ptr + size); 982 memcpy(hba->dev_cmd.query.descriptor, descp, 
resp_len); 983 } else { 984 dev_warn(hba->dev, 985 "%s: Response size is bigger than buffer", 986 __func__); 987 return -EINVAL; 988 } 989 } 990 991 return 0; 992 } 993 994 /** 995 * ufshcd_exec_dev_cmd - API for sending device management requests 996 */ 997 int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) 998 { 999 int err; 1000 int resp; 1001 1002 err = ufshcd_comp_devman_upiu(hba, cmd_type); 1003 if (err) 1004 return err; 1005 1006 err = ufshcd_send_command(hba, TASK_TAG); 1007 if (err) 1008 return err; 1009 1010 err = ufshcd_get_tr_ocs(hba); 1011 if (err) { 1012 dev_err(hba->dev, "Error in OCS:%d\n", err); 1013 return -EINVAL; 1014 } 1015 1016 resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); 1017 switch (resp) { 1018 case UPIU_TRANSACTION_NOP_IN: 1019 break; 1020 case UPIU_TRANSACTION_QUERY_RSP: 1021 err = ufshcd_check_query_response(hba); 1022 if (!err) 1023 err = ufshcd_copy_query_response(hba); 1024 break; 1025 case UPIU_TRANSACTION_REJECT_UPIU: 1026 /* TODO: handle Reject UPIU Response */ 1027 err = -EPERM; 1028 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", 1029 __func__); 1030 break; 1031 default: 1032 err = -EINVAL; 1033 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", 1034 __func__, resp); 1035 } 1036 1037 return err; 1038 } 1039 1040 /** 1041 * ufshcd_init_query() - init the query response and request parameters 1042 */ 1043 static inline void ufshcd_init_query(struct ufs_hba *hba, 1044 struct ufs_query_req **request, 1045 struct ufs_query_res **response, 1046 enum query_opcode opcode, 1047 u8 idn, u8 index, u8 selector) 1048 { 1049 *request = &hba->dev_cmd.query.request; 1050 *response = &hba->dev_cmd.query.response; 1051 memset(*request, 0, sizeof(struct ufs_query_req)); 1052 memset(*response, 0, sizeof(struct ufs_query_res)); 1053 (*request)->upiu_req.opcode = opcode; 1054 (*request)->upiu_req.idn = idn; 1055 (*request)->upiu_req.index = index; 1056 (*request)->upiu_req.selector = 
selector; 1057 } 1058 1059 /** 1060 * ufshcd_query_flag() - API function for sending flag query requests 1061 */ 1062 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 1063 enum flag_idn idn, bool *flag_res) 1064 { 1065 struct ufs_query_req *request = NULL; 1066 struct ufs_query_res *response = NULL; 1067 int err, index = 0, selector = 0; 1068 int timeout = QUERY_REQ_TIMEOUT; 1069 1070 ufshcd_init_query(hba, &request, &response, opcode, idn, index, 1071 selector); 1072 1073 switch (opcode) { 1074 case UPIU_QUERY_OPCODE_SET_FLAG: 1075 case UPIU_QUERY_OPCODE_CLEAR_FLAG: 1076 case UPIU_QUERY_OPCODE_TOGGLE_FLAG: 1077 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 1078 break; 1079 case UPIU_QUERY_OPCODE_READ_FLAG: 1080 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 1081 if (!flag_res) { 1082 /* No dummy reads */ 1083 dev_err(hba->dev, "%s: Invalid argument for read request\n", 1084 __func__); 1085 err = -EINVAL; 1086 goto out; 1087 } 1088 break; 1089 default: 1090 dev_err(hba->dev, 1091 "%s: Expected query flag opcode but got = %d\n", 1092 __func__, opcode); 1093 err = -EINVAL; 1094 goto out; 1095 } 1096 1097 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); 1098 1099 if (err) { 1100 dev_err(hba->dev, 1101 "%s: Sending flag query for idn %d failed, err = %d\n", 1102 __func__, idn, err); 1103 goto out; 1104 } 1105 1106 if (flag_res) 1107 *flag_res = (be32_to_cpu(response->upiu_res.value) & 1108 MASK_QUERY_UPIU_FLAG_LOC) & 0x1; 1109 1110 out: 1111 return err; 1112 } 1113 1114 static int ufshcd_query_flag_retry(struct ufs_hba *hba, 1115 enum query_opcode opcode, 1116 enum flag_idn idn, bool *flag_res) 1117 { 1118 int ret; 1119 int retries; 1120 1121 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { 1122 ret = ufshcd_query_flag(hba, opcode, idn, flag_res); 1123 if (ret) 1124 dev_dbg(hba->dev, 1125 "%s: failed with error %d, retries %d\n", 1126 __func__, ret, retries); 1127 else 1128 break; 1129 } 1130 1131 
if (ret) 1132 dev_err(hba->dev, 1133 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n", 1134 __func__, opcode, idn, ret, retries); 1135 return ret; 1136 } 1137 1138 static int __ufshcd_query_descriptor(struct ufs_hba *hba, 1139 enum query_opcode opcode, 1140 enum desc_idn idn, u8 index, u8 selector, 1141 u8 *desc_buf, int *buf_len) 1142 { 1143 struct ufs_query_req *request = NULL; 1144 struct ufs_query_res *response = NULL; 1145 int err; 1146 1147 if (!desc_buf) { 1148 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", 1149 __func__, opcode); 1150 err = -EINVAL; 1151 goto out; 1152 } 1153 1154 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { 1155 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", 1156 __func__, *buf_len); 1157 err = -EINVAL; 1158 goto out; 1159 } 1160 1161 ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector); 1162 hba->dev_cmd.query.descriptor = desc_buf; 1163 request->upiu_req.length = cpu_to_be16(*buf_len); 1164 1165 switch (opcode) { 1166 case UPIU_QUERY_OPCODE_WRITE_DESC: 1167 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 1168 break; 1169 case UPIU_QUERY_OPCODE_READ_DESC: 1170 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 1171 break; 1172 default: 1173 dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n", 1174 __func__, opcode); 1175 err = -EINVAL; 1176 goto out; 1177 } 1178 1179 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); 1180 1181 if (err) { 1182 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", 1183 __func__, opcode, idn, index, err); 1184 goto out; 1185 } 1186 1187 hba->dev_cmd.query.descriptor = NULL; 1188 *buf_len = be16_to_cpu(response->upiu_res.length); 1189 1190 out: 1191 return err; 1192 } 1193 1194 /** 1195 * ufshcd_query_descriptor_retry - API function for sending descriptor requests 1196 */ 1197 int 
ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
			      enum desc_idn idn, u8 index, u8 selector,
			      u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	/*
	 * Retry transient failures up to QUERY_REQ_RETRIES times;
	 * -EINVAL indicates a caller/argument error and is not retried.
	 */
	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}

/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 *
 * Reads only the QUERY_DESC_HDR_SIZE-byte header of the descriptor and
 * returns its advertised length via @desc_length.  The length is stored
 * even when the header's type byte does not match @desc_id (in which
 * case -EINVAL is returned).
 */
int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
			    int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d\n",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		/* device returned a different descriptor type than requested */
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch\n",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}

/*
 * Query the device for the actual size of each descriptor type, falling
 * back to the specification default (set by ufshcd_def_desc_sizes()) when
 * the device does not answer.
 */
static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
				      &hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
				      &hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
				      &hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
				      &hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
				      &hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
				      &hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
				      &hba->desc_size.hlth_desc);
	if (err)
		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}

/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 *
 * Returns 0 and the cached length via @desc_len for known descriptor
 * types; RFU types yield a zero length with success, unknown types
 * yield -EINVAL.
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				 int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		/* string descriptors have no fixed size; allow the maximum */
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_HEALTH:
		*desc_len = hba->desc_size.hlth_desc;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);

/**
 * 
ufshcd_read_desc_param - read the specified descriptor parameter 1333 * 1334 */ 1335 int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, 1336 int desc_index, u8 param_offset, u8 *param_read_buf, 1337 u8 param_size) 1338 { 1339 int ret; 1340 u8 *desc_buf; 1341 int buff_len; 1342 bool is_kmalloc = true; 1343 1344 /* Safety check */ 1345 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) 1346 return -EINVAL; 1347 1348 /* Get the max length of descriptor from structure filled up at probe 1349 * time. 1350 */ 1351 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); 1352 1353 /* Sanity checks */ 1354 if (ret || !buff_len) { 1355 dev_err(hba->dev, "%s: Failed to get full descriptor length\n", 1356 __func__); 1357 return ret; 1358 } 1359 1360 /* Check whether we need temp memory */ 1361 if (param_offset != 0 || param_size < buff_len) { 1362 desc_buf = kmalloc(buff_len, GFP_KERNEL); 1363 if (!desc_buf) 1364 return -ENOMEM; 1365 } else { 1366 desc_buf = param_read_buf; 1367 is_kmalloc = false; 1368 } 1369 1370 /* Request for full descriptor */ 1371 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, 1372 desc_id, desc_index, 0, desc_buf, 1373 &buff_len); 1374 1375 if (ret) { 1376 dev_err(hba->dev, "%s: Failed reading descriptor. 
desc_id %d, desc_index %d, param_offset %d, ret %d\n", 1377 __func__, desc_id, desc_index, param_offset, ret); 1378 goto out; 1379 } 1380 1381 /* Sanity check */ 1382 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { 1383 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", 1384 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); 1385 ret = -EINVAL; 1386 goto out; 1387 } 1388 1389 /* Check wherher we will not copy more data, than available */ 1390 if (is_kmalloc && param_size > buff_len) 1391 param_size = buff_len; 1392 1393 if (is_kmalloc) 1394 memcpy(param_read_buf, &desc_buf[param_offset], param_size); 1395 out: 1396 if (is_kmalloc) 1397 kfree(desc_buf); 1398 return ret; 1399 } 1400 1401 /* replace non-printable or non-ASCII characters with spaces */ 1402 static inline void ufshcd_remove_non_printable(uint8_t *val) 1403 { 1404 if (!val) 1405 return; 1406 1407 if (*val < 0x20 || *val > 0x7e) 1408 *val = ' '; 1409 } 1410 1411 /** 1412 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power 1413 * state) and waits for it to take effect. 1414 * 1415 */ 1416 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) 1417 { 1418 unsigned long start = 0; 1419 u8 status; 1420 int ret; 1421 1422 ret = ufshcd_send_uic_cmd(hba, cmd); 1423 if (ret) { 1424 dev_err(hba->dev, 1425 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", 1426 cmd->command, cmd->argument3, ret); 1427 1428 return ret; 1429 } 1430 1431 start = get_timer(0); 1432 do { 1433 status = ufshcd_get_upmcrs(hba); 1434 if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { 1435 dev_err(hba->dev, 1436 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n", 1437 cmd->command, status); 1438 ret = (status != PWR_OK) ? status : -1; 1439 break; 1440 } 1441 } while (status != PWR_LOCAL); 1442 1443 return ret; 1444 } 1445 1446 /** 1447 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change 1448 * using DME_SET primitives. 
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	/* DME_SET of PA_PWRMODE triggers the actual power mode change */
	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);

	return ret;
}

/*
 * Fill the Command UPIU for a SCSI command: header, expected data length
 * and CDB, then flush both request and (zeroed) response UPIUs so the
 * controller's DMA sees them.
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
				      struct scsi_cmd *pccb, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
				  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	/* copy at most UFS_CDB_SIZE bytes of CDB, zero-pad the rest */
	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}

/*
 * Fill one PRDT entry.  The size field stores @len with the low two bits
 * forced to 1 (GENMASK(1, 0)); callers pass len-1, so this appears to be
 * the controller's "byte count minus one" encoding — NOTE(review):
 * confirm against the UFSHCI PRDT definition.
 */
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}

/*
 * Build the PRDT (scatter list) for @pccb's data buffer, splitting it into
 * MAX_PRDT_ENTRY-sized chunks, and perform the cache maintenance needed
 * before the controller DMAs to/from the buffer.
 */
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	/* round the buffer start down to a cache-line boundary */
	uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1);
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	if (!datalen) {
		/* no data phase: empty PRDT */
		req_desc->prd_table_length = 0;
		ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
		return;
	}

	if (pccb->dma_dir == DMA_TO_DEVICE) {	/* Write to device */
		flush_dcache_range(aaddr, ALIGN(aaddr + datalen + ARCH_DMA_MINALIGN - 1, ARCH_DMA_MINALIGN));
	}

	/* In any case, invalidate cache to avoid stale data in it. */
	invalidate_dcache_range(aaddr, ALIGN(aaddr + datalen + ARCH_DMA_MINALIGN - 1, ARCH_DMA_MINALIGN));

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	/* all entries except the last cover a full MAX_PRDT_ENTRY chunk */
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	/* last entry covers the remaining bytes (len-1 encoding) */
	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
	ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length);
	ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
}

/*
 * Send one SCSI command through the UFS transfer request list (always on
 * TASK_TAG) and decode the OCS/UPIU response.  Timeouts and failing
 * TEST UNIT READY commands are retried up to 3 times.
 *
 * Returns 0 on success, -EINVAL on any transport or SCSI-status error.
 */
int ufs_send_scsi_cmd(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	u32 upiu_flags;
	int ocs, result = 0, retry_count = 3;
	u8 scsi_status;

	/* cmd do not set lun for ufs 2.1 */
	if (hba->dev_desc->w_spec_version == 0x1002) /* version 0x0210 stored big-endian */
		pccb->cmd[1] &= 0x1F;
retry:
	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
	prepare_prdt_table(hba, pccb);

	if (ufshcd_send_command(hba, TASK_TAG) == -ETIMEDOUT && retry_count) {
		retry_count--;
		goto retry;
	}

	ocs = ufshcd_get_tr_ocs(hba);
	switch (ocs) {
	case 
OCS_SUCCESS:
		result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);

			scsi_status = result & MASK_SCSI_STATUS;
			if (pccb->cmd[0] == SCSI_TST_U_RDY && scsi_status) {
				/* Test ready cmd will fail with Phison UFS, break to continue */
				if (retry_count) {
					retry_count--;
					goto retry;
				}
				/* retries exhausted: treat as success for TUR */
				break;
			}
			if (scsi_status)
				return -EINVAL;

			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			return -EINVAL;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			return -EINVAL;
		}
		break;
	default:
		dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
		return -EINVAL;
	}

	return 0;
}

/* SCSI uclass .exec hook: forward the command to the parent UFS host */
static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
{
	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);

	return ufs_send_scsi_cmd(hba, pccb);
}

/* Read a whole descriptor (offset 0) into @buf */
static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, u8 *buf, u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}

/**
 * ufshcd_read_string_desc - read string descriptor
 *
 * Reads string descriptor @desc_index into @buf.  When @ascii is true the
 * UTF-16 payload is converted in place to UTF-8 (after the descriptor
 * header) with non-printable characters replaced by spaces, and the
 * length byte is rewritten accordingly.
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
			       size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		u8 *buff_ascii;

		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ALIGN(ascii_len, ARCH_DMA_MINALIGN), GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16_to_utf8(buff_ascii,
			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		/* clear the UTF-16 payload, then drop in the UTF-8 string */
		memset(buf + QUERY_DESC_HDR_SIZE, 0,
		       size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}

/*
 * Read the device descriptor into @dev_desc, truncated to the size the
 * device actually advertises (hba->desc_size.dev_desc).
 */
static int ufs_get_device_desc(struct ufs_hba *hba, struct ufs_device_descriptor *dev_desc)
{
	int err;
	size_t buff_len;

	buff_len = sizeof(*dev_desc);
	if (buff_len > hba->desc_size.dev_desc)
		buff_len = hba->desc_size.dev_desc;

	err = ufshcd_read_device_desc(hba, (u8 *)dev_desc, buff_len);
	if (err)
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);

	return err;
}

/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 *
 * Fills hba->max_pwr_info with the fastest gear/lane configuration both
 * sides support; the result is cached (is_valid) for later calls.
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	/* start from the fastest assumption; downgrade below if needed */
	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		/* no HS capability on RX: fall back to PWM gear/SLOW mode */
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	/* peer's max RX gear is our max TX gear */
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}

/*
 * Program the PA-layer attributes for @pwr_mode and trigger the UIC power
 * mode change.  On success the new mode is copied into hba->pwr_info.
 */
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	/* RX termination is only used for HS (fast) modes */
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	/* PA_PWRMODE encodes RX mode in the high nibble, TX in the low */
	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);

		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 *
 * Sends NOP OUT and waits for NOP IN, retrying up to NOP_OUT_RETRIES
 * times; a timeout is treated as final (no further retries).
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int retries;
	int err;

	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);
		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); 1838 1839 return err; 1840 } 1841 1842 /** 1843 * ufshcd_complete_dev_init() - checks device readiness 1844 */ 1845 static int ufshcd_complete_dev_init(struct ufs_hba *hba) 1846 { 1847 unsigned long start = 0; 1848 int i; 1849 int err; 1850 bool flag_res = 1; 1851 1852 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, 1853 QUERY_FLAG_IDN_FDEVICEINIT, NULL); 1854 if (err) { 1855 dev_err(hba->dev, 1856 "%s setting fDeviceInit flag failed with error %d\n", 1857 __func__, err); 1858 goto out; 1859 } 1860 1861 /* poll for max. 1500ms for fDeviceInit flag to clear */ 1862 start = get_timer(0); 1863 for (i = 0; i < 3000 && !err && flag_res; i++) { 1864 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, 1865 QUERY_FLAG_IDN_FDEVICEINIT, 1866 &flag_res); 1867 if (get_timer(start) > FDEVICEINIT_COMPL_TIMEOUT) 1868 break; 1869 udelay(500); 1870 } 1871 1872 if (err) 1873 dev_err(hba->dev, 1874 "%s reading fDeviceInit flag failed with error %d\n", 1875 __func__, err); 1876 else if (flag_res) 1877 dev_err(hba->dev, 1878 "%s fDeviceInit was not cleared by the device\n", 1879 __func__); 1880 1881 out: 1882 return err; 1883 } 1884 1885 static void ufshcd_def_desc_sizes(struct ufs_hba *hba) 1886 { 1887 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; 1888 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; 1889 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; 1890 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; 1891 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; 1892 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; 1893 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; 1894 } 1895 1896 int _ufs_start(struct ufs_hba *hba) 1897 { 1898 int ret; 1899 1900 ret = ufshcd_link_startup(hba); 1901 if (ret) 1902 return ret; 1903 1904 ret = ufshcd_verify_dev_init(hba); 1905 if (ret) 1906 return ret; 1907 1908 ret = ufshcd_complete_dev_init(hba); 1909 if 
(ret) 1910 return ret; 1911 1912 /* Init check for device descriptor sizes */ 1913 ufshcd_init_desc_sizes(hba); 1914 1915 ret = ufs_get_device_desc(hba, hba->dev_desc); 1916 if (ret) { 1917 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", 1918 __func__, ret); 1919 1920 return ret; 1921 } 1922 1923 return ret; 1924 } 1925 1926 int ufs_start(struct ufs_hba *hba) 1927 { 1928 int ret; 1929 1930 ret = _ufs_start(hba); 1931 if (ret) 1932 return ret; 1933 1934 #if defined(CONFIG_SUPPORT_USBPLUG) 1935 ret = ufs_create_partition_inventory(hba); 1936 if (ret) { 1937 dev_err(hba->dev, "%s: Failed to creat partition. err = %d\n", __func__, ret); 1938 return ret; 1939 } 1940 #endif 1941 if (ufshcd_get_max_pwr_mode(hba)) { 1942 dev_err(hba->dev, 1943 "%s: Failed getting max supported power mode\n", 1944 __func__); 1945 } else { 1946 ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info); 1947 if (ret) { 1948 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", 1949 __func__, ret); 1950 1951 return ret; 1952 } 1953 1954 printf("Device at %s up at:", hba->dev->name); 1955 ufshcd_print_pwr_info(hba); 1956 } 1957 1958 #if defined(CONFIG_ROCKCHIP_UFS_RPMB) 1959 ufs_rpmb_init(hba); 1960 #endif 1961 1962 return 0; 1963 } 1964 1965 int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops) 1966 { 1967 struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev); 1968 struct scsi_platdata *scsi_plat; 1969 struct udevice *scsi_dev; 1970 int err; 1971 1972 device_find_first_child(ufs_dev, &scsi_dev); 1973 if (!scsi_dev) 1974 return -ENODEV; 1975 1976 scsi_plat = dev_get_uclass_platdata(scsi_dev); 1977 scsi_plat->max_id = UFSHCD_MAX_ID; 1978 scsi_plat->max_lun = UFS_MAX_LUNS; 1979 //scsi_plat->max_bytes_per_req = UFS_MAX_BYTES; 1980 1981 hba->dev = ufs_dev; 1982 hba->ops = hba_ops; 1983 hba->mmio_base = (void *)dev_read_addr(ufs_dev); 1984 1985 /* Set descriptor lengths to specification defaults */ 1986 ufshcd_def_desc_sizes(hba); 1987 1988 
	/* platform-specific controller init hook */
	ufshcd_ops_init(hba);

	/* Read capabilities registers */
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* Get UFS version supported by the controller */
	hba->version = ufshcd_get_ufs_version(hba);
	if (hba->version != UFSHCI_VERSION_10 &&
	    hba->version != UFSHCI_VERSION_11 &&
	    hba->version != UFSHCI_VERSION_20 &&
	    hba->version != UFSHCI_VERSION_21)
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		return err;
	}

	/* Configure Local data structures */
	ufshcd_host_memory_configure(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);

	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		return err;
	}

	err = ufs_start(hba);
	if (err)
		return err;

	return 0;
}

/* Bind the ufs_scsi child device that exposes this host to the SCSI stack */
int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				     scsi_devp);

	return ret;
}

static struct scsi_ops ufs_ops = {
	.exec = ufs_scsi_exec,
};

/* Probe a single UFS controller by uclass index */
int ufs_probe_dev(int index)
{
	struct udevice *dev;

	return uclass_get_device(UCLASS_UFS, index, &dev);
}

/*
 * Probe every UFS controller in the uclass.  Per-device probe errors
 * other than -ENODEV are ignored so one bad controller does not stop
 * the enumeration; -ENODEV marks the end of the list.
 */
int ufs_probe(void)
{
	struct udevice *dev;
	int ret, i;

	for (i = 0;; i++) {
		ret = uclass_get_device(UCLASS_UFS, i, &dev);
		if (ret == -ENODEV)
			break;
	}

	return 0;
}

U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};