// SPDX-License-Identifier: GPL-2.0+
/**
 * ufs.c - Universal Flash Subsystem (UFS) driver
 *
 * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
 * to u-boot.
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <charset.h>
#include <common.h>
#include <dm.h>
#include <log.h>
#include <dm/lists.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <hexdump.h>
#include <scsi.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>

#if defined(CONFIG_SUPPORT_USBPLUG)
#include "ufs-rockchip-usbplug.h"
#endif
#include "ufs.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* maximum timeout in ms for a general UIC command */
#define UFS_UIC_CMD_TIMEOUT 1000
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT 30 /* msecs */

/* Only use one Task Tag for all requests */
#define TASK_TAG 0

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

#define MAX_PRDT_ENTRY 262144

/* maximum bytes per request */
#define UFS_MAX_BYTES (128 * 256 * 1024)

static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
static inline void ufshcd_hba_stop(struct ufs_hba *hba);
static int ufshcd_hba_enable(struct ufs_hba *hba);

/*
 * ufshcd_wait_for_register - wait for register value to change
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long start = get_timer(0);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (get_timer(start) > timeout_ms) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
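/*
 * As an illustration of the format string above, a link that negotiated
 * HS-G3 on two lanes in rate B would be reported roughly as:
 *
 *	[RX, TX]: gear=[3, 3], lane[2, 2], pwr[FAST MODE, FAST MODE], rate = 2
 */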
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 * to accept UIC commands
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 * the host controller
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
		DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_send_uic_cmd - UFS Interconnect layer command API
 */
static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	unsigned long start = 0;
	u32 intr_status;
	u32 enabled_intr_status;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	debug("sending uic command:%d\n", uic_cmd->command);

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UIC response\n");

			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);

			return -1;
		}
	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));

	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);

	debug("Sent successfully\n");

	return 0;
}
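/*
 * The DME helpers below are thin wrappers around ufshcd_send_uic_cmd():
 * argument1 packs the MIB attribute ID (upper 16 bits) and the GenSelector
 * index (lower 16 bits), and argument3 carries the value. A sketch of a
 * typical caller (illustrative only, not invoked here):
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), 3);
 *
 * issues a DME_SET of PA_TXGEAR, selector 0, to gear 3.
 */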
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
			u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	return ret;
}

static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	u32 tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
					UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);
	return ret;
}
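/*
 * DME_RESET and DME_ENABLE re-initialize the local UniPro stack; callers
 * that need a full UniPro restart typically issue them back to back, e.g.
 * (sketch):
 *
 *	ufshcd_dme_reset(hba);
 *	ufshcd_dme_enable(hba);
 */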
%d\n", ret); 378 return ret; 379 } 380 381 /** 382 * ufshcd_disable_intr_aggr - Disables interrupt aggregation. 383 * 384 */ 385 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) 386 { 387 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 388 } 389 390 /** 391 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY 392 */ 393 static inline int ufshcd_get_lists_status(u32 reg) 394 { 395 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); 396 } 397 398 /** 399 * ufshcd_enable_run_stop_reg - Enable run-stop registers, 400 * When run-stop registers are set to 1, it indicates the 401 * host controller that it can process the requests 402 */ 403 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) 404 { 405 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, 406 REG_UTP_TASK_REQ_LIST_RUN_STOP); 407 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, 408 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); 409 } 410 411 /** 412 * ufshcd_enable_intr - enable interrupts 413 */ 414 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) 415 { 416 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); 417 u32 rw; 418 419 if (hba->version == UFSHCI_VERSION_10) { 420 rw = set & INTERRUPT_MASK_RW_VER_10; 421 set = rw | ((set ^ intrs) & intrs); 422 } else { 423 set |= intrs; 424 } 425 426 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); 427 428 hba->intr_mask = set; 429 } 430 431 /** 432 * ufshcd_make_hba_operational - Make UFS controller operational 433 * 434 * To bring UFS host controller to operational state, 435 * 1. Enable required interrupts 436 * 2. Configure interrupt aggregation 437 * 3. Program UTRL and UTMRL base address 438 * 4. Configure run-stop-registers 439 * 440 */ 441 static int ufshcd_make_hba_operational(struct ufs_hba *hba) 442 { 443 int err = 0; 444 u32 reg; 445 446 /* Enable required interrupts */ 447 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); 448 449 /* Disable interrupt aggregation */ 450 ufshcd_disable_intr_aggr(hba); 451 452 /* Configure UTRL and UTMRL base address registers */ 453 ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl), 454 REG_UTP_TRANSFER_REQ_LIST_BASE_L); 455 ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl), 456 REG_UTP_TRANSFER_REQ_LIST_BASE_H); 457 ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl), 458 REG_UTP_TASK_REQ_LIST_BASE_L); 459 ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl), 460 REG_UTP_TASK_REQ_LIST_BASE_H); 461 462 /* 463 * UCRDY, UTMRLDY and UTRLRDY bits must be 1 464 */ 465 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); 466 if (!(ufshcd_get_lists_status(reg))) { 467 ufshcd_enable_run_stop_reg(hba); 468 } else { 469 dev_err(hba->dev, 470 "Host controller not ready to process requests\n"); 471 err = -EIO; 472 goto out; 473 } 474 475 out: 476 return err; 477 } 478 479 /** 480 * ufshcd_link_startup - Initialize unipro link startup 481 */ 482 static int ufshcd_link_startup(struct ufs_hba *hba) 483 { 484 int ret; 485 int retries = DME_LINKSTARTUP_RETRIES; 486 bool link_startup_again = true; 487 488 link_startup: 489 do { 490 ufshcd_ops_link_startup_notify(hba, PRE_CHANGE); 491 492 ret = ufshcd_dme_link_startup(hba); 493 494 /* check if device is detected by inter-connect layer */ 495 if (!ret && !ufshcd_is_device_present(hba)) { 496 dev_err(hba->dev, "%s: Device not present\n", __func__); 497 ret = -ENXIO; 498 goto out; 499 } 500 501 /* 502 * DME link lost indication is only received when link is up, 503 * but we can't be sure if the link is up until link startup 504 * succeeds. 
/**
 * ufshcd_link_startup - Initialize UniPro link startup
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = true;

link_startup:
	do {
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);

	return ret;
}

/**
 * ufshcd_hba_stop - Send controller to reset state
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}

/**
 * ufshcd_is_hba_active - Get controller state
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_hba_enable - initialize the controller
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}

/**
 * ufshcd_host_memory_configure - configure local reference block with
 * memory offsets
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	u16 response_offset;
	u16 prdt_offset;

	utrdlp = hba->utrdl;
	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;

	utrdlp->command_desc_base_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
	utrdlp->command_desc_base_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));

	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
	hba->ucd_rsp_ptr =
		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
	hba->ucd_prdt_ptr =
		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
}
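/*
 * Resulting single-slot layout (sketch): the lone UTRD in hba->utrdl points
 * at hba->ucdl, which holds the command UPIU, the response UPIU and the PRD
 * table back to back:
 *
 *	hba->ucdl --> | command UPIU | response UPIU | PRD table |
 *
 * Note that the offset/length fields written above are in 32-bit dword
 * units, hence the ">> 2" conversions from byte offsets.
 */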
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	/* Allocate one Transfer Request Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
	if (!hba->utrdl) {
		dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	/* Allocate one Command Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
	if (!hba->ucdl) {
		dev_err(hba->dev, "Command descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	hba->dev_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_device_descriptor));
	if (!hba->dev_desc) {
		dev_err(hba->dev, "memory allocation failed\n");
		return -ENOMEM;
	}

#if defined(CONFIG_SUPPORT_USBPLUG)
	hba->rc_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_configuration_descriptor));
	hba->wc_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_configuration_descriptor));
	hba->geo_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_geometry_descriptor));
	if (!hba->rc_desc || !hba->wc_desc || !hba->geo_desc) {
		dev_err(hba->dev, "memory allocation failed\n");
		return -ENOMEM;
	}
#endif
	return 0;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_cache_flush_and_invalidate - Flush and invalidate cache
 *
 * Flush and invalidate cache in aligned address..address+size range.
 * The invalidation is in place to avoid stale data in cache.
 */
static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size)
{
	uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);

	flush_dcache_range(aaddr, aaddr + asize);
	invalidate_dcache_range(aaddr, aaddr + asize);
}
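/*
 * The flush/invalidate pairing above assumes descriptors live in normal,
 * possibly cached memory and the controller is not cache-coherent: the
 * flush makes CPU-written descriptors visible to the controller, and the
 * invalidate discards any cached copy so later CPU reads observe what the
 * controller wrote back.
 */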
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 */
static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
					u32 *upiu_flags,
					enum dma_data_direction cmd_dir)
{
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);

	/* Enable Interrupt for command */
	dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;

	ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
}

static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
				  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(0, query->request.query_func,
				  0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
		memcpy(ucd_req_ptr + 1, query->descriptor, len);
		ufshcd_cache_flush_and_invalidate(ucd_req_ptr,
			ALIGN(sizeof(*ucd_req_ptr) + len, ARCH_DMA_MINALIGN));
	} else {
		ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
	}

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}

/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 * for Device Management Purposes
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
				   enum dev_cmd_type cmd_type)
{
	u32 upiu_flags;
	int ret = 0;
	struct utp_transfer_req_desc *req_desc = hba->utrdl;

	hba->dev_cmd.type = cmd_type;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
	switch (cmd_type) {
	case DEV_CMD_TYPE_QUERY:
		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
		break;
	case DEV_CMD_TYPE_NOP:
		ufshcd_prepare_utp_nop_upiu(hba);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	unsigned long start;
	u32 intr_status;
	u32 enabled_intr_status;

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UTP response\n");

			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);

			return -1;
		}
	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));

	return 0;
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
}

static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

static int ufshcd_check_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
			      UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
			    GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
			   MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			int size = ALIGN(GENERAL_UPIU_REQUEST_SIZE + resp_len, ARCH_DMA_MINALIGN);

			invalidate_dcache_range((uintptr_t)hba->ucd_rsp_ptr,
						(uintptr_t)hba->ucd_rsp_ptr + size);
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	} else if (hba->dev_cmd.query.descriptor &&
		   hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_ATTR) {
		u8 *value = (u8 *)&query_res->upiu_res.value;

		hba->dev_cmd.query.descriptor[0] = value[11];
		hba->dev_cmd.query.descriptor[1] = value[10];
		hba->dev_cmd.query.descriptor[2] = value[9];
		hba->dev_cmd.query.descriptor[3] = value[8];
	}

	return 0;
}
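/*
 * A device-management round trip therefore looks like (sketch, mirroring
 * what ufshcd_exec_dev_cmd() below does):
 *
 *	ufshcd_comp_devman_upiu(hba, DEV_CMD_TYPE_NOP);
 *	ufshcd_send_command(hba, TASK_TAG);
 *	ocs = ufshcd_get_tr_ocs(hba);
 */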
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
			       int timeout)
{
	int err;
	int resp;

	err = ufshcd_comp_devman_upiu(hba, cmd_type);
	if (err)
		return err;

	err = ufshcd_send_command(hba, TASK_TAG);
	if (err)
		return err;

	err = ufshcd_get_tr_ocs(hba);
	if (err) {
		dev_err(hba->dev, "Error in OCS:%d\n", err);
		return -EINVAL;
	}

	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba);
		if (!err)
			err = ufshcd_copy_query_response(hba);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
			__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
			__func__, resp);
	}

	return err;
}

/**
 * ufshcd_init_query() - init the query response and request parameters
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
				     struct ufs_query_req **request,
				     struct ufs_query_res **response,
				     enum query_opcode opcode,
				     u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
				__func__);
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
			     MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out:
	return err;
}
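/*
 * Example (illustrative; this is what ufshcd_complete_dev_init() does with
 * the retry wrapper below):
 *
 *	bool flag_res;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */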
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
				   enum query_opcode opcode,
				   enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}

static int __ufshcd_query_descriptor(struct ufs_hba *hba,
				     enum query_opcode opcode,
				     enum desc_idn idn, u8 index, u8 selector,
				     u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
			__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->upiu_req.value = (desc_buf[0] << 24 | desc_buf[1] << 16 |
					   desc_buf[2] << 8 | desc_buf[3]);
		/* fall through */
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
			__func__, opcode, idn, index, err);
		goto out;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out:
	return err;
}

/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
				  enum desc_idn idn, u8 index, u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}

/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 */
int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
			    int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d\n",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch\n",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}
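/*
 * Descriptor lengths vary between devices and spec revisions, so each size
 * below is probed from the descriptor header at init time and falls back to
 * the spec default if the read fails.
 */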
static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
				      &hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
				      &hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
				      &hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
				      &hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
				      &hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
				      &hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
				      &hba->desc_size.hlth_desc);
	if (err)
		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}

/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				 int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_HEALTH:
		*desc_len = hba->desc_size.hlth_desc;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
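/*
 * Usage sketch (illustrative): a single byte at byte offset `off` of the
 * device descriptor can be fetched without reading the whole descriptor:
 *
 *	u8 val;
 *
 *	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, off,
 *				     &val, 1);
 */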
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 */
int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
			   int desc_index, u8 param_offset, u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Get the max length of descriptor from structure filled up at probe
	 * time.
	 */
	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);

	/* Sanity checks */
	if (ret || !buff_len) {
		dev_err(hba->dev, "%s: Failed to get full descriptor length\n",
			__func__);
		return ret;
	}

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, desc_buf,
					    &buff_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/* Check whether we would copy more data than is available */
	if (is_kmalloc && param_size > buff_len)
		param_size = buff_len;

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(uint8_t *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
 * state) and waits for it to take effect.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	unsigned long start = 0;
	u8 status;
	int ret;

	ret = ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);

		return ret;
	}

	start = get_timer(0);
	do {
		status = ufshcd_get_upmcrs(hba);
		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
				cmd->command, status);
			ret = (status != PWR_OK) ? status : -1;
			break;
		}
	} while (status != PWR_LOCAL);

	return ret;
}
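/*
 * Completion is detected by polling HCS.UPMCRS (extracted by
 * ufshcd_get_upmcrs() above) until it reports PWR_LOCAL. The mode byte
 * passed to the helper below packs RX in the upper nibble and TX in the
 * lower one, e.g. (illustrative) FAST_MODE << 4 | FAST_MODE, which is how
 * ufshcd_change_power_mode() invokes it.
 */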
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);

	return ret;
}

static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
				      struct scsi_cmd *pccb, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
				  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}

static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}
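/*
 * The PRDT byte-count field is zero-based, which is why callers pass
 * "len - 1"; OR-ing in GENMASK(1, 0) keeps the two low bits set, matching
 * the dword-granularity encoding the controller expects. A sketch of one
 * 8 KiB entry (illustrative):
 *
 *	prepare_prdt_desc(&prd_table[0], buf, 8192 - 1);
 */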
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1);
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	if (!datalen) {
		req_desc->prd_table_length = 0;
		ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
		return;
	}

	if (pccb->dma_dir == DMA_TO_DEVICE) { /* Write to device */
		flush_dcache_range(aaddr, ALIGN(aaddr + datalen + ARCH_DMA_MINALIGN - 1,
						ARCH_DMA_MINALIGN));
	}

	/* In any case, invalidate cache to avoid stale data in it. */
	invalidate_dcache_range(aaddr, ALIGN(aaddr + datalen + ARCH_DMA_MINALIGN - 1,
					     ARCH_DMA_MINALIGN));

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
	ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length);
	ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
}

static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
{
	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	u32 upiu_flags;
	int ocs, result = 0;
	u8 scsi_status;

	/* Commands do not set the LUN field in the CDB for UFS 2.1 devices */
	if (hba->dev_desc->w_spec_version == 0x1002) /* version 0x0210, stored big-endian */
		pccb->cmd[1] &= 0x1F;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
	prepare_prdt_table(hba, pccb);

	ufshcd_send_command(hba, TASK_TAG);

	ocs = ufshcd_get_tr_ocs(hba);
	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);

			scsi_status = result & MASK_SCSI_STATUS;
			if (scsi_status)
				return -EINVAL;

			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			return -EINVAL;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			return -EINVAL;
		}
		break;
	default:
		dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
		return -EINVAL;
	}

	return 0;
}

static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, u8 *buf, u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}
err = %d\n", 1618 __func__, QUERY_REQ_RETRIES, err); 1619 goto out; 1620 } 1621 1622 if (ascii) { 1623 int desc_len; 1624 int ascii_len; 1625 int i; 1626 u8 *buff_ascii; 1627 1628 desc_len = buf[0]; 1629 /* remove header and divide by 2 to move from UTF16 to UTF8 */ 1630 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; 1631 if (size < ascii_len + QUERY_DESC_HDR_SIZE) { 1632 dev_err(hba->dev, "%s: buffer allocated size is too small\n", 1633 __func__); 1634 err = -ENOMEM; 1635 goto out; 1636 } 1637 1638 buff_ascii = kmalloc(ALIGN(ascii_len, ARCH_DMA_MINALIGN), GFP_KERNEL); 1639 if (!buff_ascii) { 1640 err = -ENOMEM; 1641 goto out; 1642 } 1643 1644 /* 1645 * the descriptor contains string in UTF16 format 1646 * we need to convert to utf-8 so it can be displayed 1647 */ 1648 utf16_to_utf8(buff_ascii, 1649 (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len); 1650 1651 /* replace non-printable or non-ASCII characters with spaces */ 1652 for (i = 0; i < ascii_len; i++) 1653 ufshcd_remove_non_printable(&buff_ascii[i]); 1654 1655 memset(buf + QUERY_DESC_HDR_SIZE, 0, 1656 size - QUERY_DESC_HDR_SIZE); 1657 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); 1658 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; 1659 kfree(buff_ascii); 1660 } 1661 out: 1662 return err; 1663 } 1664 1665 static int ufs_get_device_desc(struct ufs_hba *hba, struct ufs_device_descriptor *dev_desc) 1666 { 1667 int err; 1668 size_t buff_len; 1669 1670 buff_len = sizeof(*dev_desc); 1671 if (buff_len > hba->desc_size.dev_desc) 1672 buff_len = hba->desc_size.dev_desc; 1673 1674 err = ufshcd_read_device_desc(hba, (u8 *)dev_desc, buff_len); 1675 if (err) 1676 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", 1677 __func__, err); 1678 1679 return err; 1680 } 1681 1682 /** 1683 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device 1684 */ 1685 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) 1686 { 1687 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; 1688 1689 if (hba->max_pwr_info.is_valid) 1690 return 0; 1691 1692 pwr_info->pwr_tx = FAST_MODE; 1693 pwr_info->pwr_rx = FAST_MODE; 1694 pwr_info->hs_rate = PA_HS_MODE_B; 1695 1696 /* Get the connected lane count */ 1697 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), 1698 &pwr_info->lane_rx); 1699 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 1700 &pwr_info->lane_tx); 1701 1702 if (!pwr_info->lane_rx || !pwr_info->lane_tx) { 1703 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", 1704 __func__, pwr_info->lane_rx, pwr_info->lane_tx); 1705 return -EINVAL; 1706 } 1707 1708 /* 1709 * First, get the maximum gears of HS speed. 1710 * If a zero value, it means there is no HSGEAR capability. 1711 * Then, get the maximum gears of PWM speed. 
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}

static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);

		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int retries;
	int err;

	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);
		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);

	return err;
}

/**
 * ufshcd_complete_dev_init() - checks device readiness
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = true;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					      QUERY_FLAG_IDN_FDEVICEINIT,
					      &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}

static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}

int _ufs_start(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, hba->dev_desc);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);

		return ret;
	}

	return ret;
}
err = %d\n", __func__, ret); 1918 return ret; 1919 } 1920 #endif 1921 if (ufshcd_get_max_pwr_mode(hba)) { 1922 dev_err(hba->dev, 1923 "%s: Failed getting max supported power mode\n", 1924 __func__); 1925 } else { 1926 ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info); 1927 if (ret) { 1928 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", 1929 __func__, ret); 1930 1931 return ret; 1932 } 1933 1934 printf("Device at %s up at:", hba->dev->name); 1935 ufshcd_print_pwr_info(hba); 1936 } 1937 1938 return 0; 1939 } 1940 1941 int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops) 1942 { 1943 struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev); 1944 struct scsi_platdata *scsi_plat; 1945 struct udevice *scsi_dev; 1946 int err; 1947 1948 device_find_first_child(ufs_dev, &scsi_dev); 1949 if (!scsi_dev) 1950 return -ENODEV; 1951 1952 scsi_plat = dev_get_uclass_platdata(scsi_dev); 1953 scsi_plat->max_id = UFSHCD_MAX_ID; 1954 scsi_plat->max_lun = UFS_MAX_LUNS; 1955 //scsi_plat->max_bytes_per_req = UFS_MAX_BYTES; 1956 1957 hba->dev = ufs_dev; 1958 hba->ops = hba_ops; 1959 hba->mmio_base = (void *)dev_read_addr(ufs_dev); 1960 1961 /* Set descriptor lengths to specification defaults */ 1962 ufshcd_def_desc_sizes(hba); 1963 1964 ufshcd_ops_init(hba); 1965 1966 /* Read capabilties registers */ 1967 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); 1968 1969 /* Get UFS version supported by the controller */ 1970 hba->version = ufshcd_get_ufs_version(hba); 1971 if (hba->version != UFSHCI_VERSION_10 && 1972 hba->version != UFSHCI_VERSION_11 && 1973 hba->version != UFSHCI_VERSION_20 && 1974 hba->version != UFSHCI_VERSION_21) 1975 dev_err(hba->dev, "invalid UFS version 0x%x\n", 1976 hba->version); 1977 1978 /* Get Interrupt bit mask per version */ 1979 hba->intr_mask = ufshcd_get_intr_mask(hba); 1980 1981 /* Allocate memory for host memory space */ 1982 err = ufshcd_memory_alloc(hba); 1983 if (err) { 1984 dev_err(hba->dev, "Memory allocation failed\n"); 1985 return err; 1986 } 1987 1988 /* Configure Local data structures */ 1989 ufshcd_host_memory_configure(hba); 1990 1991 /* 1992 * In order to avoid any spurious interrupt immediately after 1993 * registering UFS controller interrupt handler, clear any pending UFS 1994 * interrupt status and disable all the UFS interrupts. 1995 */ 1996 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), 1997 REG_INTERRUPT_STATUS); 1998 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); 1999 2000 err = ufshcd_hba_enable(hba); 2001 if (err) { 2002 dev_err(hba->dev, "Host controller enable failed\n"); 2003 return err; 2004 } 2005 2006 err = ufs_start(hba); 2007 if (err) 2008 return err; 2009 2010 return 0; 2011 } 2012 2013 int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp) 2014 { 2015 int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi", 2016 scsi_devp); 2017 2018 return ret; 2019 } 2020 2021 static struct scsi_ops ufs_ops = { 2022 .exec = ufs_scsi_exec, 2023 }; 2024 2025 int ufs_probe_dev(int index) 2026 { 2027 struct udevice *dev; 2028 2029 return uclass_get_device(UCLASS_UFS, index, &dev); 2030 } 2031 2032 int ufs_probe(void) 2033 { 2034 struct udevice *dev; 2035 int ret, i; 2036 2037 for (i = 0;; i++) { 2038 ret = uclass_get_device(UCLASS_UFS, i, &dev); 2039 if (ret == -ENODEV) 2040 break; 2041 } 2042 2043 return 0; 2044 } 2045 2046 U_BOOT_DRIVER(ufs_scsi) = { 2047 .id = UCLASS_SCSI, 2048 .name = "ufs_scsi", 2049 .ops = &ufs_ops, 2050 }; 2051