Lines Matching +full:ufs +full:- +full:2

1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (C) 2011-2013 Samsung India Software Operations
5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
17 #include <linux/blk-pm.h>
20 #include "ufshcd-add-info.h"
23 #include "ufs-sysfs.h"
24 #include "ufs-debugfs.h"
26 #include "ufshcd-crypto.h"
32 #include <trace/events/ufs.h>
59 /* maximum number of link-startup retries */
110 return -EINVAL; in ufshcd_dump_regs()
114 return -ENOMEM; in ufshcd_dump_regs()
135 UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
136 UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
157 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
165 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
167 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
169 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
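The three UFSHCD_EH_IN_PROGRESS helpers above are the standard set/test/clear bit-flag idiom on hba->eh_flags. A minimal, self-contained sketch of the same pattern (names here are illustrative, not the driver's):

#include <stdio.h>

#define EH_IN_PROGRESS (1 << 0)	/* mirrors UFSHCD_EH_IN_PROGRESS */

struct host { unsigned int eh_flags; };

#define eh_in_progress(h)	((h)->eh_flags & EH_IN_PROGRESS)
#define eh_set_in_progress(h)	((h)->eh_flags |= EH_IN_PROGRESS)
#define eh_clear_in_progress(h)	((h)->eh_flags &= ~EH_IN_PROGRESS)

int main(void)
{
	struct host h = { 0 };

	eh_set_in_progress(&h);
	printf("in progress: %d\n", !!eh_in_progress(&h));	/* 1 */
	eh_clear_in_progress(&h);
	printf("in progress: %d\n", !!eh_in_progress(&h));	/* 0 */
	return 0;
}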
209 /* UFS device deviations (quirks) table */
263 return tag >= 0 && tag < hba->nutrs; in ufshcd_valid_tag()
268 if (!hba->is_irq_enabled) { in ufshcd_enable_irq()
269 enable_irq(hba->irq); in ufshcd_enable_irq()
270 hba->is_irq_enabled = true; in ufshcd_enable_irq()
276 if (hba->is_irq_enabled) { in ufshcd_disable_irq()
277 disable_irq(hba->irq); in ufshcd_disable_irq()
278 hba->is_irq_enabled = false; in ufshcd_disable_irq()
291 dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret); in ufshcd_wb_config()
293 dev_info(hba->dev, "%s: Write Booster Configured\n", __func__); in ufshcd_wb_config()
296 dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n", in ufshcd_wb_config()
298 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)) in ufshcd_wb_config()
304 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) in ufshcd_scsi_unblock_requests()
305 scsi_unblock_requests(hba->host); in ufshcd_scsi_unblock_requests()
310 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) in ufshcd_scsi_block_requests()
311 scsi_block_requests(hba->host); in ufshcd_scsi_block_requests()
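ufshcd_scsi_block_requests()/ufshcd_scsi_unblock_requests() refcount the block: only the 0 -> 1 transition blocks the host, and only the last release unblocks it. A runnable sketch of that counting pattern using C11 atomics (the fake_* stubs stand in for the SCSI midlayer calls):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int block_cnt;

static void fake_block(void)   { puts("blocked"); }	/* scsi_block_requests() stand-in */
static void fake_unblock(void) { puts("unblocked"); }	/* scsi_unblock_requests() stand-in */

static void block_requests(void)
{
	/* fetch_add returns the old value: 0 means we are the first holder */
	if (atomic_fetch_add(&block_cnt, 1) == 0)
		fake_block();
}

static void unblock_requests(void)
{
	/* old value 1 means the counter just dropped to 0: last holder */
	if (atomic_fetch_sub(&block_cnt, 1) == 1)
		fake_unblock();
}

int main(void)
{
	block_requests();	/* prints "blocked" */
	block_requests();	/* nested: no output */
	unblock_requests();	/* still held */
	unblock_requests();	/* prints "unblocked" */
	return 0;
}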
317 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; in ufshcd_add_cmd_upiu_trace()
319 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb); in ufshcd_add_cmd_upiu_trace()
325 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; in ufshcd_add_query_upiu_trace()
327 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr); in ufshcd_add_query_upiu_trace()
333 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; in ufshcd_add_tm_upiu_trace()
336 trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header, in ufshcd_add_tm_upiu_trace()
337 &descp->input_param1); in ufshcd_add_tm_upiu_trace()
352 cmd = ucmd->command; in ufshcd_add_uic_command_trace()
356 trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd, in ufshcd_add_uic_command_trace()
365 sector_t lba = -1; in ufshcd_add_command_trace()
368 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_add_command_trace()
369 struct scsi_cmnd *cmd = lrbp->cmd; in ufshcd_add_command_trace()
370 int transfer_len = -1; in ufshcd_add_command_trace()
382 opcode = cmd->cmnd[0]; in ufshcd_add_command_trace()
388 if (cmd->request && cmd->request->bio) in ufshcd_add_command_trace()
389 lba = cmd->request->bio->bi_iter.bi_sector; in ufshcd_add_command_trace()
391 lrbp->ucd_req_ptr->sc.exp_data_transfer_len); in ufshcd_add_command_trace()
393 group_id = lrbp->cmd->cmnd[6]; in ufshcd_add_command_trace()
395 if (cmd->request) { in ufshcd_add_command_trace()
397 transfer_len = blk_rq_bytes(cmd->request); in ufshcd_add_command_trace()
404 trace_ufshcd_command(dev_name(hba->dev), str, tag, in ufshcd_add_command_trace()
411 struct list_head *head = &hba->clk_list_head; in ufshcd_print_clk_freqs()
417 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq && in ufshcd_print_clk_freqs()
418 clki->max_freq) in ufshcd_print_clk_freqs()
419 dev_err(hba->dev, "clk: %s, rate: %u\n", in ufshcd_print_clk_freqs()
420 clki->name, clki->curr_freq); in ufshcd_print_clk_freqs()
434 e = &hba->ufs_stats.event[id]; in ufshcd_print_evt()
437 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH; in ufshcd_print_evt()
439 if (e->tstamp[p] == 0) in ufshcd_print_evt()
441 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, in ufshcd_print_evt()
442 e->val[p], ktime_to_us(e->tstamp[p])); in ufshcd_print_evt()
447 dev_err(hba->dev, "No record of %s\n", err_name); in ufshcd_print_evt()
481 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_print_trs()
482 lrbp = &hba->lrb[tag]; in ufshcd_print_trs()
484 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", in ufshcd_print_trs()
485 tag, ktime_to_us(lrbp->issue_time_stamp)); in ufshcd_print_trs()
486 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", in ufshcd_print_trs()
487 tag, ktime_to_us(lrbp->compl_time_stamp)); in ufshcd_print_trs()
488 dev_err(hba->dev, in ufshcd_print_trs()
489 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", in ufshcd_print_trs()
490 tag, (u64)lrbp->utrd_dma_addr); in ufshcd_print_trs()
492 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr, in ufshcd_print_trs()
494 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, in ufshcd_print_trs()
495 (u64)lrbp->ucd_req_dma_addr); in ufshcd_print_trs()
496 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr, in ufshcd_print_trs()
498 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, in ufshcd_print_trs()
499 (u64)lrbp->ucd_rsp_dma_addr); in ufshcd_print_trs()
500 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr, in ufshcd_print_trs()
504 lrbp->utr_descriptor_ptr->prd_table_length); in ufshcd_print_trs()
505 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_print_trs()
506 prdt_length /= hba->sg_entry_size; in ufshcd_print_trs()
508 dev_err(hba->dev, in ufshcd_print_trs()
509 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", in ufshcd_print_trs()
511 (u64)lrbp->ucd_prdt_dma_addr); in ufshcd_print_trs()
514 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, in ufshcd_print_trs()
515 hba->sg_entry_size * prdt_length); in ufshcd_print_trs()
523 for_each_set_bit(tag, &bitmap, hba->nutmrs) { in ufshcd_print_tmrs()
524 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; in ufshcd_print_tmrs()
526 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); in ufshcd_print_tmrs()
533 struct scsi_device *sdev_ufs = hba->sdev_ufs_device; in ufshcd_print_host_state()
535 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); in ufshcd_print_host_state()
536 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", in ufshcd_print_host_state()
537 hba->outstanding_reqs, hba->outstanding_tasks); in ufshcd_print_host_state()
538 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", in ufshcd_print_host_state()
539 hba->saved_err, hba->saved_uic_err); in ufshcd_print_host_state()
540 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", in ufshcd_print_host_state()
541 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_print_host_state()
542 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", in ufshcd_print_host_state()
543 hba->pm_op_in_progress, hba->is_sys_suspended); in ufshcd_print_host_state()
544 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", in ufshcd_print_host_state()
545 hba->auto_bkops_enabled, hba->host->host_self_blocked); in ufshcd_print_host_state()
546 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); in ufshcd_print_host_state()
547 dev_err(hba->dev, in ufshcd_print_host_state()
549 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), in ufshcd_print_host_state()
550 hba->ufs_stats.hibern8_exit_cnt); in ufshcd_print_host_state()
551 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", in ufshcd_print_host_state()
552 ktime_to_us(hba->ufs_stats.last_intr_ts), in ufshcd_print_host_state()
553 hba->ufs_stats.last_intr_status); in ufshcd_print_host_state()
554 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", in ufshcd_print_host_state()
555 hba->eh_flags, hba->req_abort_count); in ufshcd_print_host_state()
556 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", in ufshcd_print_host_state()
557 hba->ufs_version, hba->capabilities, hba->caps); in ufshcd_print_host_state()
558 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, in ufshcd_print_host_state()
559 hba->dev_quirks); in ufshcd_print_host_state()
561 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", in ufshcd_print_host_state()
562 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev); in ufshcd_print_host_state()
568 * ufshcd_print_pwr_info - print power params as saved in hba
570 * @hba: per-adapter instance
584 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", in ufshcd_print_pwr_info()
586 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, in ufshcd_print_pwr_info()
587 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, in ufshcd_print_pwr_info()
588 names[hba->pwr_info.pwr_rx], in ufshcd_print_pwr_info()
589 names[hba->pwr_info.pwr_tx], in ufshcd_print_pwr_info()
590 hba->pwr_info.hs_rate); in ufshcd_print_pwr_info()
606 * ufshcd_wait_for_register - wait for register value to change
607 * @hba: per-adapter instance
615 * -ETIMEDOUT on error, zero on success.
631 err = -ETIMEDOUT; in ufshcd_wait_for_register()
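ufshcd_wait_for_register() is a bounded poll: read the register, compare the masked value, and give up with -ETIMEDOUT once the deadline passes (with a final re-read to close the race where the bit flips right at the deadline). A userspace sketch of that loop against a plain variable instead of an MMIO register:

#include <errno.h>
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Returns 0 when (reg & mask) == val before timeout_ms, else -ETIMEDOUT. */
static int wait_for_register(volatile unsigned int *reg, unsigned int mask,
			     unsigned int val, long long timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	while ((*reg & mask) != val) {
		if (now_ms() > deadline) {
			if ((*reg & mask) == val)	/* final re-check */
				break;
			return -ETIMEDOUT;
		}
	}
	return 0;
}

int main(void)
{
	volatile unsigned int fake_reg = 0x1;	/* MMIO register stand-in */

	printf("err=%d\n", wait_for_register(&fake_reg, 0x1, 0x1, 10));	/* 0 */
	printf("err=%d\n", wait_for_register(&fake_reg, 0x2, 0x2, 10));	/* -ETIMEDOUT */
	return 0;
}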
640 * ufshcd_get_intr_mask - Get the interrupt bit mask
647 if (hba->ufs_version == ufshci_version(1, 0)) in ufshcd_get_intr_mask()
649 if (hba->ufs_version <= ufshci_version(2, 0)) in ufshcd_get_intr_mask()
656 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
665 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) in ufshcd_get_ufs_version()
673 * function, we convert it to the same scheme as UFS 2.0+. in ufshcd_get_ufs_version()
682 * ufshcd_is_device_present - Check if any device connected to
695 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
703 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; in ufshcd_get_tr_ocs()
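ufshcd_get_tr_ocs() shows the recurring decode pattern in this file: descriptor words sit little-endian in DMA memory, so each field is byte-swapped with le32_to_cpu() and then masked out. A portable illustration (the 0x0f mask value here is illustrative; the driver's MASK_OCS lives in ufshci.h):

#include <stdint.h>
#include <stdio.h>

#define MASK_OCS 0x0f	/* illustrative; see ufshci.h for the real value */

/* Assemble a host-order u32 from little-endian descriptor bytes. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* dword_2 as it would sit in a DMA'd descriptor: OCS = 0x06 */
	uint8_t dword_2[4] = { 0x06, 0x00, 0x00, 0x00 };

	printf("OCS = 0x%x\n", le32_to_host(dword_2) & MASK_OCS);
	return 0;
}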
707 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
713 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utrl_clear()
721 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
727 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utmrl_clear()
734 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
740 clear_bit(tag, &hba->outstanding_reqs); in ufshcd_outstanding_req_clear()
744 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
755 * ufshcd_get_uic_cmd_result - Get the UIC command result
768 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
780 * ufshcd_get_req_rsp - returns the TR response transaction type
786 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; in ufshcd_get_req_rsp()
790 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
799 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; in ufshcd_get_rsp_upiu_result()
803 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
812 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & in ufshcd_get_rsp_upiu_data_seg_len()
817 * ufshcd_is_exception_event - Check if the device raised an exception event
827 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & in ufshcd_is_exception_event()
832 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
844 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
859 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
868 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
869 * When run-stop registers are set to 1, it indicates the
882 * ufshcd_hba_start - Start controller initialization sequence
896 * ufshcd_is_hba_active - Get controller state
910 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_get_local_unipro_ver()
935 * ufshcd_set_clk_freq - set UFS controller clock frequencies
946 struct list_head *head = &hba->clk_list_head; in ufshcd_set_clk_freq()
952 if (!IS_ERR_OR_NULL(clki->clk)) { in ufshcd_set_clk_freq()
953 if (scale_up && clki->max_freq) { in ufshcd_set_clk_freq()
954 if (clki->curr_freq == clki->max_freq) in ufshcd_set_clk_freq()
957 ret = clk_set_rate(clki->clk, clki->max_freq); in ufshcd_set_clk_freq()
959 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
960 __func__, clki->name, in ufshcd_set_clk_freq()
961 clki->max_freq, ret); in ufshcd_set_clk_freq()
964 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
965 "scaled up", clki->name, in ufshcd_set_clk_freq()
966 clki->curr_freq, in ufshcd_set_clk_freq()
967 clki->max_freq); in ufshcd_set_clk_freq()
969 clki->curr_freq = clki->max_freq; in ufshcd_set_clk_freq()
971 } else if (!scale_up && clki->min_freq) { in ufshcd_set_clk_freq()
972 if (clki->curr_freq == clki->min_freq) in ufshcd_set_clk_freq()
975 ret = clk_set_rate(clki->clk, clki->min_freq); in ufshcd_set_clk_freq()
977 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
978 __func__, clki->name, in ufshcd_set_clk_freq()
979 clki->min_freq, ret); in ufshcd_set_clk_freq()
982 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
983 "scaled down", clki->name, in ufshcd_set_clk_freq()
984 clki->curr_freq, in ufshcd_set_clk_freq()
985 clki->min_freq); in ufshcd_set_clk_freq()
986 clki->curr_freq = clki->min_freq; in ufshcd_set_clk_freq()
989 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, in ufshcd_set_clk_freq()
990 clki->name, clk_get_rate(clki->clk)); in ufshcd_set_clk_freq()
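ufshcd_set_clk_freq() simply walks hba->clk_list_head and retunes every clock toward its max (scale up) or min (scale down) rate, skipping clocks that have no target or are already there. The same control flow in a self-contained form, with clk_set_rate() stubbed out by an assignment:

#include <stdbool.h>
#include <stdio.h>

struct clk_info {
	const char *name;
	unsigned long curr_freq, min_freq, max_freq;
};

static int set_clk_freq(struct clk_info *clks, int n, bool scale_up)
{
	for (int i = 0; i < n; i++) {
		struct clk_info *c = &clks[i];
		unsigned long target = scale_up ? c->max_freq : c->min_freq;

		if (!target || c->curr_freq == target)
			continue;	/* no limit set, or already there */

		printf("%s: %lu -> %lu Hz\n", c->name, c->curr_freq, target);
		c->curr_freq = target;	/* clk_set_rate() would go here */
	}
	return 0;
}

int main(void)
{
	struct clk_info clks[] = {
		{ "core_clk", 100000000, 100000000, 200000000 },
		{ "ref_clk",   19200000,  19200000,  19200000 },
	};

	set_clk_freq(clks, 2, true);	/* scale up: only core_clk moves */
	return 0;
}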
998 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1023 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_scale_clks()
1030 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1040 struct list_head *head = &hba->clk_list_head; in ufshcd_is_devfreq_scaling_required()
1046 if (!IS_ERR_OR_NULL(clki->clk)) { in ufshcd_is_devfreq_scaling_required()
1047 if (scale_up && clki->max_freq) { in ufshcd_is_devfreq_scaling_required()
1048 if (clki->curr_freq == clki->max_freq) in ufshcd_is_devfreq_scaling_required()
1051 } else if (!scale_up && clki->min_freq) { in ufshcd_is_devfreq_scaling_required()
1052 if (clki->curr_freq == clki->min_freq) in ufshcd_is_devfreq_scaling_required()
1073 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1080 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { in ufshcd_wait_for_doorbell_clr()
1081 ret = -EBUSY; in ufshcd_wait_for_doorbell_clr()
1094 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1106 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1110 dev_err(hba->dev, in ufshcd_wait_for_doorbell_clr()
1113 ret = -EBUSY; in ufshcd_wait_for_doorbell_clr()
1116 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1122 * ufshcd_scale_gear - scale up/down UFS gear
1127 * Returns -EBUSY if scaling can't happen at this time
1128 * Returns non-zero for any other errors
1136 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info, in ufshcd_scale_gear()
1139 memcpy(&new_pwr_info, &hba->pwr_info, in ufshcd_scale_gear()
1142 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear || in ufshcd_scale_gear()
1143 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) { in ufshcd_scale_gear()
1145 memcpy(&hba->clk_scaling.saved_pwr_info.info, in ufshcd_scale_gear()
1146 &hba->pwr_info, in ufshcd_scale_gear()
1150 new_pwr_info.gear_tx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1151 new_pwr_info.gear_rx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1158 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", in ufshcd_scale_gear()
1160 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, in ufshcd_scale_gear()
1175 down_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1177 if (!hba->clk_scaling.is_allowed || in ufshcd_clock_scaling_prepare()
1179 ret = -EBUSY; in ufshcd_clock_scaling_prepare()
1180 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1195 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_unprepare()
1197 up_read(&hba->clk_scaling_lock); in ufshcd_clock_scaling_unprepare()
1203 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1208 * Returns -EBUSY if scaling can't happen at this time
1209 * Returns non-zero for any other errors
1244 downgrade_write(&hba->clk_scaling_lock); in ufshcd_devfreq_scale()
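The prepare/unprepare pair brackets scaling with clk_scaling_lock taken for writing, and ufshcd_devfreq_scale() calls downgrade_write() once the clocks are switched so normal I/O (readers) can resume while the slower gear change finishes. POSIX has no atomic downgrade, so this runnable approximation releases and re-acquires, which the comment flags as weaker than the kernel primitive:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t scaling_lock = PTHREAD_RWLOCK_INITIALIZER;

static void devfreq_scale(void)
{
	pthread_rwlock_wrlock(&scaling_lock);	/* prepare: fence out all I/O */
	puts("clocks switched with I/O fenced");

	/*
	 * The kernel's downgrade_write() atomically converts the write lock
	 * into a read lock; this two-step version can briefly admit another
	 * writer in between, so it is an illustration only.
	 */
	pthread_rwlock_unlock(&scaling_lock);
	pthread_rwlock_rdlock(&scaling_lock);
	puts("gear changed while I/O runs again");

	pthread_rwlock_unlock(&scaling_lock);	/* unprepare */
}

int main(void)
{
	devfreq_scale();
	return 0;
}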
1259 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1260 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { in ufshcd_clk_scaling_suspend_work()
1261 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1264 hba->clk_scaling.is_suspended = true; in ufshcd_clk_scaling_suspend_work()
1265 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1276 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1277 if (!hba->clk_scaling.is_suspended) { in ufshcd_clk_scaling_resume_work()
1278 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1281 hba->clk_scaling.is_suspended = false; in ufshcd_clk_scaling_resume_work()
1282 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1284 devfreq_resume_device(hba->devfreq); in ufshcd_clk_scaling_resume_work()
1294 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_target()
1301 return -EINVAL; in ufshcd_devfreq_target()
1303 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); in ufshcd_devfreq_target()
1305 *freq = (unsigned long) clk_round_rate(clki->clk, *freq); in ufshcd_devfreq_target()
1306 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1308 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1312 if (!hba->clk_scaling.active_reqs) in ufshcd_devfreq_target()
1316 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1320 /* Decide based on the rounded-off frequency and update */ in ufshcd_devfreq_target()
1321 scale_up = *freq == clki->max_freq; in ufshcd_devfreq_target()
1323 *freq = clki->min_freq; in ufshcd_devfreq_target()
1329 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1333 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1338 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_devfreq_target()
1344 queue_work(hba->clk_scaling.workq, in ufshcd_devfreq_target()
1345 &hba->clk_scaling.suspend_work); in ufshcd_devfreq_target()
1362 struct request_queue *q = hba->cmd_queue; in ufshcd_any_tag_in_use()
1365 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy); in ufshcd_any_tag_in_use()
1373 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_devfreq_get_dev_status()
1375 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_get_dev_status()
1380 return -EINVAL; in ufshcd_devfreq_get_dev_status()
1384 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
1386 if (!scaling->window_start_t) in ufshcd_devfreq_get_dev_status()
1395 stat->current_frequency = clki->curr_freq; in ufshcd_devfreq_get_dev_status()
1396 if (scaling->is_busy_started) in ufshcd_devfreq_get_dev_status()
1397 scaling->tot_busy_t += ktime_us_delta(curr_t, in ufshcd_devfreq_get_dev_status()
1398 scaling->busy_start_t); in ufshcd_devfreq_get_dev_status()
1400 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); in ufshcd_devfreq_get_dev_status()
1401 stat->busy_time = scaling->tot_busy_t; in ufshcd_devfreq_get_dev_status()
1403 scaling->window_start_t = curr_t; in ufshcd_devfreq_get_dev_status()
1404 scaling->tot_busy_t = 0; in ufshcd_devfreq_get_dev_status()
1406 if (hba->outstanding_reqs) { in ufshcd_devfreq_get_dev_status()
1407 scaling->busy_start_t = curr_t; in ufshcd_devfreq_get_dev_status()
1408 scaling->is_busy_started = true; in ufshcd_devfreq_get_dev_status()
1410 scaling->busy_start_t = 0; in ufshcd_devfreq_get_dev_status()
1411 scaling->is_busy_started = false; in ufshcd_devfreq_get_dev_status()
1413 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
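ufshcd_devfreq_get_dev_status() reports utilization as busy time over window time: any still-open busy interval is folded into tot_busy_t, the totals are handed to devfreq, and the window restarts from now. The bookkeeping, minus the locking, as a runnable function:

#include <stdbool.h>
#include <stdio.h>

struct scaling_stats {
	long long window_start_t, busy_start_t, tot_busy_t;	/* microseconds */
	bool is_busy_started;
};

/* Close the window at 'now', report (busy, total), then start a new one. */
static void get_dev_status(struct scaling_stats *s, long long now,
			   bool still_busy, long long *busy, long long *total)
{
	if (s->is_busy_started)
		s->tot_busy_t += now - s->busy_start_t;

	*total = now - s->window_start_t;
	*busy = s->tot_busy_t;

	s->window_start_t = now;
	s->tot_busy_t = 0;
	s->busy_start_t = still_busy ? now : 0;
	s->is_busy_started = still_busy;
}

int main(void)
{
	struct scaling_stats s = { .window_start_t = 0, .busy_start_t = 200,
				   .is_busy_started = true };
	long long busy, total;

	get_dev_status(&s, 1000, false, &busy, &total);
	printf("busy %lld us of %lld us\n", busy, total);	/* 800 of 1000 */
	return 0;
}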
1419 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_init()
1429 dev_pm_opp_add(hba->dev, clki->min_freq, 0); in ufshcd_devfreq_init()
1430 dev_pm_opp_add(hba->dev, clki->max_freq, 0); in ufshcd_devfreq_init()
1432 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1433 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1434 devfreq = devfreq_add_device(hba->dev, in ufshcd_devfreq_init()
1435 &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1437 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1440 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); in ufshcd_devfreq_init()
1442 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_init()
1443 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_init()
1447 hba->devfreq = devfreq; in ufshcd_devfreq_init()
1454 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_remove()
1457 if (!hba->devfreq) in ufshcd_devfreq_remove()
1460 devfreq_remove_device(hba->devfreq); in ufshcd_devfreq_remove()
1461 hba->devfreq = NULL; in ufshcd_devfreq_remove()
1464 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_remove()
1465 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_remove()
1472 devfreq_suspend_device(hba->devfreq); in __ufshcd_suspend_clkscaling()
1473 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_suspend_clkscaling()
1474 hba->clk_scaling.window_start_t = 0; in __ufshcd_suspend_clkscaling()
1475 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_suspend_clkscaling()
1483 cancel_work_sync(&hba->clk_scaling.suspend_work); in ufshcd_suspend_clkscaling()
1484 cancel_work_sync(&hba->clk_scaling.resume_work); in ufshcd_suspend_clkscaling()
1486 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_suspend_clkscaling()
1487 if (!hba->clk_scaling.is_suspended) { in ufshcd_suspend_clkscaling()
1489 hba->clk_scaling.is_suspended = true; in ufshcd_suspend_clkscaling()
1491 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_suspend_clkscaling()
1502 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_resume_clkscaling()
1503 if (hba->clk_scaling.is_suspended) { in ufshcd_resume_clkscaling()
1505 hba->clk_scaling.is_suspended = false; in ufshcd_resume_clkscaling()
1507 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_resume_clkscaling()
1510 devfreq_resume_device(hba->devfreq); in ufshcd_resume_clkscaling()
1518 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled); in ufshcd_clkscale_enable_show()
1529 return -EINVAL; in ufshcd_clkscale_enable_store()
1531 down(&hba->host_sem); in ufshcd_clkscale_enable_store()
1533 err = -EBUSY; in ufshcd_clkscale_enable_store()
1538 if (value == hba->clk_scaling.is_enabled) in ufshcd_clkscale_enable_store()
1541 pm_runtime_get_sync(hba->dev); in ufshcd_clkscale_enable_store()
1544 hba->clk_scaling.is_enabled = value; in ufshcd_clkscale_enable_store()
1552 dev_err(hba->dev, "%s: failed to scale clocks up %d\n", in ufshcd_clkscale_enable_store()
1557 pm_runtime_put_sync(hba->dev); in ufshcd_clkscale_enable_store()
1559 up(&hba->host_sem); in ufshcd_clkscale_enable_store()
1565 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; in ufshcd_init_clk_scaling_sysfs()
1566 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; in ufshcd_init_clk_scaling_sysfs()
1567 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); in ufshcd_init_clk_scaling_sysfs()
1568 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; in ufshcd_init_clk_scaling_sysfs()
1569 hba->clk_scaling.enable_attr.attr.mode = 0644; in ufshcd_init_clk_scaling_sysfs()
1570 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) in ufshcd_init_clk_scaling_sysfs()
1571 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); in ufshcd_init_clk_scaling_sysfs()
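The sysfs hook-up above always has the same shape: fill in the show/store callbacks, name the attribute, set its mode, then register it and complain (but carry on) if registration fails. A generic model of that wiring, with device_create_file() replaced by a printing stub:

#include <stdio.h>

struct dev_attr {	/* loose stand-in for struct device_attribute */
	const char *name;
	int mode;
	int (*show)(char *buf);
	int (*store)(const char *buf);
};

static int enable_show(char *buf)        { return sprintf(buf, "%d\n", 1); }
static int enable_store(const char *buf) { (void)buf; return 0; }

static int create_file(const struct dev_attr *a)	/* device_create_file() stub */
{
	printf("registered attribute '%s' (mode %o)\n", a->name, a->mode);
	return 0;
}

int main(void)
{
	struct dev_attr attr = {
		.name  = "clkscale_enable",
		.mode  = 0644,
		.show  = enable_show,
		.store = enable_store,
	};

	if (create_file(&attr))
		fprintf(stderr, "Failed to create sysfs for %s\n", attr.name);
	return 0;
}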
1576 if (hba->clk_scaling.enable_attr.attr.name) in ufshcd_remove_clk_scaling_sysfs()
1577 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); in ufshcd_remove_clk_scaling_sysfs()
1587 if (!hba->clk_scaling.min_gear) in ufshcd_init_clk_scaling()
1588 hba->clk_scaling.min_gear = UFS_HS_G1; in ufshcd_init_clk_scaling()
1590 INIT_WORK(&hba->clk_scaling.suspend_work, in ufshcd_init_clk_scaling()
1592 INIT_WORK(&hba->clk_scaling.resume_work, in ufshcd_init_clk_scaling()
1596 hba->host->host_no); in ufshcd_init_clk_scaling()
1597 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); in ufshcd_init_clk_scaling()
1599 hba->clk_scaling.is_initialized = true; in ufshcd_init_clk_scaling()
1604 if (!hba->clk_scaling.is_initialized) in ufshcd_exit_clk_scaling()
1608 destroy_workqueue(hba->clk_scaling.workq); in ufshcd_exit_clk_scaling()
1610 hba->clk_scaling.is_initialized = false; in ufshcd_exit_clk_scaling()
1620 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_ungate_work()
1622 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_ungate_work()
1623 if (hba->clk_gating.state == CLKS_ON) { in ufshcd_ungate_work()
1624 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
1628 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
1637 hba->clk_gating.is_suspended = true; in ufshcd_ungate_work()
1641 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_ungate_work()
1646 hba->clk_gating.is_suspended = false; in ufshcd_ungate_work()
1653 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1666 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1667 hba->clk_gating.active_reqs++; in ufshcd_hold()
1670 switch (hba->clk_gating.state) { in ufshcd_hold()
1683 rc = -EAGAIN; in ufshcd_hold()
1684 hba->clk_gating.active_reqs--; in ufshcd_hold()
1687 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1688 flush_result = flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1689 if (hba->clk_gating.is_suspended && !flush_result) in ufshcd_hold()
1691 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1696 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { in ufshcd_hold()
1697 hba->clk_gating.state = CLKS_ON; in ufshcd_hold()
1698 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1699 hba->clk_gating.state); in ufshcd_hold()
1709 hba->clk_gating.state = REQ_CLKS_ON; in ufshcd_hold()
1710 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1711 hba->clk_gating.state); in ufshcd_hold()
1712 if (queue_work(hba->clk_gating.clk_gating_workq, in ufshcd_hold()
1713 &hba->clk_gating.ungate_work)) in ufshcd_hold()
1722 rc = -EAGAIN; in ufshcd_hold()
1723 hba->clk_gating.active_reqs--; in ufshcd_hold()
1727 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1728 flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1730 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1733 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", in ufshcd_hold()
1734 __func__, hba->clk_gating.state); in ufshcd_hold()
1737 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1750 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
1757 if (hba->clk_gating.is_suspended || in ufshcd_gate_work()
1758 (hba->clk_gating.state != REQ_CLKS_OFF)) { in ufshcd_gate_work()
1759 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1760 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1761 hba->clk_gating.state); in ufshcd_gate_work()
1765 if (hba->clk_gating.active_reqs in ufshcd_gate_work()
1766 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL in ufshcd_gate_work()
1767 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks in ufshcd_gate_work()
1768 || hba->active_uic_cmd || hba->uic_async_done) in ufshcd_gate_work()
1771 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
1777 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1778 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_gate_work()
1780 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1781 hba->clk_gating.state); in ufshcd_gate_work()
1802 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
1803 if (hba->clk_gating.state == REQ_CLKS_OFF) { in ufshcd_gate_work()
1804 hba->clk_gating.state = CLKS_OFF; in ufshcd_gate_work()
1805 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1806 hba->clk_gating.state); in ufshcd_gate_work()
1809 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
1820 hba->clk_gating.active_reqs--; in __ufshcd_release()
1822 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || in __ufshcd_release()
1823 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || in __ufshcd_release()
1824 hba->outstanding_tasks || in __ufshcd_release()
1825 hba->active_uic_cmd || hba->uic_async_done || in __ufshcd_release()
1826 hba->clk_gating.state == CLKS_OFF) in __ufshcd_release()
1829 hba->clk_gating.state = REQ_CLKS_OFF; in __ufshcd_release()
1830 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); in __ufshcd_release()
1831 queue_delayed_work(hba->clk_gating.clk_gating_workq, in __ufshcd_release()
1832 &hba->clk_gating.gate_work, in __ufshcd_release()
1833 msecs_to_jiffies(hba->clk_gating.delay_ms)); in __ufshcd_release()
1840 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_release()
1842 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_release()
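ufshcd_hold() and __ufshcd_release() drive a small state machine (CLKS_OFF, REQ_CLKS_ON, CLKS_ON, REQ_CLKS_OFF) under the host lock: hold bumps active_reqs and kicks the ungate work if the clocks are off, while only the last release arms the delayed gate work. A deliberately synchronous model that collapses the work items and locking into straight-line code:

#include <stdio.h>

enum clk_gating_state { CLKS_OFF, REQ_CLKS_ON, CLKS_ON, REQ_CLKS_OFF };

struct gating {
	enum clk_gating_state state;
	int active_reqs;
};

static void hold(struct gating *g)
{
	g->active_reqs++;
	if (g->state == CLKS_OFF || g->state == REQ_CLKS_OFF) {
		g->state = REQ_CLKS_ON;
		puts("queue ungate work");
		g->state = CLKS_ON;	/* as if the ungate work ran */
	}
}

static void release(struct gating *g)
{
	if (--g->active_reqs)
		return;			/* other holders remain */
	g->state = REQ_CLKS_OFF;
	puts("queue delayed gate work");
	g->state = CLKS_OFF;		/* as if gate work ran after delay_ms */
}

int main(void)
{
	struct gating g = { .state = CLKS_OFF };

	hold(&g);
	hold(&g);	/* nested hold: clocks stay on */
	release(&g);
	release(&g);	/* last release gates the clocks */
	return 0;
}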
1851 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); in ufshcd_clkgate_delay_show()
1861 return -EINVAL; in ufshcd_clkgate_delay_store()
1863 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
1864 hba->clk_gating.delay_ms = value; in ufshcd_clkgate_delay_store()
1865 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
1874 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled); in ufshcd_clkgate_enable_show()
1885 return -EINVAL; in ufshcd_clkgate_enable_store()
1889 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_enable_store()
1890 if (value == hba->clk_gating.is_enabled) in ufshcd_clkgate_enable_store()
1896 hba->clk_gating.active_reqs++; in ufshcd_clkgate_enable_store()
1898 hba->clk_gating.is_enabled = value; in ufshcd_clkgate_enable_store()
1900 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_enable_store()
1906 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; in ufshcd_init_clk_gating_sysfs()
1907 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; in ufshcd_init_clk_gating_sysfs()
1908 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); in ufshcd_init_clk_gating_sysfs()
1909 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; in ufshcd_init_clk_gating_sysfs()
1910 hba->clk_gating.delay_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
1911 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) in ufshcd_init_clk_gating_sysfs()
1912 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); in ufshcd_init_clk_gating_sysfs()
1914 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; in ufshcd_init_clk_gating_sysfs()
1915 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; in ufshcd_init_clk_gating_sysfs()
1916 sysfs_attr_init(&hba->clk_gating.enable_attr.attr); in ufshcd_init_clk_gating_sysfs()
1917 hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; in ufshcd_init_clk_gating_sysfs()
1918 hba->clk_gating.enable_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
1919 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) in ufshcd_init_clk_gating_sysfs()
1920 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); in ufshcd_init_clk_gating_sysfs()
1925 if (hba->clk_gating.delay_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
1926 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); in ufshcd_remove_clk_gating_sysfs()
1927 if (hba->clk_gating.enable_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
1928 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); in ufshcd_remove_clk_gating_sysfs()
1938 hba->clk_gating.state = CLKS_ON; in ufshcd_init_clk_gating()
1940 hba->clk_gating.delay_ms = 150; in ufshcd_init_clk_gating()
1941 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); in ufshcd_init_clk_gating()
1942 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); in ufshcd_init_clk_gating()
1945 hba->host->host_no); in ufshcd_init_clk_gating()
1946 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, in ufshcd_init_clk_gating()
1951 hba->clk_gating.is_enabled = true; in ufshcd_init_clk_gating()
1952 hba->clk_gating.is_initialized = true; in ufshcd_init_clk_gating()
1957 if (!hba->clk_gating.is_initialized) in ufshcd_exit_clk_gating()
1960 cancel_work_sync(&hba->clk_gating.ungate_work); in ufshcd_exit_clk_gating()
1961 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_exit_clk_gating()
1962 destroy_workqueue(hba->clk_gating.clk_gating_workq); in ufshcd_exit_clk_gating()
1963 hba->clk_gating.is_initialized = false; in ufshcd_exit_clk_gating()
1976 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
1977 if (!hba->clk_scaling.active_reqs++) in ufshcd_clk_scaling_start_busy()
1980 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { in ufshcd_clk_scaling_start_busy()
1981 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
1986 queue_work(hba->clk_scaling.workq, in ufshcd_clk_scaling_start_busy()
1987 &hba->clk_scaling.resume_work); in ufshcd_clk_scaling_start_busy()
1989 if (!hba->clk_scaling.window_start_t) { in ufshcd_clk_scaling_start_busy()
1990 hba->clk_scaling.window_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
1991 hba->clk_scaling.tot_busy_t = 0; in ufshcd_clk_scaling_start_busy()
1992 hba->clk_scaling.is_busy_started = false; in ufshcd_clk_scaling_start_busy()
1995 if (!hba->clk_scaling.is_busy_started) { in ufshcd_clk_scaling_start_busy()
1996 hba->clk_scaling.busy_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
1997 hba->clk_scaling.is_busy_started = true; in ufshcd_clk_scaling_start_busy()
1999 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
2004 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_clk_scaling_update_busy()
2010 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clk_scaling_update_busy()
2011 hba->clk_scaling.active_reqs--; in ufshcd_clk_scaling_update_busy()
2012 if (!hba->outstanding_reqs && scaling->is_busy_started) { in ufshcd_clk_scaling_update_busy()
2013 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), in ufshcd_clk_scaling_update_busy()
2014 scaling->busy_start_t)); in ufshcd_clk_scaling_update_busy()
2015 scaling->busy_start_t = 0; in ufshcd_clk_scaling_update_busy()
2016 scaling->is_busy_started = false; in ufshcd_clk_scaling_update_busy()
2018 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_update_busy()
2028 return -EINVAL; in ufshcd_monitor_opcode2dir()
2034 struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_should_inform_monitor()
2036 return (m->enabled && lrbp && lrbp->cmd && in ufshcd_should_inform_monitor()
2037 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) && in ufshcd_should_inform_monitor()
2038 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); in ufshcd_should_inform_monitor()
2043 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); in ufshcd_start_monitor()
2046 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_start_monitor()
2047 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) in ufshcd_start_monitor()
2048 hba->monitor.busy_start_ts[dir] = ktime_get(); in ufshcd_start_monitor()
2049 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_start_monitor()
2054 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); in ufshcd_update_monitor()
2057 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_update_monitor()
2058 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { in ufshcd_update_monitor()
2059 struct request *req = lrbp->cmd->request; in ufshcd_update_monitor()
2060 struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_update_monitor()
2063 now = lrbp->compl_time_stamp; in ufshcd_update_monitor()
2064 inc = ktime_sub(now, m->busy_start_ts[dir]); in ufshcd_update_monitor()
2065 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc); in ufshcd_update_monitor()
2066 m->nr_sec_rw[dir] += blk_rq_sectors(req); in ufshcd_update_monitor()
2069 m->nr_req[dir]++; in ufshcd_update_monitor()
2070 lat = ktime_sub(now, lrbp->issue_time_stamp); in ufshcd_update_monitor()
2071 m->lat_sum[dir] += lat; in ufshcd_update_monitor()
2072 if (m->lat_max[dir] < lat || !m->lat_max[dir]) in ufshcd_update_monitor()
2073 m->lat_max[dir] = lat; in ufshcd_update_monitor()
2074 if (m->lat_min[dir] > lat || !m->lat_min[dir]) in ufshcd_update_monitor()
2075 m->lat_min[dir] = lat; in ufshcd_update_monitor()
2077 m->nr_queued[dir]--; in ufshcd_update_monitor()
2079 m->busy_start_ts[dir] = now; in ufshcd_update_monitor()
2081 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_update_monitor()
2085 * ufshcd_send_command - Send SCSI or device management commands
2092 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; in ufshcd_send_command()
2094 lrbp->issue_time_stamp = ktime_get(); in ufshcd_send_command()
2095 lrbp->compl_time_stamp = ktime_set(0, 0); in ufshcd_send_command()
2101 if (hba->vops && hba->vops->setup_xfer_req) in ufshcd_send_command()
2102 hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); in ufshcd_send_command()
2104 set_bit(task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
2110 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_send_command()
2111 set_bit(task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
2114 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_send_command()
2121 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2127 if (lrbp->sense_buffer && in ufshcd_copy_sense_data()
2128 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { in ufshcd_copy_sense_data()
2131 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); in ufshcd_copy_sense_data()
2134 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data, in ufshcd_copy_sense_data()
2140 * ufshcd_copy_query_response() - Copy the Query Response and the data
2148 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_copy_query_response()
2150 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); in ufshcd_copy_query_response()
2153 if (hba->dev_cmd.query.descriptor && in ufshcd_copy_query_response()
2154 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { in ufshcd_copy_query_response()
2155 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + in ufshcd_copy_query_response()
2161 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & in ufshcd_copy_query_response()
2164 hba->dev_cmd.query.request.upiu_req.length); in ufshcd_copy_query_response()
2166 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); in ufshcd_copy_query_response()
2168 dev_warn(hba->dev, in ufshcd_copy_query_response()
2171 return -EINVAL; in ufshcd_copy_query_response()
2179 * ufshcd_hba_capabilities - Read controller capabilities
2188 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); in ufshcd_hba_capabilities()
2191 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; in ufshcd_hba_capabilities()
2192 hba->nutmrs = in ufshcd_hba_capabilities()
2193 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; in ufshcd_hba_capabilities()
2194 ufs_hba_add_info(hba)->reserved_slot = hba->nutrs - 1; in ufshcd_hba_capabilities()
2199 dev_err(hba->dev, "crypto setup failed\n"); in ufshcd_hba_capabilities()
2205 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2219 * ufshcd_get_upmcrs - Get the power mode change request status
2231 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2240 WARN_ON(hba->active_uic_cmd); in ufshcd_dispatch_uic_cmd()
2242 hba->active_uic_cmd = uic_cmd; in ufshcd_dispatch_uic_cmd()
2245 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); in ufshcd_dispatch_uic_cmd()
2246 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); in ufshcd_dispatch_uic_cmd()
2247 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); in ufshcd_dispatch_uic_cmd()
2252 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, in ufshcd_dispatch_uic_cmd()
2257 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2270 if (wait_for_completion_timeout(&uic_cmd->done, in ufshcd_wait_for_uic_cmd()
2272 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; in ufshcd_wait_for_uic_cmd()
2274 ret = -ETIMEDOUT; in ufshcd_wait_for_uic_cmd()
2275 dev_err(hba->dev, in ufshcd_wait_for_uic_cmd()
2277 uic_cmd->command, uic_cmd->argument3); in ufshcd_wait_for_uic_cmd()
2279 if (!uic_cmd->cmd_active) { in ufshcd_wait_for_uic_cmd()
2280 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", in ufshcd_wait_for_uic_cmd()
2282 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; in ufshcd_wait_for_uic_cmd()
2286 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2287 hba->active_uic_cmd = NULL; in ufshcd_wait_for_uic_cmd()
2288 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
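ufshcd_wait_for_uic_cmd() is the classic completion-with-timeout shape: the issuer sleeps on uic_cmd->done, the interrupt handler completes it, and a timeout maps to -ETIMEDOUT. A runnable pthread model of that handshake (the kernel's struct completion is reduced here to a mutex, a condvar, and a flag):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void complete(struct completion *c)	/* the ISR side */
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Returns true if completed in time, false on timeout (-ETIMEDOUT above). */
static bool wait_timeout(struct completion *c, int ms)
{
	struct timespec ts;
	bool ok;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += ms / 1000;
	ts.tv_nsec += (ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&c->lock);
	while (!c->done) {
		if (pthread_cond_timedwait(&c->cond, &c->lock, &ts))
			break;	/* ETIMEDOUT */
	}
	ok = c->done;
	pthread_mutex_unlock(&c->lock);
	return ok;
}

int main(void)
{
	struct completion c = { PTHREAD_MUTEX_INITIALIZER,
				PTHREAD_COND_INITIALIZER, false };

	complete(&c);	/* the ISR would do this on UIC_COMMAND_COMPL */
	printf("%s\n", wait_timeout(&c, 500) ? "completed" : "timed out");
	return 0;
}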
2294 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2308 dev_err(hba->dev, in __ufshcd_send_uic_cmd()
2310 return -EIO; in __ufshcd_send_uic_cmd()
2314 init_completion(&uic_cmd->done); in __ufshcd_send_uic_cmd()
2316 uic_cmd->cmd_active = 1; in __ufshcd_send_uic_cmd()
2323 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2334 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) in ufshcd_send_uic_cmd()
2338 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2341 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
2343 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
2347 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2354 * ufshcd_map_sg - Map scatter-gather list to PRDT
2358 * Returns 0 in case of success, non-zero value in case of failure
2369 cmd = lrbp->cmd; in ufshcd_map_sg()
2376 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_map_sg()
2377 lrbp->utr_descriptor_ptr->prd_table_length = in ufshcd_map_sg()
2378 cpu_to_le16(sg_segments * hba->sg_entry_size); in ufshcd_map_sg()
2380 lrbp->utr_descriptor_ptr->prd_table_length = in ufshcd_map_sg()
2383 prd = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; in ufshcd_map_sg()
2386 prd->size = in ufshcd_map_sg()
2387 cpu_to_le32(((u32) sg_dma_len(sg))-1); in ufshcd_map_sg()
2388 prd->base_addr = in ufshcd_map_sg()
2389 cpu_to_le32(lower_32_bits(sg->dma_address)); in ufshcd_map_sg()
2390 prd->upper_addr = in ufshcd_map_sg()
2391 cpu_to_le32(upper_32_bits(sg->dma_address)); in ufshcd_map_sg()
2392 prd->reserved = 0; in ufshcd_map_sg()
2393 prd = (void *)prd + hba->sg_entry_size; in ufshcd_map_sg()
2396 lrbp->utr_descriptor_ptr->prd_table_length = 0; in ufshcd_map_sg()
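ufshcd_map_sg() turns each scatter-gather segment into one PRDT entry: the byte count is stored minus one, and the 64-bit DMA address is split into lower and upper halves. A host-order sketch of that fill (the driver additionally cpu_to_le32()s every field; the struct layout below is illustrative):

#include <stdint.h>
#include <stdio.h>

struct sg_entry {		/* illustrative PRDT entry layout */
	uint32_t base_addr;	/* lower 32 bits of the DMA address */
	uint32_t upper_addr;	/* upper 32 bits */
	uint32_t reserved;
	uint32_t size;		/* byte count minus one */
};

struct seg { uint64_t dma_addr; uint32_t len; };

static void map_sg(struct sg_entry *prd, const struct seg *sg, int n)
{
	for (int i = 0; i < n; i++) {
		prd[i].size       = sg[i].len - 1;	/* 0-based length */
		prd[i].base_addr  = (uint32_t)sg[i].dma_addr;
		prd[i].upper_addr = (uint32_t)(sg[i].dma_addr >> 32);
		prd[i].reserved   = 0;
	}
}

int main(void)
{
	struct seg sg[2] = { { 0x1fff00000ULL, 4096 }, { 0x2000ULL, 512 } };
	struct sg_entry prd[2];

	map_sg(prd, sg, 2);
	for (int i = 0; i < 2; i++)
		printf("PRD[%d]: addr=%#x%08x len=%u\n", i,
		       prd[i].upper_addr, prd[i].base_addr, prd[i].size + 1);
	return 0;
}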
2405 * ufshcd_enable_intr - enable interrupts
2413 if (hba->ufs_version == ufshci_version(1, 0)) { in ufshcd_enable_intr()
2425 * ufshcd_disable_intr - disable interrupts
2433 if (hba->ufs_version == ufshci_version(1, 0)) { in ufshcd_disable_intr()
2447 * ufshcd_prepare_req_desc_hdr() - Fills the request header
2456 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; in ufshcd_prepare_req_desc_hdr()
2473 dword_0 = data_direction | (lrbp->command_type in ufshcd_prepare_req_desc_hdr()
2475 if (lrbp->intr_cmd) in ufshcd_prepare_req_desc_hdr()
2482 req_desc->header.dword_0 = cpu_to_le32(dword_0); in ufshcd_prepare_req_desc_hdr()
2483 req_desc->header.dword_1 = cpu_to_le32(dword_1); in ufshcd_prepare_req_desc_hdr()
2489 req_desc->header.dword_2 = in ufshcd_prepare_req_desc_hdr()
2491 req_desc->header.dword_3 = cpu_to_le32(dword_3); in ufshcd_prepare_req_desc_hdr()
2493 req_desc->prd_table_length = 0; in ufshcd_prepare_req_desc_hdr()
2497 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2505 struct scsi_cmnd *cmd = lrbp->cmd; in ufshcd_prepare_utp_scsi_cmd_upiu()
2506 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; in ufshcd_prepare_utp_scsi_cmd_upiu()
2510 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( in ufshcd_prepare_utp_scsi_cmd_upiu()
2512 lrbp->lun, lrbp->task_tag); in ufshcd_prepare_utp_scsi_cmd_upiu()
2513 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( in ufshcd_prepare_utp_scsi_cmd_upiu()
2517 ucd_req_ptr->header.dword_2 = 0; in ufshcd_prepare_utp_scsi_cmd_upiu()
2519 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); in ufshcd_prepare_utp_scsi_cmd_upiu()
2521 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); in ufshcd_prepare_utp_scsi_cmd_upiu()
2522 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); in ufshcd_prepare_utp_scsi_cmd_upiu()
2523 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); in ufshcd_prepare_utp_scsi_cmd_upiu()
2525 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); in ufshcd_prepare_utp_scsi_cmd_upiu()
2529 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2531 * @hba: UFS hba
2538 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; in ufshcd_prepare_utp_query_req_upiu()
2539 struct ufs_query *query = &hba->dev_cmd.query; in ufshcd_prepare_utp_query_req_upiu()
2540 u16 len = be16_to_cpu(query->request.upiu_req.length); in ufshcd_prepare_utp_query_req_upiu()
2543 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( in ufshcd_prepare_utp_query_req_upiu()
2545 lrbp->lun, lrbp->task_tag); in ufshcd_prepare_utp_query_req_upiu()
2546 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( in ufshcd_prepare_utp_query_req_upiu()
2547 0, query->request.query_func, 0, 0); in ufshcd_prepare_utp_query_req_upiu()
2550 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) in ufshcd_prepare_utp_query_req_upiu()
2551 ucd_req_ptr->header.dword_2 = in ufshcd_prepare_utp_query_req_upiu()
2554 ucd_req_ptr->header.dword_2 = 0; in ufshcd_prepare_utp_query_req_upiu()
2557 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, in ufshcd_prepare_utp_query_req_upiu()
2561 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) in ufshcd_prepare_utp_query_req_upiu()
2562 memcpy(ucd_req_ptr + 1, query->descriptor, len); in ufshcd_prepare_utp_query_req_upiu()
2564 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); in ufshcd_prepare_utp_query_req_upiu()
2569 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; in ufshcd_prepare_utp_nop_upiu()
2574 ucd_req_ptr->header.dword_0 = in ufshcd_prepare_utp_nop_upiu()
2576 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); in ufshcd_prepare_utp_nop_upiu()
2578 ucd_req_ptr->header.dword_1 = 0; in ufshcd_prepare_utp_nop_upiu()
2579 ucd_req_ptr->header.dword_2 = 0; in ufshcd_prepare_utp_nop_upiu()
2581 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); in ufshcd_prepare_utp_nop_upiu()
2585 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
2596 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_compose_devman_upiu()
2597 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; in ufshcd_compose_devman_upiu()
2599 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; in ufshcd_compose_devman_upiu()
2602 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) in ufshcd_compose_devman_upiu()
2604 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) in ufshcd_compose_devman_upiu()
2607 ret = -EINVAL; in ufshcd_compose_devman_upiu()
2613 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2623 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_comp_scsi_upiu()
2624 lrbp->command_type = UTP_CMD_TYPE_SCSI; in ufshcd_comp_scsi_upiu()
2626 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; in ufshcd_comp_scsi_upiu()
2628 if (likely(lrbp->cmd)) { in ufshcd_comp_scsi_upiu()
2630 lrbp->cmd->sc_data_direction); in ufshcd_comp_scsi_upiu()
2633 ret = -EINVAL; in ufshcd_comp_scsi_upiu()
2640 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN ID to SCSI W-LUN ID
2641 * @upiu_wlun_id: UPIU W-LUN ID
2643 * Returns SCSI W-LUN id
2652 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + in ufshcd_init_lrb()
2654 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; in ufshcd_init_lrb()
2655 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + in ufshcd_init_lrb()
2661 lrb->utr_descriptor_ptr = utrdlp + i; in ufshcd_init_lrb()
2662 lrb->utrd_dma_addr = hba->utrdl_dma_addr + in ufshcd_init_lrb()
2664 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp; in ufshcd_init_lrb()
2665 lrb->ucd_req_dma_addr = cmd_desc_element_addr; in ufshcd_init_lrb()
2666 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu; in ufshcd_init_lrb()
2667 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; in ufshcd_init_lrb()
2668 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table; in ufshcd_init_lrb()
2669 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; in ufshcd_init_lrb()
2673 * ufshcd_queuecommand - main entry point for SCSI requests
2677 * Returns 0 for success, non-zero in case of failure
2688 tag = cmd->request->tag; in ufshcd_queuecommand()
2690 dev_err(hba->dev, in ufshcd_queuecommand()
2691 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", in ufshcd_queuecommand()
2692 __func__, tag, cmd, cmd->request); in ufshcd_queuecommand()
2696 if (!down_read_trylock(&hba->clk_scaling_lock)) in ufshcd_queuecommand()
2699 switch (hba->ufshcd_state) { in ufshcd_queuecommand()
2709 * can we let the scsi cmd pass through, because UFS is in bad in ufshcd_queuecommand()
2714 if (hba->pm_op_in_progress) { in ufshcd_queuecommand()
2715 hba->force_reset = true; in ufshcd_queuecommand()
2717 cmd->scsi_done(cmd); in ufshcd_queuecommand()
2726 cmd->scsi_done(cmd); in ufshcd_queuecommand()
2729 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", in ufshcd_queuecommand()
2730 __func__, hba->ufshcd_state); in ufshcd_queuecommand()
2732 cmd->scsi_done(cmd); in ufshcd_queuecommand()
2736 hba->req_abort_count = 0; in ufshcd_queuecommand()
2744 (hba->clk_gating.state != CLKS_ON)); in ufshcd_queuecommand()
2746 lrbp = &hba->lrb[tag]; in ufshcd_queuecommand()
2747 WARN_ON(lrbp->cmd); in ufshcd_queuecommand()
2748 lrbp->cmd = cmd; in ufshcd_queuecommand()
2749 lrbp->sense_bufflen = UFS_SENSE_SIZE; in ufshcd_queuecommand()
2750 lrbp->sense_buffer = cmd->sense_buffer; in ufshcd_queuecommand()
2751 lrbp->task_tag = tag; in ufshcd_queuecommand()
2752 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); in ufshcd_queuecommand()
2753 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); in ufshcd_queuecommand()
2755 ufshcd_prepare_lrbp_crypto(cmd->request, lrbp); in ufshcd_queuecommand()
2757 trace_android_vh_ufs_prepare_command(hba, cmd->request, lrbp, &err); in ufshcd_queuecommand()
2759 lrbp->cmd = NULL; in ufshcd_queuecommand()
2764 lrbp->req_abort_skip = false; in ufshcd_queuecommand()
2767 if (err == -EAGAIN) { in ufshcd_queuecommand()
2768 lrbp->cmd = NULL; in ufshcd_queuecommand()
2777 lrbp->cmd = NULL; in ufshcd_queuecommand()
2786 up_read(&hba->clk_scaling_lock); in ufshcd_queuecommand()
2793 lrbp->cmd = NULL; in ufshcd_compose_dev_cmd()
2794 lrbp->sense_bufflen = 0; in ufshcd_compose_dev_cmd()
2795 lrbp->sense_buffer = NULL; in ufshcd_compose_dev_cmd()
2796 lrbp->task_tag = tag; in ufshcd_compose_dev_cmd()
2797 lrbp->lun = 0; /* device management cmd is not specific to any LUN */ in ufshcd_compose_dev_cmd()
2798 lrbp->intr_cmd = true; /* No interrupt aggregation */ in ufshcd_compose_dev_cmd()
2800 hba->dev_cmd.type = cmd_type; in ufshcd_compose_dev_cmd()
2813 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_cmd()
2815 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_cmd()
2818 * wait for h/w to clear the corresponding bit in the doorbell. in ufshcd_clear_cmd()
2831 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_check_query_response()
2834 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> in ufshcd_check_query_response()
2836 return query_res->response; in ufshcd_check_query_response()
2840 * ufshcd_dev_cmd_completion() - handles device management command responses
2850 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_dev_cmd_completion()
2851 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); in ufshcd_dev_cmd_completion()
2855 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { in ufshcd_dev_cmd_completion()
2856 err = -EINVAL; in ufshcd_dev_cmd_completion()
2857 dev_err(hba->dev, "%s: unexpected response %x\n", in ufshcd_dev_cmd_completion()
2868 err = -EPERM; in ufshcd_dev_cmd_completion()
2869 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", in ufshcd_dev_cmd_completion()
2873 err = -EINVAL; in ufshcd_dev_cmd_completion()
2874 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", in ufshcd_dev_cmd_completion()
2889 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, in ufshcd_wait_for_dev_cmd()
2894 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
2895 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
2901 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
2904 err = -ETIMEDOUT; in ufshcd_wait_for_dev_cmd()
2905 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", in ufshcd_wait_for_dev_cmd()
2906 __func__, lrbp->task_tag); in ufshcd_wait_for_dev_cmd()
2907 if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) in ufshcd_wait_for_dev_cmd()
2909 err = -EAGAIN; in ufshcd_wait_for_dev_cmd()
2915 ufshcd_outstanding_req_clear(hba, lrbp->task_tag); in ufshcd_wait_for_dev_cmd()
2922 * ufshcd_exec_dev_cmd - API for sending device management requests
2923 * @hba: UFS hba
2928 * it is expected that you hold the hba->dev_cmd.lock mutex.
2934 const u32 tag = ufs_hba_add_info(hba)->reserved_slot; in ufshcd_exec_dev_cmd()
2938 /* Protects use of ufs_hba_add_info(hba)->reserved_slot. */ in ufshcd_exec_dev_cmd()
2939 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_exec_dev_cmd()
2941 down_read(&hba->clk_scaling_lock); in ufshcd_exec_dev_cmd()
2943 lrbp = &hba->lrb[tag]; in ufshcd_exec_dev_cmd()
2944 WARN_ON(lrbp->cmd); in ufshcd_exec_dev_cmd()
2949 hba->dev_cmd.complete = &wait; in ufshcd_exec_dev_cmd()
2961 up_read(&hba->clk_scaling_lock); in ufshcd_exec_dev_cmd()
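ufshcd_exec_dev_cmd() funnels every device management command through the single reserved tag (nutrs - 1), so correctness depends on the caller holding hba->dev_cmd.lock; lockdep_assert_held() documents and enforces that. A userspace echo of the contract, where a failing pthread_mutex_trylock() serves as a weak "someone holds the lock" check (lockdep additionally verifies it is the current thread):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static const int reserved_slot = 31;	/* e.g. nutrs - 1 */

static void exec_dev_cmd(void)
{
	/* trylock must fail: the lock has to be held on entry */
	assert(pthread_mutex_trylock(&dev_cmd_lock) != 0);
	printf("issuing dev cmd on reserved tag %d\n", reserved_slot);
}

int main(void)
{
	pthread_mutex_lock(&dev_cmd_lock);
	exec_dev_cmd();
	pthread_mutex_unlock(&dev_cmd_lock);
	return 0;
}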
2966 * ufshcd_init_query() - init the query response and request parameters
2967 * @hba: per-adapter instance
2979 *request = &hba->dev_cmd.query.request; in ufshcd_init_query()
2980 *response = &hba->dev_cmd.query.response; in ufshcd_init_query()
2983 (*request)->upiu_req.opcode = opcode; in ufshcd_init_query()
2984 (*request)->upiu_req.idn = idn; in ufshcd_init_query()
2985 (*request)->upiu_req.index = index; in ufshcd_init_query()
2986 (*request)->upiu_req.selector = selector; in ufshcd_init_query()
2998 dev_dbg(hba->dev, in ufshcd_query_flag_retry()
3006 dev_err(hba->dev, in ufshcd_query_flag_retry()
3014 * ufshcd_query_flag() - API function for sending flag query requests
3015 * @hba: per-adapter instance
3021 * Returns 0 for success, non-zero in case of failure
3034 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_flag()
3042 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; in ufshcd_query_flag()
3045 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; in ufshcd_query_flag()
3048 dev_err(hba->dev, "%s: Invalid argument for read request\n", in ufshcd_query_flag()
3050 err = -EINVAL; in ufshcd_query_flag()
3055 dev_err(hba->dev, in ufshcd_query_flag()
3058 err = -EINVAL; in ufshcd_query_flag()
3065 dev_err(hba->dev, in ufshcd_query_flag()
3072 *flag_res = (be32_to_cpu(response->upiu_res.value) & in ufshcd_query_flag()
3076 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_flag()
3083 * ufshcd_query_attr - API function for sending attribute requests
3084 * @hba: per-adapter instance
3091 * Returns 0 for success, non-zero in case of failure
3103 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", in ufshcd_query_attr()
3105 return -EINVAL; in ufshcd_query_attr()
3110 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_attr()
3116 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; in ufshcd_query_attr()
3117 request->upiu_req.value = cpu_to_be32(*attr_val); in ufshcd_query_attr()
3120 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; in ufshcd_query_attr()
3123 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", in ufshcd_query_attr()
3125 err = -EINVAL; in ufshcd_query_attr()
3132 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in ufshcd_query_attr()
3137 *attr_val = be32_to_cpu(response->upiu_res.value); in ufshcd_query_attr()
3140 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_attr()
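/*
 * Illustrative sketch, not part of the driver: a device-wide attribute read
 * uses index 0 and selector 0, mirroring what ufshcd_get_bkops_status() does
 * via the retry wrapper below. The example_* name is hypothetical.
 */
static int example_read_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}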
3147 * ufshcd_query_attr_retry() - API function for sending query
3149 * @hba: per-adapter instance
3157 * Returns 0 for success, non-zero in case of failure
3166 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { in ufshcd_query_attr_retry()
3170 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", in ufshcd_query_attr_retry()
3177 dev_err(hba->dev, in ufshcd_query_attr_retry()
3195 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", in __ufshcd_query_descriptor()
3197 return -EINVAL; in __ufshcd_query_descriptor()
3201 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", in __ufshcd_query_descriptor()
3203 return -EINVAL; in __ufshcd_query_descriptor()
3208 mutex_lock(&hba->dev_cmd.lock); in __ufshcd_query_descriptor()
3211 hba->dev_cmd.query.descriptor = desc_buf; in __ufshcd_query_descriptor()
3212 request->upiu_req.length = cpu_to_be16(*buf_len); in __ufshcd_query_descriptor()
3216 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; in __ufshcd_query_descriptor()
3219 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; in __ufshcd_query_descriptor()
3222 dev_err(hba->dev, in __ufshcd_query_descriptor()
3223 "%s: Expected query descriptor opcode but got = 0x%.2x\n", in __ufshcd_query_descriptor()
3225 err = -EINVAL; in __ufshcd_query_descriptor()
3232 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in __ufshcd_query_descriptor()
3237 *buf_len = be16_to_cpu(response->upiu_res.length); in __ufshcd_query_descriptor()
3240 hba->dev_cmd.query.descriptor = NULL; in __ufshcd_query_descriptor()
3241 mutex_unlock(&hba->dev_cmd.lock); in __ufshcd_query_descriptor()
3247 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3248 * @hba: per-adapter instance
3256 * Returns 0 for success, non-zero in case of failure.
3269 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { in ufshcd_query_descriptor_retry()
3272 if (!err || err == -EINVAL) in ufshcd_query_descriptor_retry()
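/*
 * Illustrative sketch, not part of the driver: a whole-descriptor read with
 * the retry wrapper above. buf_len is in/out -- callers pass the buffer size
 * and get back the length the device actually returned. The example_* name
 * is hypothetical.
 */
static int example_read_device_desc(struct ufs_hba *hba, u8 *buf, int size)
{
	int buf_len = size;

	return ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					     QUERY_DESC_IDN_DEVICE, 0, 0,
					     buf, &buf_len);
}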
3281 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3293 *desc_len = hba->desc_size[desc_id]; in ufshcd_map_desc_id_to_length()
3301 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE && in ufshcd_update_desc_length()
3303 /* For UFS 3.1, the normal unit descriptor is 10 bytes larger in ufshcd_update_desc_length()
3308 hba->desc_size[desc_id] = desc_len; in ufshcd_update_desc_length()
3312 * ufshcd_read_desc_param - read the specified descriptor parameter
3320 * Return 0 in case of success, non-zero otherwise
3336 return -EINVAL; in ufshcd_read_desc_param()
3341 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__); in ufshcd_read_desc_param()
3342 return -EINVAL; in ufshcd_read_desc_param()
3346 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", in ufshcd_read_desc_param()
3348 return -EINVAL; in ufshcd_read_desc_param()
3355 return -ENOMEM; in ufshcd_read_desc_param()
3367 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n", in ufshcd_read_desc_param()
3374 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", in ufshcd_read_desc_param()
3376 ret = -EINVAL; in ufshcd_read_desc_param()
3387 ret = -EINVAL; in ufshcd_read_desc_param()
3390 min_t(u32, param_size, buff_len - param_offset)); in ufshcd_read_desc_param()
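/*
 * Illustrative sketch, not part of the driver: reading one device-descriptor
 * field through ufshcd_read_desc_param() above. Descriptor contents are
 * big-endian, so multi-byte parameters need explicit assembly. The example_*
 * name is hypothetical.
 */
static int example_read_spec_version(struct ufs_hba *hba, u16 *spec_ver)
{
	u8 raw[2];
	int err;

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
				     DEVICE_DESC_PARAM_SPEC_VER,
				     raw, sizeof(raw));
	if (!err)
		*spec_ver = (u16)raw[0] << 8 | raw[1];
	return err;
}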
3400 * struct uc_string_id - unicode string
3412 /* replace non-printable or non-ASCII characters with spaces */
3419 * ufshcd_read_string_desc - read string descriptor
3429 * * -ENOMEM: on allocation failure
3430 * * -EINVAL: on a wrong parameter
3440 return -EINVAL; in ufshcd_read_string_desc()
3444 return -ENOMEM; in ufshcd_read_string_desc()
3449 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", in ufshcd_read_string_desc()
3455 if (uc_str->len <= QUERY_DESC_HDR_SIZE) { in ufshcd_read_string_desc()
3456 dev_dbg(hba->dev, "String Desc is of zero length\n"); in ufshcd_read_string_desc()
3465 /* remove header and divide by 2 to move from UTF16 to UTF8 */ in ufshcd_read_string_desc()
3466 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; in ufshcd_read_string_desc()
3469 ret = -ENOMEM; in ufshcd_read_string_desc()
3475 * we need to convert to utf-8 so it can be displayed in ufshcd_read_string_desc()
3477 ret = utf16s_to_utf8s(uc_str->uc, in ufshcd_read_string_desc()
3478 uc_str->len - QUERY_DESC_HDR_SIZE, in ufshcd_read_string_desc()
3481 /* replace non-printable or non-ASCII characters with spaces */ in ufshcd_read_string_desc()
3488 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); in ufshcd_read_string_desc()
3490 ret = -ENOMEM; in ufshcd_read_string_desc()
3493 ret = uc_str->len; in ufshcd_read_string_desc()
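/*
 * Illustrative sketch, not part of the driver: the string index (e.g. the
 * product name index read from DEVICE_DESC_PARAM_PRDCT_NAME) comes from
 * another descriptor; on success *name points to a kmalloc'ed,
 * NUL-terminated string the caller must kfree(). The example_* name is
 * hypothetical.
 */
static int example_read_product_name(struct ufs_hba *hba, u8 model_index,
				     u8 **name)
{
	return ufshcd_read_string_desc(hba, model_index, name, SD_ASCII_STD);
}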
3502 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3509 * Return 0 in case of success, non-zero otherwise
3521 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset)) in ufshcd_read_unit_desc_param()
3522 return -EOPNOTSUPP; in ufshcd_read_unit_desc_param()
3533 if (hba->dev_info.wspecversion >= 0x300) { in ufshcd_get_ref_clk_gating_wait()
3538 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3543 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3547 hba->dev_info.clk_gating_wait_us = gating_wait; in ufshcd_get_ref_clk_gating_wait()
3554 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3559 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3564 * Returns 0 for success, non-zero in case of failure
3571 ucdl_size = (sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs); in ufshcd_memory_alloc()
3572 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3574 &hba->ucdl_dma_addr, in ufshcd_memory_alloc()
3579 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE in ufshcd_memory_alloc()
3580 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will in ufshcd_memory_alloc()
3583 if (!hba->ucdl_base_addr || in ufshcd_memory_alloc()
3584 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3585 dev_err(hba->dev, in ufshcd_memory_alloc()
3594 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); in ufshcd_memory_alloc()
3595 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3597 &hba->utrdl_dma_addr, in ufshcd_memory_alloc()
3599 if (!hba->utrdl_base_addr || in ufshcd_memory_alloc()
3600 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3601 dev_err(hba->dev, in ufshcd_memory_alloc()
3610 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; in ufshcd_memory_alloc()
3611 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3613 &hba->utmrdl_dma_addr, in ufshcd_memory_alloc()
3615 if (!hba->utmrdl_base_addr || in ufshcd_memory_alloc()
3616 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3617 dev_err(hba->dev, in ufshcd_memory_alloc()
3623 hba->lrb = devm_kcalloc(hba->dev, in ufshcd_memory_alloc()
3624 hba->nutrs, sizeof(struct ufshcd_lrb), in ufshcd_memory_alloc()
3626 if (!hba->lrb) { in ufshcd_memory_alloc()
3627 dev_err(hba->dev, "LRB Memory allocation failed\n"); in ufshcd_memory_alloc()
3632 return -ENOMEM; in ufshcd_memory_alloc()
3636 * ufshcd_host_memory_configure - configure local reference block with
3643 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3658 utrdlp = hba->utrdl_base_addr; in ufshcd_host_memory_configure()
3666 cmd_desc_dma_addr = hba->ucdl_dma_addr; in ufshcd_host_memory_configure()
3668 for (i = 0; i < hba->nutrs; i++) { in ufshcd_host_memory_configure()
3678 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { in ufshcd_host_memory_configure()
3687 cpu_to_le16(response_offset >> 2); in ufshcd_host_memory_configure()
3689 cpu_to_le16(prdt_offset >> 2); in ufshcd_host_memory_configure()
3691 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); in ufshcd_host_memory_configure()
3694 ufshcd_init_lrb(hba, &hba->lrb[i], i); in ufshcd_host_memory_configure()
3699 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3707 * Returns 0 on success, non-zero value on failure
3718 dev_dbg(hba->dev, in ufshcd_dme_link_startup()
3719 "dme-link-startup: error code %d\n", ret); in ufshcd_dme_link_startup()
3723 * ufshcd_dme_reset - UIC command for DME_RESET
3729 * Returns 0 on success, non-zero value on failure
3740 dev_err(hba->dev, in ufshcd_dme_reset()
3741 "dme-reset: error code %d\n", ret); in ufshcd_dme_reset()
3747 * ufshcd_dme_enable - UIC command for DME_ENABLE
3752 * Returns 0 on success, non-zero value on failure
3763 dev_err(hba->dev, in ufshcd_dme_enable()
3764 "dme-enable: error code %d\n", ret); in ufshcd_dme_enable()
3774 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) in ufshcd_add_delay_before_dme_cmd()
3781 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { in ufshcd_add_delay_before_dme_cmd()
3787 hba->last_dme_cmd_tstamp)); in ufshcd_add_delay_before_dme_cmd()
3791 MIN_DELAY_BEFORE_DME_CMDS_US - delta; in ufshcd_add_delay_before_dme_cmd()
3801 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3808 * Returns 0 on success, non-zero value on failure
3815 "dme-set", in ufshcd_dme_set_attr()
3816 "dme-peer-set" in ufshcd_dme_set_attr()
3832 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", in ufshcd_dme_set_attr()
3834 } while (ret && peer && --retries); in ufshcd_dme_set_attr()
3837 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", in ufshcd_dme_set_attr()
3839 UFS_UIC_COMMAND_RETRIES - retries); in ufshcd_dme_set_attr()
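/*
 * Illustrative sketch, not part of the driver: the ufshcd_dme_set() /
 * ufshcd_dme_peer_set() convenience macros expand to ufshcd_dme_set_attr()
 * with DME_LOCAL or DME_PEER, e.g. programming PA_TACTIVATE on the local
 * side as the PA_TACTIVATE tuning quirks do. The example_* name is
 * hypothetical.
 */
static int example_set_tactivate(struct ufs_hba *hba, u32 tact)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), tact);
}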
3846 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3852 * Returns 0 on success, non-zero value on failure
3859 "dme-get", in ufshcd_dme_get_attr()
3860 "dme-peer-get" in ufshcd_dme_get_attr()
3869 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { in ufshcd_dme_get_attr()
3870 orig_pwr_info = hba->pwr_info; in ufshcd_dme_get_attr()
3899 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", in ufshcd_dme_get_attr()
3901 } while (ret && peer && --retries); in ufshcd_dme_get_attr()
3904 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", in ufshcd_dme_get_attr()
3906 UFS_UIC_COMMAND_RETRIES - retries); in ufshcd_dme_get_attr()
3911 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) in ufshcd_dme_get_attr()
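/*
 * Illustrative sketch, not part of the driver: the peer (device-side) read
 * variant, as used by the host/device PA_TACTIVATE tuning quirks. The
 * example_* name is hypothetical.
 */
static int example_get_peer_tactivate(struct ufs_hba *hba, u32 *tact)
{
	return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), tact);
}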
3920 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3933 * Returns 0 on success, non-zero value on failure
3943 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
3946 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
3948 ret = -ENOLINK; in ufshcd_uic_pwr_ctrl()
3951 hba->uic_async_done = &uic_async_done; in ufshcd_uic_pwr_ctrl()
3962 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
3964 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
3966 cmd->command, cmd->argument3, ret); in ufshcd_uic_pwr_ctrl()
3970 if (!wait_for_completion_timeout(hba->uic_async_done, in ufshcd_uic_pwr_ctrl()
3972 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
3974 cmd->command, cmd->argument3); in ufshcd_uic_pwr_ctrl()
3976 if (!cmd->cmd_active) { in ufshcd_uic_pwr_ctrl()
3977 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n", in ufshcd_uic_pwr_ctrl()
3982 ret = -ETIMEDOUT; in ufshcd_uic_pwr_ctrl()
3989 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
3991 cmd->command, status); in ufshcd_uic_pwr_ctrl()
3992 ret = (status != PWR_OK) ? status : -1; in ufshcd_uic_pwr_ctrl()
4001 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4002 hba->active_uic_cmd = NULL; in ufshcd_uic_pwr_ctrl()
4003 hba->uic_async_done = NULL; in ufshcd_uic_pwr_ctrl()
4007 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4014 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4015 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
4021 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4026 * Returns 0 on success, non-zero value on failure
4033 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { in ufshcd_uic_change_pwr_mode()
4037 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", in ufshcd_uic_change_pwr_mode()
4059 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4060 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_link_recovery()
4062 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4069 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4071 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_link_recovery()
4073 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4076 dev_err(hba->dev, "%s: link recovery failed, err %d", in ufshcd_link_recovery()
4093 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", in ufshcd_uic_hibern8_enter()
4097 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", in ufshcd_uic_hibern8_enter()
4117 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", in ufshcd_uic_hibern8_exit()
4121 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", in ufshcd_uic_hibern8_exit()
4126 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); in ufshcd_uic_hibern8_exit()
4127 hba->ufs_stats.hibern8_exit_cnt++; in ufshcd_uic_hibern8_exit()
4142 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_auto_hibern8_update()
4143 if (hba->ahit != ahit) { in ufshcd_auto_hibern8_update()
4144 hba->ahit = ahit; in ufshcd_auto_hibern8_update()
4147 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_auto_hibern8_update()
4149 if (update && !pm_runtime_suspended(hba->dev)) { in ufshcd_auto_hibern8_update()
4150 pm_runtime_get_sync(hba->dev); in ufshcd_auto_hibern8_update()
4154 pm_runtime_put(hba->dev); in ufshcd_auto_hibern8_update()
4166 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_auto_hibern8_enable()
4167 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); in ufshcd_auto_hibern8_enable()
4168 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_auto_hibern8_enable()
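/*
 * Illustrative sketch, not part of the driver: composing an AHIT value for
 * ufshcd_auto_hibern8_update() above, assuming the UFSHCI_AHIBERN8_* masks
 * from ufshci.h and <linux/bitfield.h>. Scale field 3 selecting 1 ms timer
 * units is an assumption -- verify against the UFSHCI spec table. The
 * example_* name is hypothetical.
 */
static u32 example_build_ahit(u32 timer_ms)
{
	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer_ms) |
	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
}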
4172 * ufshcd_init_pwr_info - setting the POR (power on reset)
4174 * @hba: per-adapter instance
4178 hba->pwr_info.gear_rx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4179 hba->pwr_info.gear_tx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4180 hba->pwr_info.lane_rx = 1; in ufshcd_init_pwr_info()
4181 hba->pwr_info.lane_tx = 1; in ufshcd_init_pwr_info()
4182 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4183 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4184 hba->pwr_info.hs_rate = 0; in ufshcd_init_pwr_info()
4188 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4189 * @hba: per-adapter instance
4193 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; in ufshcd_get_max_pwr_mode()
4195 if (hba->max_pwr_info.is_valid) in ufshcd_get_max_pwr_mode()
4198 pwr_info->pwr_tx = FAST_MODE; in ufshcd_get_max_pwr_mode()
4199 pwr_info->pwr_rx = FAST_MODE; in ufshcd_get_max_pwr_mode()
4200 pwr_info->hs_rate = PA_HS_MODE_B; in ufshcd_get_max_pwr_mode()
4204 &pwr_info->lane_rx); in ufshcd_get_max_pwr_mode()
4206 &pwr_info->lane_tx); in ufshcd_get_max_pwr_mode()
4208 if (!pwr_info->lane_rx || !pwr_info->lane_tx) { in ufshcd_get_max_pwr_mode()
4209 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", in ufshcd_get_max_pwr_mode()
4211 pwr_info->lane_rx, in ufshcd_get_max_pwr_mode()
4212 pwr_info->lane_tx); in ufshcd_get_max_pwr_mode()
4213 return -EINVAL; in ufshcd_get_max_pwr_mode()
4221 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
4222 if (!pwr_info->gear_rx) { in ufshcd_get_max_pwr_mode()
4224 &pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
4225 if (!pwr_info->gear_rx) { in ufshcd_get_max_pwr_mode()
4226 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4227 __func__, pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
4228 return -EINVAL; in ufshcd_get_max_pwr_mode()
4230 pwr_info->pwr_rx = SLOW_MODE; in ufshcd_get_max_pwr_mode()
4234 &pwr_info->gear_tx); in ufshcd_get_max_pwr_mode()
4235 if (!pwr_info->gear_tx) { in ufshcd_get_max_pwr_mode()
4237 &pwr_info->gear_tx); in ufshcd_get_max_pwr_mode()
4238 if (!pwr_info->gear_tx) { in ufshcd_get_max_pwr_mode()
4239 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4240 __func__, pwr_info->gear_tx); in ufshcd_get_max_pwr_mode()
4241 return -EINVAL; in ufshcd_get_max_pwr_mode()
4243 pwr_info->pwr_tx = SLOW_MODE; in ufshcd_get_max_pwr_mode()
4246 hba->max_pwr_info.is_valid = true; in ufshcd_get_max_pwr_mode()
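/*
 * Illustrative sketch, not part of the driver: how the helpers here are
 * paired on the probe path -- read the negotiated maximum, then program it.
 * The example_* name is hypothetical.
 */
static int example_scale_to_max_gear(struct ufs_hba *hba)
{
	int ret = ufshcd_get_max_pwr_mode(hba);

	if (ret)
		return ret;
	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}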
4256 if (!hba->force_pmc && in ufshcd_change_power_mode()
4257 pwr_mode->gear_rx == hba->pwr_info.gear_rx && in ufshcd_change_power_mode()
4258 pwr_mode->gear_tx == hba->pwr_info.gear_tx && in ufshcd_change_power_mode()
4259 pwr_mode->lane_rx == hba->pwr_info.lane_rx && in ufshcd_change_power_mode()
4260 pwr_mode->lane_tx == hba->pwr_info.lane_tx && in ufshcd_change_power_mode()
4261 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && in ufshcd_change_power_mode()
4262 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && in ufshcd_change_power_mode()
4263 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { in ufshcd_change_power_mode()
4264 dev_dbg(hba->dev, "%s: power already configured\n", __func__); in ufshcd_change_power_mode()
4270 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, in ufshcd_change_power_mode()
4271 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, in ufshcd_change_power_mode()
4272 * - PA_HSSERIES in ufshcd_change_power_mode()
4274 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); in ufshcd_change_power_mode()
4276 pwr_mode->lane_rx); in ufshcd_change_power_mode()
4277 if (pwr_mode->pwr_rx == FASTAUTO_MODE || in ufshcd_change_power_mode()
4278 pwr_mode->pwr_rx == FAST_MODE) in ufshcd_change_power_mode()
4283 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); in ufshcd_change_power_mode()
4285 pwr_mode->lane_tx); in ufshcd_change_power_mode()
4286 if (pwr_mode->pwr_tx == FASTAUTO_MODE || in ufshcd_change_power_mode()
4287 pwr_mode->pwr_tx == FAST_MODE) in ufshcd_change_power_mode()
4292 if (pwr_mode->pwr_rx == FASTAUTO_MODE || in ufshcd_change_power_mode()
4293 pwr_mode->pwr_tx == FASTAUTO_MODE || in ufshcd_change_power_mode()
4294 pwr_mode->pwr_rx == FAST_MODE || in ufshcd_change_power_mode()
4295 pwr_mode->pwr_tx == FAST_MODE) in ufshcd_change_power_mode()
4297 pwr_mode->hs_rate); in ufshcd_change_power_mode()
4299 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { in ufshcd_change_power_mode()
4321 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 in ufshcd_change_power_mode()
4322 | pwr_mode->pwr_tx); in ufshcd_change_power_mode()
4325 dev_err(hba->dev, in ufshcd_change_power_mode()
4331 memcpy(&hba->pwr_info, pwr_mode, in ufshcd_change_power_mode()
4339 * ufshcd_config_pwr_mode - configure a new power mode
4340 * @hba: per-adapter instance
4362 * ufshcd_complete_dev_init() - checks device readiness
4363 * @hba: per-adapter instance
4376 dev_err(hba->dev, in ufshcd_complete_dev_init()
4393 dev_err(hba->dev, in ufshcd_complete_dev_init()
4397 dev_err(hba->dev, in ufshcd_complete_dev_init()
4400 err = -EBUSY; in ufshcd_complete_dev_init()
4407 * ufshcd_make_hba_operational - Make UFS controller operational
4410 * To bring UFS host controller to operational state,
4412 * 2. Configure interrupt aggregation
4414 * 4. Configure run-stop registers
4416 * Returns 0 on success, non-zero value on failure
4428 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); in ufshcd_make_hba_operational()
4433 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4435 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4437 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4439 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4455 dev_err(hba->dev, in ufshcd_make_hba_operational()
4457 err = -EIO; in ufshcd_make_hba_operational()
4465 * ufshcd_hba_stop - Send controller to reset state
4475 * while the UFS interrupt handler is active on another CPU. in ufshcd_hba_stop()
4477 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hba_stop()
4479 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hba_stop()
4485 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); in ufshcd_hba_stop()
4490 * ufshcd_hba_execute_hce - initialize the controller
4497 * Returns 0 on success, non-zero value on failure
4518 * To initialize a UFS host controller, the HCE bit must be set to 1. in ufshcd_hba_execute_hce()
4519 * During initialization the HCE bit value changes from 1->0->1. in ufshcd_hba_execute_hce()
4527 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); in ufshcd_hba_execute_hce()
4533 retry_inner--; in ufshcd_hba_execute_hce()
4535 dev_err(hba->dev, in ufshcd_hba_execute_hce()
4538 retry_outer--; in ufshcd_hba_execute_hce()
4541 return -EIO; in ufshcd_hba_execute_hce()
4558 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { in ufshcd_hba_enable()
4570 dev_err(hba->dev, in ufshcd_hba_enable()
4571 "Host controller enable failed with non-hce\n"); in ufshcd_hba_enable()
4603 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", in ufshcd_disable_tx_lcc()
4624 e = &hba->ufs_stats.event[id]; in ufshcd_update_evt_hist()
4625 e->val[e->pos] = val; in ufshcd_update_evt_hist()
4626 e->tstamp[e->pos] = ktime_get(); in ufshcd_update_evt_hist()
4627 e->cnt += 1; in ufshcd_update_evt_hist()
4628 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH; in ufshcd_update_evt_hist()
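/*
 * Illustrative sketch, not part of the driver: error paths record one u32
 * datum per event id; the ring above keeps the last UFS_EVENT_HIST_LENGTH
 * entries. The example_* name is hypothetical.
 */
static void example_record_link_startup_err(struct ufs_hba *hba, int ret)
{
	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret);
}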
4635 * ufshcd_link_startup - Initialize unipro link startup
4638 * Returns 0 for success, non-zero in case of failure
4647 * If the UFS device isn't active then we will have to issue link startup in ufshcd_link_startup()
4648 * 2 times to make sure the device state moves to active. in ufshcd_link_startup()
4659 /* check if device is detected by inter-connect layer */ in ufshcd_link_startup()
4664 dev_err(hba->dev, "%s: Device not present\n", __func__); in ufshcd_link_startup()
4665 ret = -ENXIO; in ufshcd_link_startup()
4672 * succeeds. So reset the local Uni-Pro and try again. in ufshcd_link_startup()
4680 } while (ret && retries--); in ufshcd_link_startup()
4696 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ in ufshcd_link_startup()
4700 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { in ufshcd_link_startup()
4716 dev_err(hba->dev, "link startup failed %d\n", ret); in ufshcd_link_startup()
4725 * ufshcd_verify_dev_init() - Verify device initialization
4726 * @hba: per-adapter instance
4740 mutex_lock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
4741 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { in ufshcd_verify_dev_init()
4745 if (!err || err == -ETIMEDOUT) in ufshcd_verify_dev_init()
4748 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_verify_dev_init()
4750 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
4754 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); in ufshcd_verify_dev_init()
4759 * ufshcd_set_queue_depth - set lun queue depth
4763 * queueing. For WLUN, queue depth is set to 1. For best-effort
4773 hba = shost_priv(sdev->host); in ufshcd_set_queue_depth()
4775 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
4777 ufshcd_scsi_to_upiu_lun(sdev->lun), in ufshcd_set_queue_depth()
4783 if (ret == -EOPNOTSUPP) in ufshcd_set_queue_depth()
4787 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
4789 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); in ufshcd_set_queue_depth()
4791 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", in ufshcd_set_queue_depth()
4797 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4798 * @hba: per-adapter instance
4799 * @lun: UFS device lun id
4804 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4805 * Returns -EINVAL in case of invalid parameters passed to this function.
4814 ret = -EINVAL; in ufshcd_get_lu_wp()
4816 * According to UFS device spec, RPMB LU can't be write in ufshcd_get_lu_wp()
4818 * it. For other W-LUs, UNIT DESCRIPTOR is not available. in ufshcd_get_lu_wp()
4820 else if (lun >= hba->dev_info.max_lu_supported) in ufshcd_get_lu_wp()
4821 ret = -ENOTSUPP; in ufshcd_get_lu_wp()
4832 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4834 * @hba: per-adapter instance
4841 if (hba->dev_info.f_power_on_wp_en && in ufshcd_get_lu_power_on_wp_status()
4842 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_get_lu_power_on_wp_status()
4845 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), in ufshcd_get_lu_power_on_wp_status()
4848 hba->dev_info.is_lu_power_on_wp = true; in ufshcd_get_lu_power_on_wp_status()
4853 * ufshcd_slave_alloc - handle initial SCSI device configurations
4862 hba = shost_priv(sdev->host); in ufshcd_slave_alloc()
4864 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ in ufshcd_slave_alloc()
4865 sdev->use_10_for_ms = 1; in ufshcd_slave_alloc()
4868 sdev->set_dbd_for_ms = 1; in ufshcd_slave_alloc()
4871 sdev->allow_restart = 1; in ufshcd_slave_alloc()
4874 sdev->no_report_opcodes = 1; in ufshcd_slave_alloc()
4877 sdev->no_write_same = 1; in ufshcd_slave_alloc()
4887 * ufshcd_change_queue_depth - change queue depth
4895 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue)); in ufshcd_change_queue_depth()
4900 /* skip well-known LU */ in ufshcd_hpb_destroy()
4901 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || in ufshcd_hpb_destroy()
4902 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) in ufshcd_hpb_destroy()
4910 /* skip well-known LU */ in ufshcd_hpb_configure()
4911 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || in ufshcd_hpb_configure()
4912 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) in ufshcd_hpb_configure()
4919 * ufshcd_slave_configure - adjust SCSI device configurations
4924 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_slave_configure()
4925 struct request_queue *q = sdev->request_queue; in ufshcd_slave_configure()
4929 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); in ufshcd_slave_configure()
4930 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) in ufshcd_slave_configure()
4931 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1); in ufshcd_slave_configure()
4934 sdev->rpm_autosuspend = 1; in ufshcd_slave_configure()
4944 * ufshcd_slave_destroy - remove SCSI device configurations
4951 hba = shost_priv(sdev->host); in ufshcd_slave_destroy()
4956 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { in ufshcd_slave_destroy()
4959 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_slave_destroy()
4960 hba->sdev_ufs_device = NULL; in ufshcd_slave_destroy()
4961 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_slave_destroy()
4966 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5001 * ufshcd_transfer_rsp_status - Get overall status of the response
5017 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { in ufshcd_transfer_rsp_status()
5018 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) & in ufshcd_transfer_rsp_status()
5025 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); in ufshcd_transfer_rsp_status()
5026 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_transfer_rsp_status()
5033 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); in ufshcd_transfer_rsp_status()
5052 * UFS device needs urgent BKOPs. in ufshcd_transfer_rsp_status()
5054 if (!hba->pm_op_in_progress && in ufshcd_transfer_rsp_status()
5056 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) && in ufshcd_transfer_rsp_status()
5057 schedule_work(&hba->eeh_work)) { in ufshcd_transfer_rsp_status()
5063 pm_runtime_get_noresume(hba->dev); in ufshcd_transfer_rsp_status()
5072 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5076 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5087 dev_err_ratelimited(hba->dev, in ufshcd_transfer_rsp_status()
5089 lrbp->task_tag, lrbp->cmd && lrbp->cmd->cmnd ? in ufshcd_transfer_rsp_status()
5090 lrbp->cmd->cmnd[0] : 0); in ufshcd_transfer_rsp_status()
5104 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5106 ocs, lrbp->task_tag); in ufshcd_transfer_rsp_status()
5113 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) in ufshcd_transfer_rsp_status()
5114 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); in ufshcd_transfer_rsp_status()
5128 if (hba->active_uic_cmd && in ufshcd_is_auto_hibern8_error()
5129 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || in ufshcd_is_auto_hibern8_error()
5130 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) in ufshcd_is_auto_hibern8_error()
5137 * ufshcd_uic_cmd_compl - handle completion of uic command
5142 * IRQ_HANDLED - If interrupt is valid
5143 * IRQ_NONE - If invalid interrupt
5149 spin_lock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5151 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); in ufshcd_uic_cmd_compl()
5153 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { in ufshcd_uic_cmd_compl()
5154 hba->active_uic_cmd->argument2 |= in ufshcd_uic_cmd_compl()
5156 hba->active_uic_cmd->argument3 = in ufshcd_uic_cmd_compl()
5158 if (!hba->uic_async_done) in ufshcd_uic_cmd_compl()
5159 hba->active_uic_cmd->cmd_active = 0; in ufshcd_uic_cmd_compl()
5160 complete(&hba->active_uic_cmd->done); in ufshcd_uic_cmd_compl()
5164 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { in ufshcd_uic_cmd_compl()
5165 hba->active_uic_cmd->cmd_active = 0; in ufshcd_uic_cmd_compl()
5166 complete(hba->uic_async_done); in ufshcd_uic_cmd_compl()
5171 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, in ufshcd_uic_cmd_compl()
5173 spin_unlock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5181 struct scsi_cmnd *cmd = lrbp->cmd; in ufshcd_release_scsi_cmd()
5185 lrbp->cmd = NULL; /* Mark the command as completed. */ in ufshcd_release_scsi_cmd()
5191 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5202 for_each_set_bit(index, &completed_reqs, hba->nutrs) { in __ufshcd_transfer_req_compl()
5203 if (!test_and_clear_bit(index, &hba->outstanding_reqs)) in __ufshcd_transfer_req_compl()
5205 lrbp = &hba->lrb[index]; in __ufshcd_transfer_req_compl()
5206 lrbp->compl_time_stamp = ktime_get(); in __ufshcd_transfer_req_compl()
5207 cmd = lrbp->cmd; in __ufshcd_transfer_req_compl()
5213 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp); in __ufshcd_transfer_req_compl()
5216 cmd->scsi_done(cmd); in __ufshcd_transfer_req_compl()
5217 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || in __ufshcd_transfer_req_compl()
5218 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { in __ufshcd_transfer_req_compl()
5219 if (hba->dev_cmd.complete) { in __ufshcd_transfer_req_compl()
5223 complete(hba->dev_cmd.complete); in __ufshcd_transfer_req_compl()
5231 * ufshcd_trc_handler - handle transfer requests completion
5236 * IRQ_HANDLED - If interrupt is valid
5237 * IRQ_NONE - If invalid interrupt
5251 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) in ufshcd_trc_handler()
5267 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_trc_handler()
5269 completed_reqs = tr_doorbell ^ hba->outstanding_reqs; in ufshcd_trc_handler()
5270 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_trc_handler()
5282 * ufshcd_disable_ee - disable exception event
5283 * @hba: per-adapter instance
5289 * Returns zero on success, non-zero error value on failure.
5296 if (!(hba->ee_ctrl_mask & mask)) in ufshcd_disable_ee()
5299 val = hba->ee_ctrl_mask & ~mask; in ufshcd_disable_ee()
5304 hba->ee_ctrl_mask &= ~mask; in ufshcd_disable_ee()
5310 * ufshcd_enable_ee - enable exception event
5311 * @hba: per-adapter instance
5317 * Returns zero on success, non-zero error value on failure.
5324 if (hba->ee_ctrl_mask & mask) in ufshcd_enable_ee()
5327 val = hba->ee_ctrl_mask | mask; in ufshcd_enable_ee()
5332 hba->ee_ctrl_mask |= mask; in ufshcd_enable_ee()
5338 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5339 * @hba: per-adapter instance
5346 * Returns zero on success, non-zero on failure.
5352 if (hba->auto_bkops_enabled) in ufshcd_enable_auto_bkops()
5358 dev_err(hba->dev, "%s: failed to enable bkops %d\n", in ufshcd_enable_auto_bkops()
5363 hba->auto_bkops_enabled = true; in ufshcd_enable_auto_bkops()
5364 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); in ufshcd_enable_auto_bkops()
5369 dev_err(hba->dev, "%s: failed to disable exception event %d\n", in ufshcd_enable_auto_bkops()
5376 * ufshcd_disable_auto_bkops - block device in doing background operations
5377 * @hba: per-adapter instance
5381 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5385 * Returns zero on success, non-zero on failure.
5391 if (!hba->auto_bkops_enabled) in ufshcd_disable_auto_bkops()
5400 dev_err(hba->dev, "%s: failed to enable exception event %d\n", in ufshcd_disable_auto_bkops()
5408 dev_err(hba->dev, "%s: failed to disable bkops %d\n", in ufshcd_disable_auto_bkops()
5414 hba->auto_bkops_enabled = false; in ufshcd_disable_auto_bkops()
5415 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); in ufshcd_disable_auto_bkops()
5416 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_disable_auto_bkops()
5422 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5427 * as well. This function would change the auto-bkops state based on
5433 hba->auto_bkops_enabled = false; in ufshcd_force_reset_auto_bkops()
5434 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5437 hba->auto_bkops_enabled = true; in ufshcd_force_reset_auto_bkops()
5438 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5441 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; in ufshcd_force_reset_auto_bkops()
5442 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_force_reset_auto_bkops()
5452 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5453 * @hba: per-adapter instance
5456 * Read the bkops_status from the UFS device and enable fBackgroundOpsEn
5461 * Returns 0 for success, non-zero in case of failure.
5463 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5475 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_ctrl()
5479 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", in ufshcd_bkops_ctrl()
5481 err = -EINVAL; in ufshcd_bkops_ctrl()
5495 * ufshcd_urgent_bkops - handle urgent bkops exception event
5496 * @hba: per-adapter instance
5506 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); in ufshcd_urgent_bkops()
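/*
 * Illustrative sketch, not part of the driver: the suspend path passes
 * BKOPS_STATUS_NON_CRITICAL so auto-bkops stays enabled whenever the device
 * reports at least that urgency level and is disabled otherwise. The
 * example_* name is hypothetical.
 */
static int example_bkops_before_suspend(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL);
}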
5520 if (hba->is_urgent_bkops_lvl_checked) in ufshcd_bkops_exception_event_handler()
5525 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_exception_event_handler()
5537 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", in ufshcd_bkops_exception_event_handler()
5540 hba->urgent_bkops_lvl = curr_status; in ufshcd_bkops_exception_event_handler()
5541 hba->is_urgent_bkops_lvl_checked = true; in ufshcd_bkops_exception_event_handler()
5548 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", in ufshcd_bkops_exception_event_handler()
5561 if (!(enable ^ hba->wb_enabled)) in ufshcd_wb_ctrl()
5572 dev_err(hba->dev, "%s write booster %s failed %d\n", in ufshcd_wb_ctrl()
5577 hba->wb_enabled = enable; in ufshcd_wb_ctrl()
5578 dev_dbg(hba->dev, "%s write booster %s %d\n", in ufshcd_wb_ctrl()
5614 if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled) in ufshcd_wb_buf_flush_enable()
5622 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n", in ufshcd_wb_buf_flush_enable()
5625 hba->wb_buf_flush_enabled = true; in ufshcd_wb_buf_flush_enable()
5627 dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret); in ufshcd_wb_buf_flush_enable()
5636 if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled) in ufshcd_wb_buf_flush_disable()
5644 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n", in ufshcd_wb_buf_flush_disable()
5647 hba->wb_buf_flush_enabled = false; in ufshcd_wb_buf_flush_disable()
5648 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret); in ufshcd_wb_buf_flush_disable()
5666 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5672 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5677 if (avail_buf < hba->vps->wb_flush_threshold) in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5692 * The ufs device needs the vcc to be ON to flush. in ufshcd_wb_need_flush()
5693 * With user-space reduction enabled, it's enough to enable flush in ufshcd_wb_need_flush()
5696 * With user-space preserved enabled, the current-buffer in ufshcd_wb_need_flush()
5707 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_need_flush()
5712 if (!hba->dev_info.b_presrv_uspc_en) { in ufshcd_wb_need_flush()
5732 pm_runtime_get_sync(hba->dev); in ufshcd_rpm_dev_flush_recheck_work()
5733 pm_runtime_put_sync(hba->dev); in ufshcd_rpm_dev_flush_recheck_work()
5737 * ufshcd_exception_event_handler - handle exceptions raised by device
5750 pm_runtime_get_sync(hba->dev); in ufshcd_exception_event_handler()
5754 dev_err(hba->dev, "%s: failed to get exception status %d\n", in ufshcd_exception_event_handler()
5759 status &= hba->ee_ctrl_mask; in ufshcd_exception_event_handler()
5772 pm_runtime_put_noidle(hba->dev); in ufshcd_exception_event_handler()
5773 pm_runtime_put(hba->dev); in ufshcd_exception_event_handler()
5777 /* Complete requests that have door-bell cleared */
5785 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5787 * @hba: per-adapter instance
5796 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5801 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) in ufshcd_quirk_dl_nac_errors()
5804 if ((hba->saved_err & DEVICE_FATAL_ERROR) || in ufshcd_quirk_dl_nac_errors()
5805 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5806 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) in ufshcd_quirk_dl_nac_errors()
5809 if ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5810 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { in ufshcd_quirk_dl_nac_errors()
5815 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5817 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5823 if ((hba->saved_err & INT_FATAL_ERRORS) || in ufshcd_quirk_dl_nac_errors()
5824 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5825 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) in ufshcd_quirk_dl_nac_errors()
5831 * - If we don't get any response then do error recovery. in ufshcd_quirk_dl_nac_errors()
5832 * - If we get response then clear the DL NAC error bit. in ufshcd_quirk_dl_nac_errors()
5835 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5837 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5843 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) in ufshcd_quirk_dl_nac_errors()
5844 hba->saved_err &= ~UIC_ERROR; in ufshcd_quirk_dl_nac_errors()
5846 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; in ufshcd_quirk_dl_nac_errors()
5847 if (!hba->saved_uic_err) in ufshcd_quirk_dl_nac_errors()
5851 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5858 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || in ufshcd_is_saved_err_fatal()
5859 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); in ufshcd_is_saved_err_fatal()
5866 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { in ufshcd_schedule_eh_work()
5867 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_schedule_eh_work()
5869 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; in ufshcd_schedule_eh_work()
5871 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; in ufshcd_schedule_eh_work()
5872 queue_work(hba->eh_wq, &hba->eh_work); in ufshcd_schedule_eh_work()
5878 down_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
5879 hba->clk_scaling.is_allowed = allow; in ufshcd_clk_scaling_allow()
5880 up_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
5886 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
5891 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
5898 pm_runtime_get_sync(hba->dev); in ufshcd_err_handling_prepare()
5899 if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) { in ufshcd_err_handling_prepare()
5910 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_err_handling_prepare()
5911 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_err_handling_prepare()
5916 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; in ufshcd_err_handling_prepare()
5921 hba->clk_scaling.is_enabled) in ufshcd_err_handling_prepare()
5927 down_write(&hba->clk_scaling_lock); in ufshcd_err_handling_prepare()
5928 up_write(&hba->clk_scaling_lock); in ufshcd_err_handling_prepare()
5929 cancel_work_sync(&hba->eeh_work); in ufshcd_err_handling_prepare()
5938 pm_runtime_put(hba->dev); in ufshcd_err_handling_unprepare()
5943 return (!hba->is_powered || hba->shutting_down || in ufshcd_err_handling_should_stop()
5944 hba->ufshcd_state == UFSHCD_STATE_ERROR || in ufshcd_err_handling_should_stop()
5945 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || in ufshcd_err_handling_should_stop()
5952 struct Scsi_Host *shost = hba->host; in ufshcd_recover_pm_error()
5957 hba->is_sys_suspended = false; in ufshcd_recover_pm_error()
5962 ret = pm_runtime_set_active(hba->dev); in ufshcd_recover_pm_error()
5971 q = sdev->request_queue; in ufshcd_recover_pm_error()
5972 if (q->dev && (q->rpm_status == RPM_SUSPENDED || in ufshcd_recover_pm_error()
5973 q->rpm_status == RPM_SUSPENDING)) in ufshcd_recover_pm_error()
5974 pm_request_resume(q->dev); in ufshcd_recover_pm_error()
5986 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; in ufshcd_is_pwr_mode_restore_needed()
5991 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK)) in ufshcd_is_pwr_mode_restore_needed()
5994 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK)) in ufshcd_is_pwr_mode_restore_needed()
6001 * ufshcd_err_handler - handle UFS errors that require s/w attention
6016 down(&hba->host_sem); in ufshcd_err_handler()
6017 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6019 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6020 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6021 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6022 up(&hba->host_sem); in ufshcd_err_handler()
6026 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6028 /* Complete requests that have door-bell cleared by h/w */ in ufshcd_err_handler()
6030 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6031 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6032 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_err_handler()
6040 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { in ufshcd_err_handler()
6043 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6046 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6051 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || in ufshcd_err_handler()
6052 (hba->saved_uic_err && in ufshcd_err_handler()
6053 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_err_handler()
6054 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); in ufshcd_err_handler()
6056 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6060 ufshcd_print_tmrs(hba, hba->outstanding_tasks); in ufshcd_err_handler()
6061 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt); in ufshcd_err_handler()
6062 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6070 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_err_handler()
6072 ((hba->saved_err & UIC_ERROR) && in ufshcd_err_handler()
6073 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | in ufshcd_err_handler()
6080 * If LINERESET was caught, UFS might have been put to PWM mode, in ufshcd_err_handler()
6083 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { in ufshcd_err_handler()
6084 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_err_handler()
6085 if (!hba->saved_uic_err) in ufshcd_err_handler()
6086 hba->saved_err &= ~UIC_ERROR; in ufshcd_err_handler()
6087 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6090 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6091 if (!hba->saved_err && !needs_restore) in ufshcd_err_handler()
6095 hba->silence_err_logs = true; in ufshcd_err_handler()
6097 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6099 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { in ufshcd_err_handler()
6107 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { in ufshcd_err_handler()
6118 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6119 hba->silence_err_logs = false; in ufshcd_err_handler()
6130 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6135 down_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6136 hba->force_pmc = true; in ufshcd_err_handler()
6137 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); in ufshcd_err_handler()
6140 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", in ufshcd_err_handler()
6143 hba->force_pmc = false; in ufshcd_err_handler()
6145 up_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6146 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6152 hba->force_reset = false; in ufshcd_err_handler()
6153 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6156 dev_err(hba->dev, "%s: reset and restore failed with err %d\n", in ufshcd_err_handler()
6160 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6165 if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_err_handler()
6166 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6167 if (hba->saved_err || hba->saved_uic_err) in ufshcd_err_handler()
6168 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", in ufshcd_err_handler()
6169 __func__, hba->saved_err, hba->saved_uic_err); in ufshcd_err_handler()
6172 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6174 up(&hba->host_sem); in ufshcd_err_handler()
6178 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6179 * @hba: per-adapter instance
6182 * IRQ_HANDLED - If interrupt is valid
6183 * IRQ_NONE - If invalid interrupt
6200 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", in ufshcd_update_uic_error()
6207 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6208 if (hba->uic_async_done && hba->active_uic_cmd) in ufshcd_update_uic_error()
6209 cmd = hba->active_uic_cmd; in ufshcd_update_uic_error()
6214 if (cmd && (cmd->command == UIC_CMD_DME_SET)) in ufshcd_update_uic_error()
6215 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6227 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; in ufshcd_update_uic_error()
6228 else if (hba->dev_quirks & in ufshcd_update_uic_error()
6231 hba->uic_error |= in ufshcd_update_uic_error()
6234 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; in ufshcd_update_uic_error()
6244 hba->uic_error |= UFSHCD_UIC_NL_ERROR; in ufshcd_update_uic_error()
6252 hba->uic_error |= UFSHCD_UIC_TL_ERROR; in ufshcd_update_uic_error()
6260 hba->uic_error |= UFSHCD_UIC_DME_ERROR; in ufshcd_update_uic_error()
6264 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", in ufshcd_update_uic_error()
6265 __func__, hba->uic_error); in ufshcd_update_uic_error()
6270 * ufshcd_check_errors - Check for errors that need s/w attention
6271 * @hba: per-adapter instance
6275 * IRQ_HANDLED - If interrupt is valid
6276 * IRQ_NONE - If invalid interrupt
6283 spin_lock(hba->host->host_lock); in ufshcd_check_errors()
6284 hba->errors |= UFSHCD_ERROR_MASK & intr_status; in ufshcd_check_errors()
6286 if (hba->errors & INT_FATAL_ERRORS) { in ufshcd_check_errors()
6288 hba->errors); in ufshcd_check_errors()
6292 if (hba->errors & UIC_ERROR) { in ufshcd_check_errors()
6293 hba->uic_error = 0; in ufshcd_check_errors()
6295 if (hba->uic_error) { in ufshcd_check_errors()
6296 dev_err(hba->dev, in ufshcd_check_errors()
6302 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { in ufshcd_check_errors()
6303 dev_err(hba->dev, in ufshcd_check_errors()
6304 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n", in ufshcd_check_errors()
6305 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? in ufshcd_check_errors()
6307 hba->errors, ufshcd_get_upmcrs(hba)); in ufshcd_check_errors()
6309 hba->errors); in ufshcd_check_errors()
6321 hba->saved_err |= hba->errors; in ufshcd_check_errors()
6322 hba->saved_uic_err |= hba->uic_error; in ufshcd_check_errors()
6325 if ((hba->saved_err & in ufshcd_check_errors()
6327 (hba->saved_uic_err && in ufshcd_check_errors()
6328 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_check_errors()
6329 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", in ufshcd_check_errors()
6330 __func__, hba->saved_err, in ufshcd_check_errors()
6331 hba->saved_uic_err); in ufshcd_check_errors()
6340 * if (!queue_eh_work) - in ufshcd_check_errors()
6341 * Other errors are either non-fatal where host recovers in ufshcd_check_errors()
6345 hba->errors = 0; in ufshcd_check_errors()
6346 hba->uic_error = 0; in ufshcd_check_errors()
6347 spin_unlock(hba->host->host_lock); in ufshcd_check_errors()
6352 * ufshcd_tmc_handler - handle task management function completion
6356 * IRQ_HANDLED - If interrupt is valid
6357 * IRQ_NONE - If invalid interrupt
6361 struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs; in ufshcd_tmc_handler()
6366 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_tmc_handler()
6368 issued = hba->outstanding_tasks & ~pending; in ufshcd_tmc_handler()
6369 for_each_set_bit(tag, &issued, hba->nutmrs) { in ufshcd_tmc_handler()
6371 struct completion *c = req->end_io_data; in ufshcd_tmc_handler()
6376 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_tmc_handler()
6382 * ufshcd_sl_intr - Interrupt service routine
6387 * IRQ_HANDLED - If interrupt is valid
6388 * IRQ_NONE - If invalid interrupt
6397 if (intr_status & UFSHCD_ERROR_MASK || hba->errors) in ufshcd_sl_intr()
6410 * ufshcd_intr - Main interrupt service routine
6415 * IRQ_HANDLED - If interrupt is valid
6416 * IRQ_NONE - If invalid interrupt
6423 int retries = hba->nutrs; in ufshcd_intr()
6426 hba->ufs_stats.last_intr_status = intr_status; in ufshcd_intr()
6427 hba->ufs_stats.last_intr_ts = ktime_get(); in ufshcd_intr()
6430 * There could be max of hba->nutrs reqs in flight and in worst case in ufshcd_intr()
6435 while (intr_status && retries--) { in ufshcd_intr()
6448 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", in ufshcd_intr()
6451 hba->ufs_stats.last_intr_status, in ufshcd_intr()
6465 if (!test_bit(tag, &hba->outstanding_tasks)) in ufshcd_clear_tm_cmd()
6468 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
6470 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
6483 struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs; in __ufshcd_issue_tm_cmd()
6484 struct request_queue *q = hba->tmf_queue; in __ufshcd_issue_tm_cmd()
6485 struct Scsi_Host *host = hba->host; in __ufshcd_issue_tm_cmd()
6498 req->end_io_data = &wait; in __ufshcd_issue_tm_cmd()
6501 spin_lock_irqsave(host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6503 task_tag = req->tag; in __ufshcd_issue_tm_cmd()
6504 tmf_rqs[req->tag] = req; in __ufshcd_issue_tm_cmd()
6505 treq->req_header.dword_0 |= cpu_to_be32(task_tag); in __ufshcd_issue_tm_cmd()
6507 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
6511 __set_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
6520 spin_unlock_irqrestore(host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6529 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", in __ufshcd_issue_tm_cmd()
6532 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", in __ufshcd_issue_tm_cmd()
6534 err = -ETIMEDOUT; in __ufshcd_issue_tm_cmd()
6537 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
6542 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6543 tmf_rqs[req->tag] = NULL; in __ufshcd_issue_tm_cmd()
6544 __clear_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
6545 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6554 * ufshcd_issue_tm_cmd - issues task management commands to controller
6561 * Returns non-zero value on error, zero on success.
6586 if (err == -ETIMEDOUT) in ufshcd_issue_tm_cmd()
6591 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", in ufshcd_issue_tm_cmd()
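/*
 * Illustrative sketch, not part of the driver: a logical unit reset is a
 * task management function addressed to the LUN with task id 0, mirroring
 * ufshcd_eh_device_reset_handler(). The example_* name is hypothetical.
 */
static int example_lun_reset(struct ufs_hba *hba, u8 lun)
{
	u8 resp = 0xF;
	int err;

	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (!err && resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
		err = resp;
	return err;
}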
6600 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6601 * @hba: per-adapter instance
6609 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6614 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6624 const u32 tag = ufs_hba_add_info(hba)->reserved_slot; in ufshcd_issue_devman_upiu_cmd()
6629 /* Protects use of ufs_hba_add_info(hba)->reserved_slot. */ in ufshcd_issue_devman_upiu_cmd()
6630 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_issue_devman_upiu_cmd()
6632 down_read(&hba->clk_scaling_lock); in ufshcd_issue_devman_upiu_cmd()
6634 lrbp = &hba->lrb[tag]; in ufshcd_issue_devman_upiu_cmd()
6635 WARN_ON(lrbp->cmd); in ufshcd_issue_devman_upiu_cmd()
6636 lrbp->cmd = NULL; in ufshcd_issue_devman_upiu_cmd()
6637 lrbp->sense_bufflen = 0; in ufshcd_issue_devman_upiu_cmd()
6638 lrbp->sense_buffer = NULL; in ufshcd_issue_devman_upiu_cmd()
6639 lrbp->task_tag = tag; in ufshcd_issue_devman_upiu_cmd()
6640 lrbp->lun = 0; in ufshcd_issue_devman_upiu_cmd()
6641 lrbp->intr_cmd = true; in ufshcd_issue_devman_upiu_cmd()
6643 hba->dev_cmd.type = cmd_type; in ufshcd_issue_devman_upiu_cmd()
6645 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_issue_devman_upiu_cmd()
6646 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; in ufshcd_issue_devman_upiu_cmd()
6648 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; in ufshcd_issue_devman_upiu_cmd()
6651 req_upiu->header.dword_0 |= cpu_to_be32(tag); in ufshcd_issue_devman_upiu_cmd()
6656 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); in ufshcd_issue_devman_upiu_cmd()
6662 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len); in ufshcd_issue_devman_upiu_cmd()
6666 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); in ufshcd_issue_devman_upiu_cmd()
6668 hba->dev_cmd.complete = &wait; in ufshcd_issue_devman_upiu_cmd()
6675 * ignore the return value here - ufshcd_check_query_response is in ufshcd_issue_devman_upiu_cmd()
6682 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); in ufshcd_issue_devman_upiu_cmd()
6684 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu); in ufshcd_issue_devman_upiu_cmd()
6685 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & in ufshcd_issue_devman_upiu_cmd()
6692 dev_warn(hba->dev, in ufshcd_issue_devman_upiu_cmd()
6696 err = -EINVAL; in ufshcd_issue_devman_upiu_cmd()
6700 up_read(&hba->clk_scaling_lock); in ufshcd_issue_devman_upiu_cmd()
6705 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6706 * @hba: per-adapter instance
6708 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
6730 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC; in ufshcd_exec_raw_upiu_cmd()
6738 mutex_lock(&hba->dev_cmd.lock); in ufshcd_exec_raw_upiu_cmd()
6742 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_exec_raw_upiu_cmd()
6753 if (err == -ETIMEDOUT) in ufshcd_exec_raw_upiu_cmd()
6758 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, in ufshcd_exec_raw_upiu_cmd()
6767 err = -EINVAL; in ufshcd_exec_raw_upiu_cmd()
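/*
 * A note on the tm_f extraction above: ">>" binds tighter than "&", so
 * the expression reads (dword_1 >> 16) & MASK_TM_FUNC, i.e. byte 2 of
 * the big-endian header word selects the TM function. A tiny standalone
 * model (the 0xff mask here is illustrative):
 */
#include <stdint.h>
#include <stdio.h>

#define MASK_TM_FUNC 0xff

int main(void)
{
	/* header dword_1 in host order, i.e. after be32_to_cpu() */
	uint32_t dword_1 = 0x00ab0000;
	uint8_t tm_f = dword_1 >> 16 & MASK_TM_FUNC;

	printf("tm function: 0x%02x\n", tm_f);	/* prints 0xab */
	return 0;
}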
6776 * ufshcd_eh_device_reset_handler - device reset handler registered to
6790 host = cmd->device->host; in ufshcd_eh_device_reset_handler()
6793 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); in ufshcd_eh_device_reset_handler()
6802 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { in ufshcd_eh_device_reset_handler()
6803 if (hba->lrb[pos].lun == lun) { in ufshcd_eh_device_reset_handler()
6812 hba->req_abort_count = 0; in ufshcd_eh_device_reset_handler()
6817 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_eh_device_reset_handler()
6828 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_set_req_abort_skip()
6829 lrbp = &hba->lrb[tag]; in ufshcd_set_req_abort_skip()
6830 lrbp->req_abort_skip = true; in ufshcd_set_req_abort_skip()
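/*
 * ufshcd_set_req_abort_skip() walks the outstanding-request bitmap with
 * for_each_set_bit(). Outside the kernel tree the same walk is just a
 * masked loop; a self-contained model (nutrs stands in for the queue
 * depth):
 */
#include <stdio.h>

int main(void)
{
	unsigned long bitmap = 0x2d;	/* tags 0, 2, 3 and 5 outstanding */
	unsigned int nutrs = 32, tag;

	for (tag = 0; tag < nutrs; tag++)
		if (bitmap & (1UL << tag))
			printf("mark tag %u req_abort_skip\n", tag);
	return 0;
}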
6835 * ufshcd_try_to_abort_task - abort a specific task
6840 * command, and in host controller by clearing the door-bell register. There can
6845 * Returns zero on success, non-zero on failure
6849 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_try_to_abort_task()
6855 for (poll_cnt = 100; poll_cnt; poll_cnt--) { in ufshcd_try_to_abort_task()
6856 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
6860 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", in ufshcd_try_to_abort_task()
6868 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", in ufshcd_try_to_abort_task()
6877 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", in ufshcd_try_to_abort_task()
6881 dev_err(hba->dev, in ufshcd_try_to_abort_task()
6891 err = -EBUSY; in ufshcd_try_to_abort_task()
6895 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
6900 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", in ufshcd_try_to_abort_task()
6908 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", in ufshcd_try_to_abort_task()
6916 * ufshcd_abort - scsi host template eh_abort_handler callback
6932 host = cmd->device->host; in ufshcd_abort()
6934 tag = cmd->request->tag; in ufshcd_abort()
6935 lrbp = &hba->lrb[tag]; in ufshcd_abort()
6937 dev_err(hba->dev, in ufshcd_abort()
6938 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", in ufshcd_abort()
6939 __func__, tag, cmd, cmd->request); in ufshcd_abort()
6946 if (!(test_bit(tag, &hba->outstanding_reqs))) { in ufshcd_abort()
6947 dev_err(hba->dev, in ufshcd_abort()
6949 __func__, tag, hba->outstanding_reqs, reg); in ufshcd_abort()
6954 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); in ufshcd_abort()
6964 if (!hba->req_abort_count) { in ufshcd_abort()
6973 hba->req_abort_count++; in ufshcd_abort()
6976 dev_err(hba->dev, in ufshcd_abort()
6984 * Task abort to the device W-LUN is illegal. When this command in ufshcd_abort()
6988 * the lrb taken by this cmd and re-set it in outstanding_reqs, in ufshcd_abort()
6991 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { in ufshcd_abort()
6992 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); in ufshcd_abort()
6994 spin_lock_irqsave(host->host_lock, flags); in ufshcd_abort()
6995 hba->force_reset = true; in ufshcd_abort()
6997 spin_unlock_irqrestore(host->host_lock, flags); in ufshcd_abort()
7002 if (lrbp->req_abort_skip) { in ufshcd_abort()
7003 dev_err(hba->dev, "%s: skipping abort\n", __func__); in ufshcd_abort()
7004 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7010 dev_err(hba->dev, "%s: failed with err %d\n", __func__, res); in ufshcd_abort()
7011 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7019 spin_lock_irqsave(host->host_lock, flags); in ufshcd_abort()
7020 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); in ufshcd_abort()
7021 spin_unlock_irqrestore(host->host_lock, flags); in ufshcd_abort()
7035 * ufshcd_host_reset_and_restore - reset and restore host controller
7036 * @hba: per-adapter instance
7039 * local and remote (device) UniPro stack and the attributes
7042 * Returns zero on success, non-zero on failure
7054 hba->silence_err_logs = true; in ufshcd_host_reset_and_restore()
7056 hba->silence_err_logs = false; in ufshcd_host_reset_and_restore()
7068 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); in ufshcd_host_reset_and_restore()
7074 * ufshcd_reset_and_restore - reset and re-initialize host/device
7075 * @hba: per-adapter instance
7077 * Reset and recover device, host and re-establish link. This
7080 * Returns zero on success, non-zero on failure
7094 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7095 saved_err = hba->saved_err; in ufshcd_reset_and_restore()
7096 saved_uic_err = hba->saved_uic_err; in ufshcd_reset_and_restore()
7097 hba->saved_err = 0; in ufshcd_reset_and_restore()
7098 hba->saved_uic_err = 0; in ufshcd_reset_and_restore()
7099 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7106 } while (err && --retries); in ufshcd_reset_and_restore()
7108 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7110 * Inform scsi mid-layer that we did reset and allow it to handle in ufshcd_reset_and_restore()
7113 scsi_report_bus_reset(hba->host, 0); in ufshcd_reset_and_restore()
7115 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_reset_and_restore()
7116 hba->saved_err |= saved_err; in ufshcd_reset_and_restore()
7117 hba->saved_uic_err |= saved_uic_err; in ufshcd_reset_and_restore()
7119 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7125 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7136 hba = shost_priv(cmd->device->host); in ufshcd_eh_host_reset_handler()
7138 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7139 hba->force_reset = true; in ufshcd_eh_host_reset_handler()
7141 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); in ufshcd_eh_host_reset_handler()
7142 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7144 flush_work(&hba->eh_work); in ufshcd_eh_host_reset_handler()
7146 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7147 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) in ufshcd_eh_host_reset_handler()
7149 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7155 * ufshcd_get_max_icc_level - calculate the ICC level
7169 for (i = start_scan; i >= 0; i--) { in ufshcd_get_max_icc_level()
7170 data = be16_to_cpup((__be16 *)&buff[2 * i]); in ufshcd_get_max_icc_level()
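/*
 * The loop above scans the power descriptor's current table from the
 * highest ICC level downward, reading one big-endian u16 per level. A
 * simplified, self-contained model of that scan -- real entries also
 * encode a unit multiplier in their top bits, which this sketch
 * deliberately ignores:
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

/* Highest level whose current draw fits the regulator budget (uA). */
static int max_icc_level(const uint8_t *buff, int start_scan,
			 uint16_t budget_ua)
{
	int i;

	for (i = start_scan; i >= 0; i--) {
		uint16_t data = get_be16(&buff[2 * i]);

		if (data && data <= budget_ua)
			return i;
	}
	return 0;
}

int main(void)
{
	/* levels 0..3 drawing 100, 200, 300 and 400 uA, big-endian */
	const uint8_t buff[] = { 0, 100, 0, 200, 1, 44, 1, 144 };

	printf("max level: %d\n", max_icc_level(buff, 3, 350)); /* 2 */
	return 0;
}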
7200 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7202 * @hba: per-adapter instance
7213 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || in ufshcd_find_max_sup_active_icc_level()
7214 !hba->vreg_info.vccq2) { in ufshcd_find_max_sup_active_icc_level()
7215 dev_err(hba->dev, in ufshcd_find_max_sup_active_icc_level()
7221 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA) in ufshcd_find_max_sup_active_icc_level()
7223 hba->vreg_info.vcc->max_uA, in ufshcd_find_max_sup_active_icc_level()
7224 POWER_DESC_MAX_ACTV_ICC_LVLS - 1, in ufshcd_find_max_sup_active_icc_level()
7227 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA) in ufshcd_find_max_sup_active_icc_level()
7229 hba->vreg_info.vccq->max_uA, in ufshcd_find_max_sup_active_icc_level()
7233 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA) in ufshcd_find_max_sup_active_icc_level()
7235 hba->vreg_info.vccq2->max_uA, in ufshcd_find_max_sup_active_icc_level()
7245 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER]; in ufshcd_set_active_icc_lvl()
7256 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7264 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); in ufshcd_set_active_icc_lvl()
7270 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7279 * ufshcd_scsi_add_wlus - Adds required W-LUs
7280 * @hba: per-adapter instance
7282 * The UFS device specification requires UFS devices to support 4 well-known
7285 * "UFS Device" (address: 50h)
7288 * UFS device's power management needs to be controlled by "POWER CONDITION"
7290 * will take effect only when it is sent to the "UFS device" well-known logical unit
7292 * order for the UFS host driver to send the SSU command for power management.
7301 * Returns zero on success (all required W-LUs are added successfully),
7302 * non-zero error value on failure (if failed to add any of the required W-LU).
7309 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7311 if (IS_ERR(hba->sdev_ufs_device)) { in ufshcd_scsi_add_wlus()
7312 ret = PTR_ERR(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
7313 hba->sdev_ufs_device = NULL; in ufshcd_scsi_add_wlus()
7316 scsi_device_put(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
7318 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7320 if (IS_ERR(hba->sdev_rpmb)) { in ufshcd_scsi_add_wlus()
7321 ret = PTR_ERR(hba->sdev_rpmb); in ufshcd_scsi_add_wlus()
7324 scsi_device_put(hba->sdev_rpmb); in ufshcd_scsi_add_wlus()
7326 sdev_boot = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7329 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); in ufshcd_scsi_add_wlus()
7335 scsi_remove_device(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
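/*
 * The W-LUs added above sit at UPIU addresses 50h (UFS Device), 44h
 * (RPMB) and 30h (BOOT); to hand them to the SCSI midlayer they are
 * folded into the SAM well-known-LUN range. A sketch of that mapping,
 * assuming the usual SCSI_W_LUN_BASE of 0xc100 and a UFS W-LUN flag bit
 * of 1 << 7:
 */
#include <stdint.h>
#include <stdio.h>

#define UFS_UPIU_WLUN_ID	(1 << 7)
#define SCSI_W_LUN_BASE		0xc100

static uint16_t upiu_wlun_to_scsi_wlun(uint8_t upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

int main(void)
{
	/* "UFS Device" W-LU 0x50 becomes SCSI LUN 0xc150 */
	printf("SCSI LUN: 0x%04x\n", upiu_wlun_to_scsi_wlun(0x50));
	return 0;
}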
7342 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_wb_probe()
7349 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or in ufshcd_wb_probe()
7350 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES in ufshcd_wb_probe()
7353 if (!(dev_info->wspecversion >= 0x310 || in ufshcd_wb_probe()
7354 dev_info->wspecversion == 0x220 || in ufshcd_wb_probe()
7355 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) in ufshcd_wb_probe()
7358 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] < in ufshcd_wb_probe()
7362 dev_info->d_ext_ufs_feature_sup = in ufshcd_wb_probe()
7366 if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP)) in ufshcd_wb_probe()
7375 dev_info->b_wb_buffer_type = in ufshcd_wb_probe()
7378 dev_info->b_presrv_uspc_en = in ufshcd_wb_probe()
7381 if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) { in ufshcd_wb_probe()
7382 dev_info->d_wb_alloc_units = in ufshcd_wb_probe()
7385 if (!dev_info->d_wb_alloc_units) in ufshcd_wb_probe()
7396 dev_info->wb_dedicated_lu = lun; in ufshcd_wb_probe()
7407 hba->caps &= ~UFSHCD_CAP_WB_EN; in ufshcd_wb_probe()
7413 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_fixup_dev_quirks()
7418 for (f = fixups; f->quirk; f++) { in ufshcd_fixup_dev_quirks()
7419 if ((f->wmanufacturerid == dev_info->wmanufacturerid || in ufshcd_fixup_dev_quirks()
7420 f->wmanufacturerid == UFS_ANY_VENDOR) && in ufshcd_fixup_dev_quirks()
7421 ((dev_info->model && in ufshcd_fixup_dev_quirks()
7422 STR_PRFX_EQUAL(f->model, dev_info->model)) || in ufshcd_fixup_dev_quirks()
7423 !strcmp(f->model, UFS_ANY_MODEL))) in ufshcd_fixup_dev_quirks()
7424 hba->dev_quirks |= f->quirk; in ufshcd_fixup_dev_quirks()
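/*
 * ufshcd_fixup_dev_quirks() above ORs a quirk in whenever vendor and
 * model match, with wildcard entries for either field and a prefix-style
 * model compare (STR_PRFX_EQUAL). A self-contained model of that
 * matching; the table entries below are invented placeholders, not real
 * device quirks:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UFS_ANY_VENDOR	0xffff
#define UFS_ANY_MODEL	"ANY_MODEL"

struct ufs_dev_fix {
	uint16_t wmanufacturerid;
	const char *model;
	uint32_t quirk;
};

static uint32_t apply_fixups(const struct ufs_dev_fix *f,
			     uint16_t vendor, const char *model)
{
	uint32_t quirks = 0;

	for (; f->quirk; f++)	/* sentinel entry has quirk == 0 */
		if ((f->wmanufacturerid == vendor ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		    (!strcmp(f->model, UFS_ANY_MODEL) ||
		     !strncmp(f->model, model, strlen(f->model))))
			quirks |= f->quirk;
	return quirks;
}

int main(void)
{
	static const struct ufs_dev_fix table[] = {
		{ 0x1234, "EXAMPLE0", 1 << 0 },		/* placeholder */
		{ UFS_ANY_VENDOR, UFS_ANY_MODEL, 1 << 5 },
		{ }
	};

	printf("quirks: 0x%x\n", apply_fixups(table, 0x1234, "EXAMPLE01"));
	return 0;	/* prints quirks: 0x21 */
}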
7444 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_get_device_desc()
7448 err = -ENOMEM; in ufs_get_device_desc()
7453 hba->desc_size[QUERY_DESC_IDN_DEVICE]); in ufs_get_device_desc()
7455 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", in ufs_get_device_desc()
7464 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | in ufs_get_device_desc()
7468 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | in ufs_get_device_desc()
7474 if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION && in ufs_get_device_desc()
7487 dev_info->hpb_enabled = true; in ufs_get_device_desc()
7491 &dev_info->model, SD_ASCII_STD); in ufs_get_device_desc()
7493 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", in ufs_get_device_desc()
7515 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_put_device_desc()
7517 kfree(dev_info->model); in ufs_put_device_desc()
7518 dev_info->model = NULL; in ufs_put_device_desc()
7522 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7523 * @hba: per-adapter instance
7526 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7530 * Returns zero on success, non-zero error value on failure.
7557 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7558 * @hba: per-adapter instance
7561 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7562 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7565 * Returns zero on success, non-zero error value on failure.
7599 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7601 * @hba: per-adapter instance
7603 * Some UFS devices require host PA_TACTIVATE to be lower than device
7607 * Returns zero on success, non-zero error value on failure.
7629 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
7631 return -EINVAL; in ufshcd_quirk_tune_host_pa_tactivate()
7636 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
7638 return -EINVAL; in ufshcd_quirk_tune_host_pa_tactivate()
7650 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1]; in ufshcd_quirk_tune_host_pa_tactivate()
7652 gran_to_us_table[peer_granularity - 1]; in ufshcd_quirk_tune_host_pa_tactivate()
7658 gran_to_us_table[peer_granularity - 1]; in ufshcd_quirk_tune_host_pa_tactivate()
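/*
 * The comparison above only makes sense once both PA_TACTIVATE values
 * are in the same unit, so each is multiplied by its side's
 * PA_GRANULARITY step. Assuming the driver's 1/4/8/16/32/100 us step
 * table for granularity values 1..6, the conversion is a single lookup:
 */
#include <stdio.h>

static const unsigned int gran_to_us_table[] = { 1, 4, 8, 16, 32, 100 };

int main(void)
{
	unsigned int granularity = 4;	/* PA_GRANULARITY, 1-based */
	unsigned int pa_tactivate = 3;	/* in granularity units */

	printf("PA_TACTIVATE = %u us\n",
	       pa_tactivate * gran_to_us_table[granularity - 1]); /* 48 us */
	return 0;
}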
7677 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) in ufshcd_tune_unipro_params()
7681 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) in ufshcd_tune_unipro_params()
7687 hba->ufs_stats.hibern8_exit_cnt = 0; in ufshcd_clear_dbg_ufs_stats()
7688 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_clear_dbg_ufs_stats()
7689 hba->req_abort_count = 0; in ufshcd_clear_dbg_ufs_stats()
7698 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY]; in ufshcd_device_geo_params_init()
7701 err = -ENOMEM; in ufshcd_device_geo_params_init()
7708 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n", in ufshcd_device_geo_params_init()
7714 hba->dev_info.max_lu_supported = 32; in ufshcd_device_geo_params_init()
7716 hba->dev_info.max_lu_supported = 8; in ufshcd_device_geo_params_init()
7718 if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= in ufshcd_device_geo_params_init()
7753 hba->dev_ref_clk_freq = in ufshcd_parse_dev_ref_clk_freq()
7756 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) in ufshcd_parse_dev_ref_clk_freq()
7757 dev_err(hba->dev, in ufshcd_parse_dev_ref_clk_freq()
7765 u32 freq = hba->dev_ref_clk_freq; in ufshcd_set_dev_ref_clk()
7771 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", in ufshcd_set_dev_ref_clk()
7783 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", in ufshcd_set_dev_ref_clk()
7788 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", in ufshcd_set_dev_ref_clk()
7802 hba->desc_size[i] = QUERY_DESC_MAX_SIZE; in ufshcd_device_params_init()
7804 /* Init UFS geometry descriptor related parameters */ in ufshcd_device_params_init()
7809 /* Check and apply UFS device quirks */ in ufshcd_device_params_init()
7812 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", in ufshcd_device_params_init()
7821 hba->dev_info.f_power_on_wp_en = flag; in ufshcd_device_params_init()
7823 /* Probe maximum power mode co-supported by both UFS host and device */ in ufshcd_device_params_init()
7825 dev_err(hba->dev, in ufshcd_device_params_init()
7833 * ufshcd_add_lus - probe and add UFS logical units
7834 * @hba: per-adapter instance
7845 /* Initialize devfreq after UFS device is detected */ in ufshcd_add_lus()
7847 memcpy(&hba->clk_scaling.saved_pwr_info.info, in ufshcd_add_lus()
7848 &hba->pwr_info, in ufshcd_add_lus()
7850 hba->clk_scaling.saved_pwr_info.is_valid = true; in ufshcd_add_lus()
7851 hba->clk_scaling.is_allowed = true; in ufshcd_add_lus()
7857 hba->clk_scaling.is_enabled = true; in ufshcd_add_lus()
7863 scsi_scan_host(hba->host); in ufshcd_add_lus()
7864 pm_runtime_put_sync(hba->dev); in ufshcd_add_lus()
7871 * ufshcd_probe_hba - probe hba to detect device and initialize
7872 * @hba: per-adapter instance
7875 * Execute link-startup and verify device initialization
7883 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_probe_hba()
7889 if (hba->quirks & UFSHCD_QUIRK_SKIP_INTERFACE_CONFIGURATION) in ufshcd_probe_hba()
7903 /* Initiate UFS initialization, and wait until completion */ in ufshcd_probe_hba()
7909 * Initialize UFS device parameters used by the driver; these in ufshcd_probe_hba()
7910 * parameters are associated with UFS descriptors. in ufshcd_probe_hba()
7920 /* UFS device is also active now */ in ufshcd_probe_hba()
7925 if (hba->max_pwr_info.is_valid) { in ufshcd_probe_hba()
7930 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) in ufshcd_probe_hba()
7932 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); in ufshcd_probe_hba()
7934 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", in ufshcd_probe_hba()
7942 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec) in ufshcd_probe_hba()
7943 * and for removable UFS card as well, hence always set the parameter. in ufshcd_probe_hba()
7950 /* Enable Auto-Hibernate if configured */ in ufshcd_probe_hba()
7957 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_probe_hba()
7959 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_probe_hba()
7960 else if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_probe_hba()
7961 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_probe_hba()
7962 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_probe_hba()
7964 trace_ufshcd_init(dev_name(hba->dev), ret, in ufshcd_probe_hba()
7966 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_probe_hba()
7971 * ufshcd_async_scan - asynchronous execution for probing hba
7980 down(&hba->host_sem); in ufshcd_async_scan()
7981 /* Initialize hba, detect and initialize UFS device */ in ufshcd_async_scan()
7983 up(&hba->host_sem); in ufshcd_async_scan()
7987 /* Probe and add UFS logical units */ in ufshcd_async_scan()
7995 pm_runtime_put_sync(hba->dev); in ufshcd_async_scan()
8032 .this_id = -1,
8040 .dma_boundary = PAGE_SIZE - 1,
8058 if (!vreg->max_uA) in ufshcd_config_vreg_load()
8061 ret = regulator_set_load(vreg->reg, ua); in ufshcd_config_vreg_load()
8064 __func__, vreg->name, ua, ret); in ufshcd_config_vreg_load()
8073 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); in ufshcd_config_vreg_lpm()
8082 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); in ufshcd_config_vreg_hpm()
8095 reg = vreg->reg; in ufshcd_config_vreg()
8096 name = vreg->name; in ufshcd_config_vreg()
8099 uA_load = on ? vreg->max_uA : 0; in ufshcd_config_vreg()
8104 if (vreg->min_uV && vreg->max_uV) { in ufshcd_config_vreg()
8105 min_uV = on ? vreg->min_uV : 0; in ufshcd_config_vreg()
8106 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); in ufshcd_config_vreg()
8121 if (!vreg || vreg->enabled) in ufshcd_enable_vreg()
8126 ret = regulator_enable(vreg->reg); in ufshcd_enable_vreg()
8129 vreg->enabled = true; in ufshcd_enable_vreg()
8132 __func__, vreg->name, ret); in ufshcd_enable_vreg()
8141 if (!vreg || !vreg->enabled || vreg->always_on) in ufshcd_disable_vreg()
8144 ret = regulator_disable(vreg->reg); in ufshcd_disable_vreg()
8149 vreg->enabled = false; in ufshcd_disable_vreg()
8152 __func__, vreg->name, ret); in ufshcd_disable_vreg()
8161 struct device *dev = hba->dev; in ufshcd_setup_vreg()
8162 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_vreg()
8164 ret = ufshcd_toggle_vreg(dev, info->vcc, on); in ufshcd_setup_vreg()
8168 ret = ufshcd_toggle_vreg(dev, info->vccq, on); in ufshcd_setup_vreg()
8172 ret = ufshcd_toggle_vreg(dev, info->vccq2, on); in ufshcd_setup_vreg()
8176 ufshcd_toggle_vreg(dev, info->vccq2, false); in ufshcd_setup_vreg()
8177 ufshcd_toggle_vreg(dev, info->vccq, false); in ufshcd_setup_vreg()
8178 ufshcd_toggle_vreg(dev, info->vcc, false); in ufshcd_setup_vreg()
8185 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_hba_vreg()
8187 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); in ufshcd_setup_hba_vreg()
8197 vreg->reg = devm_regulator_get(dev, vreg->name); in ufshcd_get_vreg()
8198 if (IS_ERR(vreg->reg)) { in ufshcd_get_vreg()
8199 ret = PTR_ERR(vreg->reg); in ufshcd_get_vreg()
8201 __func__, vreg->name, ret); in ufshcd_get_vreg()
8210 struct device *dev = hba->dev; in ufshcd_init_vreg()
8211 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_vreg()
8213 ret = ufshcd_get_vreg(dev, info->vcc); in ufshcd_init_vreg()
8217 ret = ufshcd_get_vreg(dev, info->vccq); in ufshcd_init_vreg()
8219 ret = ufshcd_get_vreg(dev, info->vccq2); in ufshcd_init_vreg()
8226 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_hba_vreg()
8229 return ufshcd_get_vreg(hba->dev, info->vdd_hba); in ufshcd_init_hba_vreg()
8238 struct list_head *head = &hba->clk_list_head; in ufshcd_setup_clocks()
8251 if (!IS_ERR_OR_NULL(clki->clk)) { in ufshcd_setup_clocks()
8257 clki->keep_link_active) in ufshcd_setup_clocks()
8260 clk_state_changed = on ^ clki->enabled; in ufshcd_setup_clocks()
8261 if (on && !clki->enabled) { in ufshcd_setup_clocks()
8262 ret = clk_prepare_enable(clki->clk); in ufshcd_setup_clocks()
8264 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", in ufshcd_setup_clocks()
8265 __func__, clki->name, ret); in ufshcd_setup_clocks()
8268 } else if (!on && clki->enabled) { in ufshcd_setup_clocks()
8269 clk_disable_unprepare(clki->clk); in ufshcd_setup_clocks()
8271 clki->enabled = on; in ufshcd_setup_clocks()
8272 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, in ufshcd_setup_clocks()
8273 clki->name, on ? "en" : "dis"); in ufshcd_setup_clocks()
8284 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) in ufshcd_setup_clocks()
8285 clk_disable_unprepare(clki->clk); in ufshcd_setup_clocks()
8288 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_setup_clocks()
8289 hba->clk_gating.state = CLKS_ON; in ufshcd_setup_clocks()
8290 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
8291 hba->clk_gating.state); in ufshcd_setup_clocks()
8292 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_setup_clocks()
8296 trace_ufshcd_profile_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
8306 struct device *dev = hba->dev; in ufshcd_init_clocks()
8307 struct list_head *head = &hba->clk_list_head; in ufshcd_init_clocks()
8313 if (!clki->name) in ufshcd_init_clocks()
8316 clki->clk = devm_clk_get(dev, clki->name); in ufshcd_init_clocks()
8317 if (IS_ERR(clki->clk)) { in ufshcd_init_clocks()
8318 ret = PTR_ERR(clki->clk); in ufshcd_init_clocks()
8320 __func__, clki->name, ret); in ufshcd_init_clocks()
8329 if (!strcmp(clki->name, "ref_clk")) in ufshcd_init_clocks()
8330 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); in ufshcd_init_clocks()
8332 if (clki->max_freq) { in ufshcd_init_clocks()
8333 ret = clk_set_rate(clki->clk, clki->max_freq); in ufshcd_init_clocks()
8335 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_init_clocks()
8336 __func__, clki->name, in ufshcd_init_clocks()
8337 clki->max_freq, ret); in ufshcd_init_clocks()
8340 clki->curr_freq = clki->max_freq; in ufshcd_init_clocks()
8343 clki->name, clk_get_rate(clki->clk)); in ufshcd_init_clocks()
8353 if (!hba->vops) in ufshcd_variant_hba_init()
8365 dev_err(hba->dev, "%s: variant %s init failed err %d\n", in ufshcd_variant_hba_init()
8372 if (!hba->vops) in ufshcd_variant_hba_exit()
8385 * Handle host controller power separately from the UFS device power in ufshcd_hba_init()
8386 * rails, as this helps control the UFS host controller power in ufshcd_hba_init()
8387 * collapse independently of the UFS device power collapse. in ufshcd_hba_init()
8421 hba->is_powered = true; in ufshcd_hba_init()
8436 if (hba->is_powered) { in ufshcd_hba_exit()
8439 if (hba->eh_wq) in ufshcd_hba_exit()
8440 destroy_workqueue(hba->eh_wq); in ufshcd_hba_exit()
8446 hba->is_powered = false; in ufshcd_hba_exit()
8452 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8458 * Returns non-zero if failed to set the requested power mode
8469 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
8470 sdp = hba->sdev_ufs_device; in ufshcd_set_dev_pwr_mode()
8474 ret = -ENODEV; in ufshcd_set_dev_pwr_mode()
8478 ret = -ENODEV; in ufshcd_set_dev_pwr_mode()
8480 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
8486 * If scsi commands fail, the scsi mid-layer schedules scsi error- in ufshcd_set_dev_pwr_mode()
8491 hba->host->eh_noresume = 1; in ufshcd_set_dev_pwr_mode()
8500 for (retries = 3; retries > 0; --retries) { in ufshcd_set_dev_pwr_mode()
8517 hba->curr_dev_pwr_mode = pwr_mode; in ufshcd_set_dev_pwr_mode()
8520 hba->host->eh_noresume = 0; in ufshcd_set_dev_pwr_mode()
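/*
 * ufshcd_set_dev_pwr_mode() above retries a SCSI START STOP UNIT command
 * against the UFS Device W-LU. The 6-byte CDB carries the requested
 * power mode in the upper nibble of byte 4; a sketch of that layout,
 * where 0x1b is the standard START STOP UNIT opcode and 2 models
 * UFS_SLEEP_PWR_MODE:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t pwr_mode = 2;		/* e.g. sleep */
	uint8_t cdb[6] = { 0x1b };	/* START STOP UNIT */

	cdb[4] = pwr_mode << 4;		/* POWER CONDITION field */
	printf("CDB: %02x %02x %02x %02x %02x %02x\n",
	       cdb[0], cdb[1], cdb[2], cdb[3], cdb[4], cdb[5]);
	return 0;
}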
8530 if (req_link_state == hba->uic_link_state) in ufshcd_link_state_transition()
8538 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
8548 (!check_for_bkops || !hba->auto_bkops_enabled)) { in ufshcd_link_state_transition()
8558 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
8583 * It seems some UFS devices may keep drawing more than sleep current in ufshcd_vreg_set_lpm()
8584 * (at least for 500us) from UFS rails (especially from VCCQ rail). in ufshcd_vreg_set_lpm()
8585 * To avoid this situation, add 2ms delay before putting these UFS in ufshcd_vreg_set_lpm()
8589 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) in ufshcd_vreg_set_lpm()
8593 * If UFS device is either in UFS_Sleep turn off VCC rail to save some in ufshcd_vreg_set_lpm()
8596 * If UFS device and link is in OFF state, all power supplies (VCC, in ufshcd_vreg_set_lpm()
8598 * required. If UFS link is inactive (Hibern8 or OFF state) and device in ufshcd_vreg_set_lpm()
8608 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_lpm()
8612 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_lpm()
8615 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_lpm()
8616 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_lpm()
8621 * Some UFS devices require a delay after the VCC power rail is turned off. in ufshcd_vreg_set_lpm()
8623 if (vcc_off && hba->vreg_info.vcc && in ufshcd_vreg_set_lpm()
8624 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) in ufshcd_vreg_set_lpm()
8633 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_hpm()
8637 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
8640 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_hpm()
8644 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); in ufshcd_vreg_set_hpm()
8649 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
8651 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_hpm()
8669 * ufshcd_suspend - helper function for suspend operations
8673 * This function will try to put the UFS device and link into low power
8678 * both the UFS device and the UFS link are powered off.
8680 * NOTE: UFS device & link must be active before we enter this function.
8682 * Returns 0 for success and non-zero for failure
8691 hba->pm_op_in_progress = 1; in ufshcd_suspend()
8694 hba->rpm_lvl : hba->spm_lvl; in ufshcd_suspend()
8709 hba->clk_gating.is_suspended = true; in ufshcd_suspend()
8719 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && in ufshcd_suspend()
8720 (req_link_state == hba->uic_link_state)) in ufshcd_suspend()
8723 /* UFS device & link must be active before we enter this function */ in ufshcd_suspend()
8725 ret = -EINVAL; in ufshcd_suspend()
8748 hba->dev_info.b_rpm_dev_flush_capable = in ufshcd_suspend()
8749 hba->auto_bkops_enabled || in ufshcd_suspend()
8756 flush_work(&hba->eeh_work); in ufshcd_suspend()
8758 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { in ufshcd_suspend()
8759 if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || in ufshcd_suspend()
8765 if (!hba->dev_info.b_rpm_dev_flush_capable) { in ufshcd_suspend()
8794 hba->clk_gating.state = CLKS_OFF; in ufshcd_suspend()
8795 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_suspend()
8796 hba->clk_gating.state); in ufshcd_suspend()
8818 hba->clk_gating.is_suspended = false; in ufshcd_suspend()
8819 hba->dev_info.b_rpm_dev_flush_capable = false; in ufshcd_suspend()
8823 if (hba->dev_info.b_rpm_dev_flush_capable) { in ufshcd_suspend()
8824 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, in ufshcd_suspend()
8828 hba->pm_op_in_progress = 0; in ufshcd_suspend()
8836 * ufshcd_resume - helper function for resume operations
8840 * This function basically brings the UFS device, UniPro link and controller
8843 * Returns 0 for success and non-zero for failure
8850 hba->pm_op_in_progress = 1; in ufshcd_resume()
8851 old_link_state = hba->uic_link_state; in ufshcd_resume()
8880 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_resume()
8909 * keep auto-bkops enabled or else disable it. in ufshcd_resume()
8913 hba->clk_gating.is_suspended = false; in ufshcd_resume()
8918 /* Enable Auto-Hibernate if configured */ in ufshcd_resume()
8923 if (hba->dev_info.b_rpm_dev_flush_capable) { in ufshcd_resume()
8924 hba->dev_info.b_rpm_dev_flush_capable = false; in ufshcd_resume()
8925 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); in ufshcd_resume()
8928 /* Schedule clock gating in case of no access to UFS device yet */ in ufshcd_resume()
8941 hba->clk_gating.state = CLKS_OFF; in ufshcd_resume()
8942 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_resume()
8943 hba->clk_gating.state); in ufshcd_resume()
8948 hba->pm_op_in_progress = 0; in ufshcd_resume()
8955 * ufshcd_system_suspend - system suspend routine
8960 * Returns 0 for success and non-zero for failure
8967 down(&hba->host_sem); in ufshcd_system_suspend()
8969 if (!hba->is_powered) in ufshcd_system_suspend()
8972 cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work); in ufshcd_system_suspend()
8974 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == in ufshcd_system_suspend()
8975 hba->curr_dev_pwr_mode) && in ufshcd_system_suspend()
8976 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) == in ufshcd_system_suspend()
8977 hba->uic_link_state) && in ufshcd_system_suspend()
8978 pm_runtime_suspended(hba->dev) && in ufshcd_system_suspend()
8979 !hba->dev_info.b_rpm_dev_flush_capable) in ufshcd_system_suspend()
8982 if (pm_runtime_suspended(hba->dev)) { in ufshcd_system_suspend()
8984 * UFS device and/or UFS link low power states during runtime in ufshcd_system_suspend()
8998 trace_ufshcd_system_suspend(dev_name(hba->dev), ret, in ufshcd_system_suspend()
9000 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_suspend()
9002 hba->is_sys_suspended = true; in ufshcd_system_suspend()
9004 up(&hba->host_sem); in ufshcd_system_suspend()
9010 * ufshcd_system_resume - system resume routine
9013 * Returns 0 for success and non-zero for failure
9021 if (!hba->is_powered || pm_runtime_suspended(hba->dev)) in ufshcd_system_resume()
9030 trace_ufshcd_system_resume(dev_name(hba->dev), ret, in ufshcd_system_resume()
9032 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_resume()
9034 hba->is_sys_suspended = false; in ufshcd_system_resume()
9035 up(&hba->host_sem); in ufshcd_system_resume()
9041 * ufshcd_runtime_suspend - runtime suspend routine
9046 * Returns 0 for success and non-zero for failure
9053 if (!hba->is_powered) in ufshcd_runtime_suspend()
9058 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, in ufshcd_runtime_suspend()
9060 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_suspend()
9066 * ufshcd_runtime_resume - runtime resume routine
9069 * This function basically brings the UFS device, UniPro link and controller
9073 * 2. Bring the UniPro link out of Hibernate state
9074 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
9076 * 4. If auto-bkops is enabled on the device, disable it.
9080 * S1: UFS device in Active state with VCC rail ON
9082 * All the UFS/UniPro controller clocks are ON
9084 * Returns 0 for success and non-zero for failure
9091 if (!hba->is_powered) in ufshcd_runtime_resume()
9096 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, in ufshcd_runtime_resume()
9098 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_resume()
9110 * ufshcd_shutdown - shutdown routine
9113 * This function powers off both the UFS device and the UFS link.
9121 down(&hba->host_sem); in ufshcd_shutdown()
9122 hba->shutting_down = true; in ufshcd_shutdown()
9123 up(&hba->host_sem); in ufshcd_shutdown()
9125 if (!hba->is_powered) in ufshcd_shutdown()
9131 pm_runtime_get_sync(hba->dev); in ufshcd_shutdown()
9136 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); in ufshcd_shutdown()
9137 hba->is_powered = false; in ufshcd_shutdown()
9144 * ufshcd_remove - de-allocate SCSI host and host memory space
9152 ufs_sysfs_remove_nodes(hba->dev); in ufshcd_remove()
9153 blk_cleanup_queue(hba->tmf_queue); in ufshcd_remove()
9154 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_remove()
9155 blk_cleanup_queue(hba->cmd_queue); in ufshcd_remove()
9156 scsi_remove_host(hba->host); in ufshcd_remove()
9158 ufshcd_disable_intr(hba, hba->intr_mask); in ufshcd_remove()
9165 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9170 scsi_host_put(hba->host); in ufshcd_dealloc_host()
9175 * ufshcd_set_dma_mask - Set dma mask based on the controller
9179 * Returns 0 for success, non-zero for failure
9183 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { in ufshcd_set_dma_mask()
9184 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) in ufshcd_set_dma_mask()
9187 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); in ufshcd_set_dma_mask()
9191 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9194 * Returns 0 on success, non-zero value on failure
9205 err = -ENODEV; in ufshcd_alloc_host()
9213 err = -ENOMEM; in ufshcd_alloc_host()
9217 hba->host = host; in ufshcd_alloc_host()
9218 hba->dev = dev; in ufshcd_alloc_host()
9220 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; in ufshcd_alloc_host()
9221 hba->sg_entry_size = sizeof(struct ufshcd_sg_entry); in ufshcd_alloc_host()
9223 INIT_LIST_HEAD(&hba->clk_list_head); in ufshcd_alloc_host()
9243 * ufshcd_init - Driver initialization routine
9244 * @hba: per-adapter instance
9247 * Returns 0 on success, non-zero value on failure
9251 struct request ***tmf_rqs = &ufs_hba_add_info(hba)->tmf_rqs; in ufshcd_init()
9253 struct Scsi_Host *host = hba->host; in ufshcd_init()
9254 struct device *dev = hba->dev; in ufshcd_init()
9265 dev_err(hba->dev, in ufshcd_init()
9267 err = -ENODEV; in ufshcd_init()
9271 hba->mmio_base = mmio_base; in ufshcd_init()
9272 hba->irq = irq; in ufshcd_init()
9273 hba->vps = &ufs_hba_vps; in ufshcd_init()
9284 /* Get UFS version supported by the controller */ in ufshcd_init()
9285 hba->ufs_version = ufshcd_get_ufs_version(hba); in ufshcd_init()
9287 if (hba->ufs_version < ufshci_version(1, 0)) in ufshcd_init()
9288 dev_err(hba->dev, "invalid UFS version 0x%x\n", in ufshcd_init()
9289 hba->ufs_version); in ufshcd_init()
9292 hba->intr_mask = ufshcd_get_intr_mask(hba); in ufshcd_init()
9296 dev_err(hba->dev, "set dma mask failed\n"); in ufshcd_init()
9303 dev_err(hba->dev, "Memory allocation failed\n"); in ufshcd_init()
9310 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
9311 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
9312 host->max_id = UFSHCD_MAX_ID; in ufshcd_init()
9313 host->max_lun = UFS_MAX_LUNS; in ufshcd_init()
9314 host->max_channel = UFSHCD_MAX_CHANNEL; in ufshcd_init()
9315 host->unique_id = host->host_no; in ufshcd_init()
9316 host->max_cmd_len = UFS_CDB_SIZE; in ufshcd_init()
9318 hba->max_pwr_info.is_valid = false; in ufshcd_init()
9322 hba->host->host_no); in ufshcd_init()
9323 hba->eh_wq = create_singlethread_workqueue(eh_wq_name); in ufshcd_init()
9324 if (!hba->eh_wq) { in ufshcd_init()
9325 dev_err(hba->dev, "%s: failed to create eh workqueue\n", in ufshcd_init()
9327 err = -ENOMEM; in ufshcd_init()
9330 INIT_WORK(&hba->eh_work, ufshcd_err_handler); in ufshcd_init()
9331 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); in ufshcd_init()
9333 sema_init(&hba->host_sem, 1); in ufshcd_init()
9336 mutex_init(&hba->uic_cmd_mutex); in ufshcd_init()
9339 mutex_init(&hba->dev_cmd.lock); in ufshcd_init()
9341 init_rwsem(&hba->clk_scaling_lock); in ufshcd_init()
9349 * registering UFS controller interrupt handler, clear any pending UFS in ufshcd_init()
9350 * interrupt status and disable all the UFS interrupts. in ufshcd_init()
9356 * Make sure that UFS interrupts are disabled and any pending interrupt in ufshcd_init()
9357 * status is cleared before registering UFS interrupt handler. in ufshcd_init()
9364 dev_err(hba->dev, "request irq failed\n"); in ufshcd_init()
9367 hba->is_irq_enabled = true; in ufshcd_init()
9370 err = scsi_add_host(host, hba->dev); in ufshcd_init()
9372 dev_err(hba->dev, "scsi_add_host failed\n"); in ufshcd_init()
9376 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set); in ufshcd_init()
9377 if (IS_ERR(hba->cmd_queue)) { in ufshcd_init()
9378 err = PTR_ERR(hba->cmd_queue); in ufshcd_init()
9382 hba->tmf_tag_set = (struct blk_mq_tag_set) { in ufshcd_init()
9384 .queue_depth = hba->nutmrs, in ufshcd_init()
9388 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); in ufshcd_init()
9391 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); in ufshcd_init()
9392 if (IS_ERR(hba->tmf_queue)) { in ufshcd_init()
9393 err = PTR_ERR(hba->tmf_queue); in ufshcd_init()
9396 *tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, sizeof(**tmf_rqs), in ufshcd_init()
9399 err = -ENOMEM; in ufshcd_init()
9411 dev_err(hba->dev, "Host controller enable failed\n"); in ufshcd_init()
9419 * Default power saving mode is to keep UFS link in Hibern8 state in ufshcd_init()
9420 * and UFS device in sleep state. in ufshcd_init()
9422 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
9425 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
9429 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, in ufshcd_init()
9432 /* Set the default auto-hibernate idle timer value to 150 ms */ in ufshcd_init()
9433 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) { in ufshcd_init()
9434 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) | in ufshcd_init()
9440 atomic_set(&hba->scsi_block_reqs_cnt, 0); in ufshcd_init()
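/*
 * The auto-hibernate setup above packs a 150 ms idle timeout into the
 * AHIT register: a timer value in the low bits plus a scale selector. A
 * userspace model of that FIELD_PREP() packing, assuming a 10-bit timer
 * field (bits 9:0) and a 3-bit scale field (bits 12:10) where scale 3
 * selects 1 ms units, so 150 * 1 ms = 150 ms. Uses the GCC/Clang
 * __builtin_ctz() to find the field offset:
 */
#include <stdint.h>
#include <stdio.h>

#define AHIBERN8_TIMER_MASK	0x3ffu		/* bits 9:0 */
#define AHIBERN8_SCALE_MASK	(0x7u << 10)	/* bits 12:10 */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t ahit = field_prep(AHIBERN8_TIMER_MASK, 150) |
			field_prep(AHIBERN8_SCALE_MASK, 3);

	printf("ahit = 0x%04x\n", ahit);	/* 150 | (3 << 10) = 0x0c96 */
	return 0;
}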
9442 * We are assuming that device wasn't put in sleep/power-down in ufshcd_init()
9456 blk_cleanup_queue(hba->tmf_queue); in ufshcd_init()
9458 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_init()
9460 blk_cleanup_queue(hba->cmd_queue); in ufshcd_init()
9462 scsi_remove_host(hba->host); in ufshcd_init()
9464 hba->is_irq_enabled = false; in ufshcd_init()
9487 MODULE_DESCRIPTION("Generic UFS host controller driver Core");