Lines matching refs: hba
103 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, in ufshcd_dump_regs() argument
121 regs[pos / 4] = ufshcd_readl(hba, offset + pos); in ufshcd_dump_regs()
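A minimal usage sketch for the dump helper above, assuming a valid struct ufs_hba *hba inside the driver; the call mirrors the one made by ufshcd_print_evt_hist() further down this listing:

	/* Dump the whole standard UFSHCI register window with a log prefix. */
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");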
230 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
232 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
234 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
235 static void ufshcd_hba_exit(struct ufs_hba *hba);
236 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
237 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
238 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
239 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
240 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
241 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
242 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
243 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
245 static int ufshcd_change_power_mode(struct ufs_hba *hba,
247 static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
248 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
249 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
250 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
252 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
253 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
254 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
255 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
256 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
257 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
258 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
259 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
261 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) in ufshcd_valid_tag() argument
263 return tag >= 0 && tag < hba->nutrs; in ufshcd_valid_tag()
266 static inline void ufshcd_enable_irq(struct ufs_hba *hba) in ufshcd_enable_irq() argument
268 if (!hba->is_irq_enabled) { in ufshcd_enable_irq()
269 enable_irq(hba->irq); in ufshcd_enable_irq()
270 hba->is_irq_enabled = true; in ufshcd_enable_irq()
274 static inline void ufshcd_disable_irq(struct ufs_hba *hba) in ufshcd_disable_irq() argument
276 if (hba->is_irq_enabled) { in ufshcd_disable_irq()
277 disable_irq(hba->irq); in ufshcd_disable_irq()
278 hba->is_irq_enabled = false; in ufshcd_disable_irq()
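ufshcd_enable_irq()/ufshcd_disable_irq() guard the single hba->irq line with the is_irq_enabled flag, so a redundant enable or disable is harmless. A hedged sketch of the pairing used by the clock gating paths shown later in this listing (gate: IRQ off before clocks off; ungate: clocks on before the IRQ comes back):

	/* gate: quiesce the interrupt line, then drop the clocks */
	ufshcd_disable_irq(hba);
	ufshcd_setup_clocks(hba, false);

	/* ungate: restore clocks first, then re-arm the interrupt line */
	ufshcd_setup_clocks(hba, true);
	ufshcd_enable_irq(hba);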
282 static inline void ufshcd_wb_config(struct ufs_hba *hba) in ufshcd_wb_config() argument
286 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_config()
289 ret = ufshcd_wb_ctrl(hba, true); in ufshcd_wb_config()
291 dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret); in ufshcd_wb_config()
293 dev_info(hba->dev, "%s: Write Booster Configured\n", __func__); in ufshcd_wb_config()
294 ret = ufshcd_wb_toggle_flush_during_h8(hba, true); in ufshcd_wb_config()
296 dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n", in ufshcd_wb_config()
298 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)) in ufshcd_wb_config()
299 ufshcd_wb_toggle_flush(hba, true); in ufshcd_wb_config()
302 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) in ufshcd_scsi_unblock_requests() argument
304 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) in ufshcd_scsi_unblock_requests()
305 scsi_unblock_requests(hba->host); in ufshcd_scsi_unblock_requests()
308 static void ufshcd_scsi_block_requests(struct ufs_hba *hba) in ufshcd_scsi_block_requests() argument
310 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) in ufshcd_scsi_block_requests()
311 scsi_block_requests(hba->host); in ufshcd_scsi_block_requests()
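These two wrappers turn scsi_block_requests()/scsi_unblock_requests() into a counted pair via scsi_block_reqs_cnt, so nested blockers cannot unblock the host early. A short sketch of the nesting behaviour, assuming driver context:

	ufshcd_scsi_block_requests(hba);	/* 0 -> 1: host blocked        */
	ufshcd_scsi_block_requests(hba);	/* 1 -> 2: still blocked       */
	ufshcd_scsi_unblock_requests(hba);	/* 2 -> 1: still blocked       */
	ufshcd_scsi_unblock_requests(hba);	/* 1 -> 0: host unblocked here */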
314 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_cmd_upiu_trace() argument
317 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; in ufshcd_add_cmd_upiu_trace()
319 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb); in ufshcd_add_cmd_upiu_trace()
322 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_query_upiu_trace() argument
325 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; in ufshcd_add_query_upiu_trace()
327 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr); in ufshcd_add_query_upiu_trace()
330 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_tm_upiu_trace() argument
333 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; in ufshcd_add_tm_upiu_trace()
335 trace_android_vh_ufs_send_tm_command(hba, tag, str); in ufshcd_add_tm_upiu_trace()
336 trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header, in ufshcd_add_tm_upiu_trace()
340 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, in ufshcd_add_uic_command_trace() argument
346 trace_android_vh_ufs_send_uic_command(hba, ucmd, str); in ufshcd_add_uic_command_trace()
354 cmd = ufshcd_readl(hba, REG_UIC_COMMAND); in ufshcd_add_uic_command_trace()
356 trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd, in ufshcd_add_uic_command_trace()
357 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), in ufshcd_add_uic_command_trace()
358 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), in ufshcd_add_uic_command_trace()
359 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); in ufshcd_add_uic_command_trace()
362 static void ufshcd_add_command_trace(struct ufs_hba *hba, in ufshcd_add_command_trace() argument
368 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_add_command_trace()
375 ufshcd_add_cmd_upiu_trace(hba, tag, str); in ufshcd_add_command_trace()
381 ufshcd_add_cmd_upiu_trace(hba, tag, str); in ufshcd_add_command_trace()
402 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_add_command_trace()
403 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_add_command_trace()
404 trace_ufshcd_command(dev_name(hba->dev), str, tag, in ufshcd_add_command_trace()
408 static void ufshcd_print_clk_freqs(struct ufs_hba *hba) in ufshcd_print_clk_freqs() argument
411 struct list_head *head = &hba->clk_list_head; in ufshcd_print_clk_freqs()
419 dev_err(hba->dev, "clk: %s, rate: %u\n", in ufshcd_print_clk_freqs()
424 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id, in ufshcd_print_evt() argument
434 e = &hba->ufs_stats.event[id]; in ufshcd_print_evt()
441 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, in ufshcd_print_evt()
447 dev_err(hba->dev, "No record of %s\n", err_name); in ufshcd_print_evt()
450 static void ufshcd_print_evt_hist(struct ufs_hba *hba) in ufshcd_print_evt_hist() argument
452 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); in ufshcd_print_evt_hist()
454 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err"); in ufshcd_print_evt_hist()
455 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err"); in ufshcd_print_evt_hist()
456 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err"); in ufshcd_print_evt_hist()
457 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err"); in ufshcd_print_evt_hist()
458 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err"); in ufshcd_print_evt_hist()
459 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR, in ufshcd_print_evt_hist()
461 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err"); in ufshcd_print_evt_hist()
462 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL, in ufshcd_print_evt_hist()
464 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); in ufshcd_print_evt_hist()
465 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, in ufshcd_print_evt_hist()
467 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); in ufshcd_print_evt_hist()
468 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); in ufshcd_print_evt_hist()
469 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); in ufshcd_print_evt_hist()
471 ufshcd_vops_dbg_register_dump(hba); in ufshcd_print_evt_hist()
475 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) in ufshcd_print_trs() argument
481 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_print_trs()
482 lrbp = &hba->lrb[tag]; in ufshcd_print_trs()
484 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", in ufshcd_print_trs()
486 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", in ufshcd_print_trs()
488 dev_err(hba->dev, in ufshcd_print_trs()
494 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, in ufshcd_print_trs()
498 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, in ufshcd_print_trs()
505 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_print_trs()
506 prdt_length /= hba->sg_entry_size; in ufshcd_print_trs()
508 dev_err(hba->dev, in ufshcd_print_trs()
515 hba->sg_entry_size * prdt_length); in ufshcd_print_trs()
519 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) in ufshcd_print_tmrs() argument
523 for_each_set_bit(tag, &bitmap, hba->nutmrs) { in ufshcd_print_tmrs()
524 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; in ufshcd_print_tmrs()
526 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); in ufshcd_print_tmrs()
531 static void ufshcd_print_host_state(struct ufs_hba *hba) in ufshcd_print_host_state() argument
533 struct scsi_device *sdev_ufs = hba->sdev_ufs_device; in ufshcd_print_host_state()
535 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); in ufshcd_print_host_state()
536 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", in ufshcd_print_host_state()
537 hba->outstanding_reqs, hba->outstanding_tasks); in ufshcd_print_host_state()
538 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", in ufshcd_print_host_state()
539 hba->saved_err, hba->saved_uic_err); in ufshcd_print_host_state()
540 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", in ufshcd_print_host_state()
541 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_print_host_state()
542 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", in ufshcd_print_host_state()
543 hba->pm_op_in_progress, hba->is_sys_suspended); in ufshcd_print_host_state()
544 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", in ufshcd_print_host_state()
545 hba->auto_bkops_enabled, hba->host->host_self_blocked); in ufshcd_print_host_state()
546 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); in ufshcd_print_host_state()
547 dev_err(hba->dev, in ufshcd_print_host_state()
549 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), in ufshcd_print_host_state()
550 hba->ufs_stats.hibern8_exit_cnt); in ufshcd_print_host_state()
551 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", in ufshcd_print_host_state()
552 ktime_to_us(hba->ufs_stats.last_intr_ts), in ufshcd_print_host_state()
553 hba->ufs_stats.last_intr_status); in ufshcd_print_host_state()
554 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", in ufshcd_print_host_state()
555 hba->eh_flags, hba->req_abort_count); in ufshcd_print_host_state()
556 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", in ufshcd_print_host_state()
557 hba->ufs_version, hba->capabilities, hba->caps); in ufshcd_print_host_state()
558 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, in ufshcd_print_host_state()
559 hba->dev_quirks); in ufshcd_print_host_state()
561 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", in ufshcd_print_host_state()
564 ufshcd_print_clk_freqs(hba); in ufshcd_print_host_state()
572 static void ufshcd_print_pwr_info(struct ufs_hba *hba) in ufshcd_print_pwr_info() argument
584 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", in ufshcd_print_pwr_info()
586 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, in ufshcd_print_pwr_info()
587 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, in ufshcd_print_pwr_info()
588 names[hba->pwr_info.pwr_rx], in ufshcd_print_pwr_info()
589 names[hba->pwr_info.pwr_tx], in ufshcd_print_pwr_info()
590 hba->pwr_info.hs_rate); in ufshcd_print_pwr_info()
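Taken together, the print helpers above form the driver's standard debug dump. A hedged sketch of the sequence an error path inside ufshcd.c might run (all three helpers are static, so this only applies within the file):

	ufshcd_print_host_state(hba);	/* state machine, outstanding reqs, quirks */
	ufshcd_print_pwr_info(hba);	/* negotiated gear/lane/power mode/rate    */
	ufshcd_print_evt_hist(hba);	/* full register dump + per-event history  */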
617 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, in ufshcd_wait_for_register() argument
627 while ((ufshcd_readl(hba, reg) & mask) != val) { in ufshcd_wait_for_register()
630 if ((ufshcd_readl(hba, reg) & mask) != val) in ufshcd_wait_for_register()
645 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) in ufshcd_get_intr_mask() argument
647 if (hba->ufs_version == ufshci_version(1, 0)) in ufshcd_get_intr_mask()
649 if (hba->ufs_version <= ufshci_version(2, 0)) in ufshcd_get_intr_mask()
661 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) in ufshcd_get_ufs_version() argument
665 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) in ufshcd_get_ufs_version()
666 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba); in ufshcd_get_ufs_version()
668 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION); in ufshcd_get_ufs_version()
688 static inline bool ufshcd_is_device_present(struct ufs_hba *hba) in ufshcd_is_device_present() argument
690 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & in ufshcd_is_device_present()
711 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) in ufshcd_utrl_clear() argument
713 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utrl_clear()
714 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); in ufshcd_utrl_clear()
716 ufshcd_writel(hba, ~(1 << pos), in ufshcd_utrl_clear()
725 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) in ufshcd_utmrl_clear() argument
727 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utmrl_clear()
728 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_utmrl_clear()
730 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_utmrl_clear()
738 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag) in ufshcd_outstanding_req_clear() argument
740 clear_bit(tag, &hba->outstanding_reqs); in ufshcd_outstanding_req_clear()
761 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) in ufshcd_get_uic_cmd_result() argument
763 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & in ufshcd_get_uic_cmd_result()
774 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) in ufshcd_get_dme_attr_val() argument
776 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); in ufshcd_get_dme_attr_val()
836 ufshcd_reset_intr_aggr(struct ufs_hba *hba) in ufshcd_reset_intr_aggr() argument
838 ufshcd_writel(hba, INT_AGGR_ENABLE | in ufshcd_reset_intr_aggr()
850 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) in ufshcd_config_intr_aggr() argument
852 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | in ufshcd_config_intr_aggr()
862 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) in ufshcd_disable_intr_aggr() argument
864 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); in ufshcd_disable_intr_aggr()
873 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) in ufshcd_enable_run_stop_reg() argument
875 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
877 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
885 static inline void ufshcd_hba_start(struct ufs_hba *hba) in ufshcd_hba_start() argument
889 if (ufshcd_crypto_enable(hba)) in ufshcd_hba_start()
892 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); in ufshcd_hba_start()
901 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) in ufshcd_is_hba_active() argument
903 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) in ufshcd_is_hba_active()
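ufshcd_hba_start() sets CONTROLLER_ENABLE (after switching inline crypto on when available) and ufshcd_is_hba_active() reads the bit back. A hedged sketch of the enable-then-poll pattern used when bringing the controller up; the retry bound is illustrative, and the polarity of ufshcd_is_hba_active() (true while the controller has not yet come up, in this kernel vintage) should be checked against the full definition, which is truncated in this listing:

	int retries = 10;	/* illustrative bound, not the driver's value */

	ufshcd_hba_start(hba);
	while (ufshcd_is_hba_active(hba)) {
		if (!retries--)
			return -EIO;	/* controller never reported active */
		usleep_range(1000, 1100);
	}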
907 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) in ufshcd_get_local_unipro_ver() argument
910 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_get_local_unipro_ver()
917 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) in ufshcd_is_unipro_pa_params_tuning_req() argument
928 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6) in ufshcd_is_unipro_pa_params_tuning_req()
942 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up) in ufshcd_set_clk_freq() argument
946 struct list_head *head = &hba->clk_list_head; in ufshcd_set_clk_freq()
959 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
964 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
977 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
982 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
989 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, in ufshcd_set_clk_freq()
1005 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_clks() argument
1010 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); in ufshcd_scale_clks()
1014 ret = ufshcd_set_clk_freq(hba, scale_up); in ufshcd_scale_clks()
1018 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); in ufshcd_scale_clks()
1020 ufshcd_set_clk_freq(hba, !scale_up); in ufshcd_scale_clks()
1023 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_scale_clks()
1036 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, in ufshcd_is_devfreq_scaling_required() argument
1040 struct list_head *head = &hba->clk_list_head; in ufshcd_is_devfreq_scaling_required()
1062 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, in ufshcd_wait_for_doorbell_clr() argument
1072 ufshcd_hold(hba, false); in ufshcd_wait_for_doorbell_clr()
1073 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1080 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { in ufshcd_wait_for_doorbell_clr()
1085 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_wait_for_doorbell_clr()
1086 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_wait_for_doorbell_clr()
1094 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1106 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1110 dev_err(hba->dev, in ufshcd_wait_for_doorbell_clr()
1116 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1117 ufshcd_release(hba); in ufshcd_wait_for_doorbell_clr()
1130 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_gear() argument
1136 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info, in ufshcd_scale_gear()
1139 memcpy(&new_pwr_info, &hba->pwr_info, in ufshcd_scale_gear()
1142 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear || in ufshcd_scale_gear()
1143 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) { in ufshcd_scale_gear()
1145 memcpy(&hba->clk_scaling.saved_pwr_info.info, in ufshcd_scale_gear()
1146 &hba->pwr_info, in ufshcd_scale_gear()
1150 new_pwr_info.gear_tx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1151 new_pwr_info.gear_rx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1156 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); in ufshcd_scale_gear()
1158 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", in ufshcd_scale_gear()
1160 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, in ufshcd_scale_gear()
1166 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) in ufshcd_clock_scaling_prepare() argument
1174 ufshcd_scsi_block_requests(hba); in ufshcd_clock_scaling_prepare()
1175 down_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1177 if (!hba->clk_scaling.is_allowed || in ufshcd_clock_scaling_prepare()
1178 ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { in ufshcd_clock_scaling_prepare()
1180 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1181 ufshcd_scsi_unblock_requests(hba); in ufshcd_clock_scaling_prepare()
1186 ufshcd_hold(hba, false); in ufshcd_clock_scaling_prepare()
1192 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock) in ufshcd_clock_scaling_unprepare() argument
1195 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_unprepare()
1197 up_read(&hba->clk_scaling_lock); in ufshcd_clock_scaling_unprepare()
1198 ufshcd_scsi_unblock_requests(hba); in ufshcd_clock_scaling_unprepare()
1199 ufshcd_release(hba); in ufshcd_clock_scaling_unprepare()
1211 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) in ufshcd_devfreq_scale() argument
1216 ret = ufshcd_clock_scaling_prepare(hba); in ufshcd_devfreq_scale()
1222 ret = ufshcd_scale_gear(hba, false); in ufshcd_devfreq_scale()
1227 ret = ufshcd_scale_clks(hba, scale_up); in ufshcd_devfreq_scale()
1230 ufshcd_scale_gear(hba, true); in ufshcd_devfreq_scale()
1236 ret = ufshcd_scale_gear(hba, true); in ufshcd_devfreq_scale()
1238 ufshcd_scale_clks(hba, false); in ufshcd_devfreq_scale()
1244 downgrade_write(&hba->clk_scaling_lock); in ufshcd_devfreq_scale()
1246 ufshcd_wb_ctrl(hba, scale_up); in ufshcd_devfreq_scale()
1249 ufshcd_clock_scaling_unprepare(hba, is_writelock); in ufshcd_devfreq_scale()
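A hedged sketch of how the clkscale_enable sysfs store path (shown further down) forces a final scale-up when clock scaling is being disabled, assuming driver context:

	int err;

	hba->clk_scaling.is_enabled = false;
	ufshcd_suspend_clkscaling(hba);
	/* park clocks and gear at the high operating point */
	err = ufshcd_devfreq_scale(hba, true);
	if (err)
		dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
			__func__, err);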
1255 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_clk_scaling_suspend_work() local
1259 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1260 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { in ufshcd_clk_scaling_suspend_work()
1261 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1264 hba->clk_scaling.is_suspended = true; in ufshcd_clk_scaling_suspend_work()
1265 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1267 __ufshcd_suspend_clkscaling(hba); in ufshcd_clk_scaling_suspend_work()
1272 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_clk_scaling_resume_work() local
1276 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1277 if (!hba->clk_scaling.is_suspended) { in ufshcd_clk_scaling_resume_work()
1278 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1281 hba->clk_scaling.is_suspended = false; in ufshcd_clk_scaling_resume_work()
1282 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1284 devfreq_resume_device(hba->devfreq); in ufshcd_clk_scaling_resume_work()
1291 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_target() local
1294 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_target()
1300 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_devfreq_target()
1303 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); in ufshcd_devfreq_target()
1306 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1307 if (ufshcd_eh_in_progress(hba)) { in ufshcd_devfreq_target()
1308 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1312 if (!hba->clk_scaling.active_reqs) in ufshcd_devfreq_target()
1316 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1325 trace_android_vh_ufs_clock_scaling(hba, &force_out, &force_scaling, &scale_up); in ufshcd_devfreq_target()
1328 if (force_out || (!force_scaling && !ufshcd_is_devfreq_scaling_required(hba, scale_up))) { in ufshcd_devfreq_target()
1329 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1333 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1336 ret = ufshcd_devfreq_scale(hba, scale_up); in ufshcd_devfreq_target()
1338 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_devfreq_target()
1344 queue_work(hba->clk_scaling.workq, in ufshcd_devfreq_target()
1345 &hba->clk_scaling.suspend_work); in ufshcd_devfreq_target()
1360 static bool ufshcd_any_tag_in_use(struct ufs_hba *hba) in ufshcd_any_tag_in_use() argument
1362 struct request_queue *q = hba->cmd_queue; in ufshcd_any_tag_in_use()
1372 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_get_dev_status() local
1373 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_devfreq_get_dev_status()
1375 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_get_dev_status()
1379 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_devfreq_get_dev_status()
1384 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
1406 if (hba->outstanding_reqs) { in ufshcd_devfreq_get_dev_status()
1413 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
1417 static int ufshcd_devfreq_init(struct ufs_hba *hba) in ufshcd_devfreq_init() argument
1419 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_init()
1429 dev_pm_opp_add(hba->dev, clki->min_freq, 0); in ufshcd_devfreq_init()
1430 dev_pm_opp_add(hba->dev, clki->max_freq, 0); in ufshcd_devfreq_init()
1432 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1433 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1434 devfreq = devfreq_add_device(hba->dev, in ufshcd_devfreq_init()
1435 &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1437 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1440 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); in ufshcd_devfreq_init()
1442 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_init()
1443 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_init()
1447 hba->devfreq = devfreq; in ufshcd_devfreq_init()
1452 static void ufshcd_devfreq_remove(struct ufs_hba *hba) in ufshcd_devfreq_remove() argument
1454 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_remove()
1457 if (!hba->devfreq) in ufshcd_devfreq_remove()
1460 devfreq_remove_device(hba->devfreq); in ufshcd_devfreq_remove()
1461 hba->devfreq = NULL; in ufshcd_devfreq_remove()
1464 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_remove()
1465 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_remove()
1468 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) in __ufshcd_suspend_clkscaling() argument
1472 devfreq_suspend_device(hba->devfreq); in __ufshcd_suspend_clkscaling()
1473 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_suspend_clkscaling()
1474 hba->clk_scaling.window_start_t = 0; in __ufshcd_suspend_clkscaling()
1475 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_suspend_clkscaling()
1478 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) in ufshcd_suspend_clkscaling() argument
1483 cancel_work_sync(&hba->clk_scaling.suspend_work); in ufshcd_suspend_clkscaling()
1484 cancel_work_sync(&hba->clk_scaling.resume_work); in ufshcd_suspend_clkscaling()
1486 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_suspend_clkscaling()
1487 if (!hba->clk_scaling.is_suspended) { in ufshcd_suspend_clkscaling()
1489 hba->clk_scaling.is_suspended = true; in ufshcd_suspend_clkscaling()
1491 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_suspend_clkscaling()
1494 __ufshcd_suspend_clkscaling(hba); in ufshcd_suspend_clkscaling()
1497 static void ufshcd_resume_clkscaling(struct ufs_hba *hba) in ufshcd_resume_clkscaling() argument
1502 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_resume_clkscaling()
1503 if (hba->clk_scaling.is_suspended) { in ufshcd_resume_clkscaling()
1505 hba->clk_scaling.is_suspended = false; in ufshcd_resume_clkscaling()
1507 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_resume_clkscaling()
1510 devfreq_resume_device(hba->devfreq); in ufshcd_resume_clkscaling()
1516 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkscale_enable_show() local
1518 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled); in ufshcd_clkscale_enable_show()
1524 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkscale_enable_store() local
1531 down(&hba->host_sem); in ufshcd_clkscale_enable_store()
1532 if (!ufshcd_is_user_access_allowed(hba)) { in ufshcd_clkscale_enable_store()
1538 if (value == hba->clk_scaling.is_enabled) in ufshcd_clkscale_enable_store()
1541 pm_runtime_get_sync(hba->dev); in ufshcd_clkscale_enable_store()
1542 ufshcd_hold(hba, false); in ufshcd_clkscale_enable_store()
1544 hba->clk_scaling.is_enabled = value; in ufshcd_clkscale_enable_store()
1547 ufshcd_resume_clkscaling(hba); in ufshcd_clkscale_enable_store()
1549 ufshcd_suspend_clkscaling(hba); in ufshcd_clkscale_enable_store()
1550 err = ufshcd_devfreq_scale(hba, true); in ufshcd_clkscale_enable_store()
1552 dev_err(hba->dev, "%s: failed to scale clocks up %d\n", in ufshcd_clkscale_enable_store()
1556 ufshcd_release(hba); in ufshcd_clkscale_enable_store()
1557 pm_runtime_put_sync(hba->dev); in ufshcd_clkscale_enable_store()
1559 up(&hba->host_sem); in ufshcd_clkscale_enable_store()
1563 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba) in ufshcd_init_clk_scaling_sysfs() argument
1565 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; in ufshcd_init_clk_scaling_sysfs()
1566 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; in ufshcd_init_clk_scaling_sysfs()
1567 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); in ufshcd_init_clk_scaling_sysfs()
1568 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; in ufshcd_init_clk_scaling_sysfs()
1569 hba->clk_scaling.enable_attr.attr.mode = 0644; in ufshcd_init_clk_scaling_sysfs()
1570 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) in ufshcd_init_clk_scaling_sysfs()
1571 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); in ufshcd_init_clk_scaling_sysfs()
1574 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) in ufshcd_remove_clk_scaling_sysfs() argument
1576 if (hba->clk_scaling.enable_attr.attr.name) in ufshcd_remove_clk_scaling_sysfs()
1577 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); in ufshcd_remove_clk_scaling_sysfs()
1580 static void ufshcd_init_clk_scaling(struct ufs_hba *hba) in ufshcd_init_clk_scaling() argument
1584 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_init_clk_scaling()
1587 if (!hba->clk_scaling.min_gear) in ufshcd_init_clk_scaling()
1588 hba->clk_scaling.min_gear = UFS_HS_G1; in ufshcd_init_clk_scaling()
1590 INIT_WORK(&hba->clk_scaling.suspend_work, in ufshcd_init_clk_scaling()
1592 INIT_WORK(&hba->clk_scaling.resume_work, in ufshcd_init_clk_scaling()
1596 hba->host->host_no); in ufshcd_init_clk_scaling()
1597 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); in ufshcd_init_clk_scaling()
1599 hba->clk_scaling.is_initialized = true; in ufshcd_init_clk_scaling()
1602 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) in ufshcd_exit_clk_scaling() argument
1604 if (!hba->clk_scaling.is_initialized) in ufshcd_exit_clk_scaling()
1607 ufshcd_remove_clk_scaling_sysfs(hba); in ufshcd_exit_clk_scaling()
1608 destroy_workqueue(hba->clk_scaling.workq); in ufshcd_exit_clk_scaling()
1609 ufshcd_devfreq_remove(hba); in ufshcd_exit_clk_scaling()
1610 hba->clk_scaling.is_initialized = false; in ufshcd_exit_clk_scaling()
1617 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_ungate_work() local
1620 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_ungate_work()
1622 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_ungate_work()
1623 if (hba->clk_gating.state == CLKS_ON) { in ufshcd_ungate_work()
1624 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
1628 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
1629 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_ungate_work()
1630 ufshcd_setup_clocks(hba, true); in ufshcd_ungate_work()
1632 ufshcd_enable_irq(hba); in ufshcd_ungate_work()
1635 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_ungate_work()
1637 hba->clk_gating.is_suspended = true; in ufshcd_ungate_work()
1638 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_ungate_work()
1639 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_ungate_work()
1641 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_ungate_work()
1644 ufshcd_set_link_active(hba); in ufshcd_ungate_work()
1646 hba->clk_gating.is_suspended = false; in ufshcd_ungate_work()
1649 ufshcd_scsi_unblock_requests(hba); in ufshcd_ungate_work()
1658 int ufshcd_hold(struct ufs_hba *hba, bool async) in ufshcd_hold() argument
1664 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_hold()
1666 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1667 hba->clk_gating.active_reqs++; in ufshcd_hold()
1670 switch (hba->clk_gating.state) { in ufshcd_hold()
1680 if (ufshcd_can_hibern8_during_gating(hba) && in ufshcd_hold()
1681 ufshcd_is_link_hibern8(hba)) { in ufshcd_hold()
1684 hba->clk_gating.active_reqs--; in ufshcd_hold()
1687 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1688 flush_result = flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1689 if (hba->clk_gating.is_suspended && !flush_result) in ufshcd_hold()
1691 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1696 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { in ufshcd_hold()
1697 hba->clk_gating.state = CLKS_ON; in ufshcd_hold()
1698 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1699 hba->clk_gating.state); in ufshcd_hold()
1709 hba->clk_gating.state = REQ_CLKS_ON; in ufshcd_hold()
1710 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1711 hba->clk_gating.state); in ufshcd_hold()
1712 if (queue_work(hba->clk_gating.clk_gating_workq, in ufshcd_hold()
1713 &hba->clk_gating.ungate_work)) in ufshcd_hold()
1714 ufshcd_scsi_block_requests(hba); in ufshcd_hold()
1723 hba->clk_gating.active_reqs--; in ufshcd_hold()
1727 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1728 flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1730 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1733 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", in ufshcd_hold()
1734 __func__, hba->clk_gating.state); in ufshcd_hold()
1737 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1745 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_gate_work() local
1750 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
1757 if (hba->clk_gating.is_suspended || in ufshcd_gate_work()
1758 (hba->clk_gating.state != REQ_CLKS_OFF)) { in ufshcd_gate_work()
1759 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1760 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1761 hba->clk_gating.state); in ufshcd_gate_work()
1765 if (hba->clk_gating.active_reqs in ufshcd_gate_work()
1766 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL in ufshcd_gate_work()
1767 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks in ufshcd_gate_work()
1768 || hba->active_uic_cmd || hba->uic_async_done) in ufshcd_gate_work()
1771 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
1774 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_gate_work()
1775 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_gate_work()
1777 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1778 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_gate_work()
1780 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1781 hba->clk_gating.state); in ufshcd_gate_work()
1784 ufshcd_set_link_hibern8(hba); in ufshcd_gate_work()
1787 ufshcd_disable_irq(hba); in ufshcd_gate_work()
1789 ufshcd_setup_clocks(hba, false); in ufshcd_gate_work()
1792 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_gate_work()
1802 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
1803 if (hba->clk_gating.state == REQ_CLKS_OFF) { in ufshcd_gate_work()
1804 hba->clk_gating.state = CLKS_OFF; in ufshcd_gate_work()
1805 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1806 hba->clk_gating.state); in ufshcd_gate_work()
1809 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
1815 static void __ufshcd_release(struct ufs_hba *hba) in __ufshcd_release() argument
1817 if (!ufshcd_is_clkgating_allowed(hba)) in __ufshcd_release()
1820 hba->clk_gating.active_reqs--; in __ufshcd_release()
1822 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || in __ufshcd_release()
1823 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || in __ufshcd_release()
1824 hba->outstanding_tasks || in __ufshcd_release()
1825 hba->active_uic_cmd || hba->uic_async_done || in __ufshcd_release()
1826 hba->clk_gating.state == CLKS_OFF) in __ufshcd_release()
1829 hba->clk_gating.state = REQ_CLKS_OFF; in __ufshcd_release()
1830 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); in __ufshcd_release()
1831 queue_delayed_work(hba->clk_gating.clk_gating_workq, in __ufshcd_release()
1832 &hba->clk_gating.gate_work, in __ufshcd_release()
1833 msecs_to_jiffies(hba->clk_gating.delay_ms)); in __ufshcd_release()
1836 void ufshcd_release(struct ufs_hba *hba) in ufshcd_release() argument
1840 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_release()
1841 __ufshcd_release(hba); in ufshcd_release()
1842 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_release()
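ufshcd_hold()/ufshcd_release() bracket any stretch of work that needs the controller clocks ungated; releasing does not gate immediately but re-arms the delayed gate_work. The usual pairing, mirroring the UIC and query paths later in this listing:

	ufshcd_hold(hba, false);	/* sync: may block until clocks are back on */

	/* ... issue the command / touch controller registers ... */

	ufshcd_release(hba);		/* re-arm gate_work after clk_gating.delay_ms */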
1849 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_show() local
1851 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); in ufshcd_clkgate_delay_show()
1857 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_store() local
1863 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
1864 hba->clk_gating.delay_ms = value; in ufshcd_clkgate_delay_store()
1865 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
1872 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_enable_show() local
1874 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled); in ufshcd_clkgate_enable_show()
1880 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_enable_store() local
1889 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_enable_store()
1890 if (value == hba->clk_gating.is_enabled) in ufshcd_clkgate_enable_store()
1894 __ufshcd_release(hba); in ufshcd_clkgate_enable_store()
1896 hba->clk_gating.active_reqs++; in ufshcd_clkgate_enable_store()
1898 hba->clk_gating.is_enabled = value; in ufshcd_clkgate_enable_store()
1900 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_enable_store()
1904 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) in ufshcd_init_clk_gating_sysfs() argument
1906 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; in ufshcd_init_clk_gating_sysfs()
1907 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; in ufshcd_init_clk_gating_sysfs()
1908 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); in ufshcd_init_clk_gating_sysfs()
1909 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; in ufshcd_init_clk_gating_sysfs()
1910 hba->clk_gating.delay_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
1911 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) in ufshcd_init_clk_gating_sysfs()
1912 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); in ufshcd_init_clk_gating_sysfs()
1914 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; in ufshcd_init_clk_gating_sysfs()
1915 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; in ufshcd_init_clk_gating_sysfs()
1916 sysfs_attr_init(&hba->clk_gating.enable_attr.attr); in ufshcd_init_clk_gating_sysfs()
1917 hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; in ufshcd_init_clk_gating_sysfs()
1918 hba->clk_gating.enable_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
1919 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) in ufshcd_init_clk_gating_sysfs()
1920 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); in ufshcd_init_clk_gating_sysfs()
1923 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) in ufshcd_remove_clk_gating_sysfs() argument
1925 if (hba->clk_gating.delay_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
1926 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); in ufshcd_remove_clk_gating_sysfs()
1927 if (hba->clk_gating.enable_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
1928 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); in ufshcd_remove_clk_gating_sysfs()
1931 static void ufshcd_init_clk_gating(struct ufs_hba *hba) in ufshcd_init_clk_gating() argument
1935 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_init_clk_gating()
1938 hba->clk_gating.state = CLKS_ON; in ufshcd_init_clk_gating()
1940 hba->clk_gating.delay_ms = 150; in ufshcd_init_clk_gating()
1941 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); in ufshcd_init_clk_gating()
1942 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); in ufshcd_init_clk_gating()
1945 hba->host->host_no); in ufshcd_init_clk_gating()
1946 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, in ufshcd_init_clk_gating()
1949 ufshcd_init_clk_gating_sysfs(hba); in ufshcd_init_clk_gating()
1951 hba->clk_gating.is_enabled = true; in ufshcd_init_clk_gating()
1952 hba->clk_gating.is_initialized = true; in ufshcd_init_clk_gating()
1955 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) in ufshcd_exit_clk_gating() argument
1957 if (!hba->clk_gating.is_initialized) in ufshcd_exit_clk_gating()
1959 ufshcd_remove_clk_gating_sysfs(hba); in ufshcd_exit_clk_gating()
1960 cancel_work_sync(&hba->clk_gating.ungate_work); in ufshcd_exit_clk_gating()
1961 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_exit_clk_gating()
1962 destroy_workqueue(hba->clk_gating.clk_gating_workq); in ufshcd_exit_clk_gating()
1963 hba->clk_gating.is_initialized = false; in ufshcd_exit_clk_gating()
1967 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_start_busy() argument
1973 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_clk_scaling_start_busy()
1976 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
1977 if (!hba->clk_scaling.active_reqs++) in ufshcd_clk_scaling_start_busy()
1980 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { in ufshcd_clk_scaling_start_busy()
1981 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
1986 queue_work(hba->clk_scaling.workq, in ufshcd_clk_scaling_start_busy()
1987 &hba->clk_scaling.resume_work); in ufshcd_clk_scaling_start_busy()
1989 if (!hba->clk_scaling.window_start_t) { in ufshcd_clk_scaling_start_busy()
1990 hba->clk_scaling.window_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
1991 hba->clk_scaling.tot_busy_t = 0; in ufshcd_clk_scaling_start_busy()
1992 hba->clk_scaling.is_busy_started = false; in ufshcd_clk_scaling_start_busy()
1995 if (!hba->clk_scaling.is_busy_started) { in ufshcd_clk_scaling_start_busy()
1996 hba->clk_scaling.busy_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
1997 hba->clk_scaling.is_busy_started = true; in ufshcd_clk_scaling_start_busy()
1999 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
2002 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_update_busy() argument
2004 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_clk_scaling_update_busy()
2007 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_clk_scaling_update_busy()
2010 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clk_scaling_update_busy()
2011 hba->clk_scaling.active_reqs--; in ufshcd_clk_scaling_update_busy()
2012 if (!hba->outstanding_reqs && scaling->is_busy_started) { in ufshcd_clk_scaling_update_busy()
2018 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_update_busy()
2031 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, in ufshcd_should_inform_monitor() argument
2034 struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_should_inform_monitor()
2038 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); in ufshcd_should_inform_monitor()
2041 static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_start_monitor() argument
2046 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_start_monitor()
2047 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) in ufshcd_start_monitor()
2048 hba->monitor.busy_start_ts[dir] = ktime_get(); in ufshcd_start_monitor()
2049 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_start_monitor()
2052 static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_update_monitor() argument
2057 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_update_monitor()
2058 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { in ufshcd_update_monitor()
2060 struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_update_monitor()
2081 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_update_monitor()
2090 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) in ufshcd_send_command() argument
2092 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; in ufshcd_send_command()
2096 trace_android_vh_ufs_send_command(hba, lrbp); in ufshcd_send_command()
2097 ufshcd_add_command_trace(hba, task_tag, "send"); in ufshcd_send_command()
2098 ufshcd_clk_scaling_start_busy(hba); in ufshcd_send_command()
2099 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) in ufshcd_send_command()
2100 ufshcd_start_monitor(hba, lrbp); in ufshcd_send_command()
2101 if (hba->vops && hba->vops->setup_xfer_req) in ufshcd_send_command()
2102 hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); in ufshcd_send_command()
2103 if (ufshcd_has_utrlcnr(hba)) { in ufshcd_send_command()
2104 set_bit(task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
2105 ufshcd_writel(hba, 1 << task_tag, in ufshcd_send_command()
2110 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_send_command()
2111 set_bit(task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
2112 ufshcd_writel(hba, 1 << task_tag, in ufshcd_send_command()
2114 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_send_command()
2146 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_copy_query_response() argument
2148 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_copy_query_response()
2153 if (hba->dev_cmd.query.descriptor && in ufshcd_copy_query_response()
2164 hba->dev_cmd.query.request.upiu_req.length); in ufshcd_copy_query_response()
2166 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); in ufshcd_copy_query_response()
2168 dev_warn(hba->dev, in ufshcd_copy_query_response()
2184 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) in ufshcd_hba_capabilities() argument
2188 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); in ufshcd_hba_capabilities()
2191 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; in ufshcd_hba_capabilities()
2192 hba->nutmrs = in ufshcd_hba_capabilities()
2193 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; in ufshcd_hba_capabilities()
2194 ufs_hba_add_info(hba)->reserved_slot = hba->nutrs - 1; in ufshcd_hba_capabilities()
2197 err = ufshcd_hba_init_crypto_capabilities(hba); in ufshcd_hba_capabilities()
2199 dev_err(hba->dev, "crypto setup failed\n"); in ufshcd_hba_capabilities()
2210 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) in ufshcd_ready_for_uic_cmd() argument
2212 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) in ufshcd_ready_for_uic_cmd()
2225 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) in ufshcd_get_upmcrs() argument
2227 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; in ufshcd_get_upmcrs()
2238 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_dispatch_uic_cmd() argument
2240 WARN_ON(hba->active_uic_cmd); in ufshcd_dispatch_uic_cmd()
2242 hba->active_uic_cmd = uic_cmd; in ufshcd_dispatch_uic_cmd()
2245 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); in ufshcd_dispatch_uic_cmd()
2246 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); in ufshcd_dispatch_uic_cmd()
2247 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); in ufshcd_dispatch_uic_cmd()
2249 ufshcd_add_uic_command_trace(hba, uic_cmd, "send"); in ufshcd_dispatch_uic_cmd()
2252 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, in ufshcd_dispatch_uic_cmd()
2265 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_wait_for_uic_cmd() argument
2275 dev_err(hba->dev, in ufshcd_wait_for_uic_cmd()
2280 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", in ufshcd_wait_for_uic_cmd()
2286 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2287 hba->active_uic_cmd = NULL; in ufshcd_wait_for_uic_cmd()
2288 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2304 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, in __ufshcd_send_uic_cmd() argument
2307 if (!ufshcd_ready_for_uic_cmd(hba)) { in __ufshcd_send_uic_cmd()
2308 dev_err(hba->dev, in __ufshcd_send_uic_cmd()
2317 ufshcd_dispatch_uic_cmd(hba, uic_cmd); in __ufshcd_send_uic_cmd()
2329 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_send_uic_cmd() argument
2334 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) in ufshcd_send_uic_cmd()
2337 ufshcd_hold(hba, false); in ufshcd_send_uic_cmd()
2338 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2339 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_send_uic_cmd()
2341 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
2342 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); in ufshcd_send_uic_cmd()
2343 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_send_uic_cmd()
2345 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
2347 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2349 ufshcd_release(hba); in ufshcd_send_uic_cmd()
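A hedged sketch of issuing a single DME command through ufshcd_send_uic_cmd(); the uic_command fields follow the dispatch helper above, while UIC_CMD_DME_GET and UIC_ARG_MIB() are the standard UFSHCI macro names assumed here, and attr_sel is an illustrative caller-supplied attribute selector, not taken from this listing:

	struct uic_command uic_cmd = {0};
	u32 mib_val;
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;
	uic_cmd.argument1 = UIC_ARG_MIB(attr_sel);	/* which DME attribute to read */

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev, "DME_GET failed: %d\n", ret);
	else
		mib_val = uic_cmd.argument3;	/* attribute value returned via ARG_3 */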
2360 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_map_sg() argument
2376 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_map_sg()
2378 cpu_to_le16(sg_segments * hba->sg_entry_size); in ufshcd_map_sg()
2393 prd = (void *)prd + hba->sg_entry_size; in ufshcd_map_sg()
2400 trace_android_vh_ufs_fill_prdt(hba, lrbp, sg_segments, &err); in ufshcd_map_sg()
2409 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_enable_intr() argument
2411 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
2413 if (hba->ufs_version == ufshci_version(1, 0)) { in ufshcd_enable_intr()
2421 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
2429 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_disable_intr() argument
2431 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
2433 if (hba->ufs_version == ufshci_version(1, 0)) { in ufshcd_disable_intr()
2443 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
2535 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, in ufshcd_prepare_utp_query_req_upiu() argument
2539 struct ufs_query *query = &hba->dev_cmd.query; in ufshcd_prepare_utp_query_req_upiu()
2590 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, in ufshcd_compose_devman_upiu() argument
2596 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_compose_devman_upiu()
2602 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) in ufshcd_compose_devman_upiu()
2603 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); in ufshcd_compose_devman_upiu()
2604 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) in ufshcd_compose_devman_upiu()
2618 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_comp_scsi_upiu() argument
2623 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_comp_scsi_upiu()
2650 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) in ufshcd_init_lrb() argument
2652 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + in ufshcd_init_lrb()
2653 i * sizeof_utp_transfer_cmd_desc(hba); in ufshcd_init_lrb()
2654 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; in ufshcd_init_lrb()
2655 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + in ufshcd_init_lrb()
2656 i * sizeof_utp_transfer_cmd_desc(hba); in ufshcd_init_lrb()
2662 lrb->utrd_dma_addr = hba->utrdl_dma_addr + in ufshcd_init_lrb()
2682 struct ufs_hba *hba; in ufshcd_queuecommand() local
2686 hba = shost_priv(host); in ufshcd_queuecommand()
2689 if (!ufshcd_valid_tag(hba, tag)) { in ufshcd_queuecommand()
2690 dev_err(hba->dev, in ufshcd_queuecommand()
2696 if (!down_read_trylock(&hba->clk_scaling_lock)) in ufshcd_queuecommand()
2699 switch (hba->ufshcd_state) { in ufshcd_queuecommand()
2714 if (hba->pm_op_in_progress) { in ufshcd_queuecommand()
2715 hba->force_reset = true; in ufshcd_queuecommand()
2729 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", in ufshcd_queuecommand()
2730 __func__, hba->ufshcd_state); in ufshcd_queuecommand()
2736 hba->req_abort_count = 0; in ufshcd_queuecommand()
2738 err = ufshcd_hold(hba, true); in ufshcd_queuecommand()
2743 WARN_ON(ufshcd_is_clkgating_allowed(hba) && in ufshcd_queuecommand()
2744 (hba->clk_gating.state != CLKS_ON)); in ufshcd_queuecommand()
2746 lrbp = &hba->lrb[tag]; in ufshcd_queuecommand()
2753 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false; in ufshcd_queuecommand()
2757 trace_android_vh_ufs_prepare_command(hba, cmd->request, lrbp, &err); in ufshcd_queuecommand()
2760 ufshcd_release(hba); in ufshcd_queuecommand()
2766 err = ufshpb_prep(hba, lrbp); in ufshcd_queuecommand()
2769 ufshcd_release(hba); in ufshcd_queuecommand()
2773 ufshcd_comp_scsi_upiu(hba, lrbp); in ufshcd_queuecommand()
2775 err = ufshcd_map_sg(hba, lrbp); in ufshcd_queuecommand()
2778 ufshcd_release(hba); in ufshcd_queuecommand()
2784 ufshcd_send_command(hba, tag); in ufshcd_queuecommand()
2786 up_read(&hba->clk_scaling_lock); in ufshcd_queuecommand()
2790 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, in ufshcd_compose_dev_cmd() argument
2800 hba->dev_cmd.type = cmd_type; in ufshcd_compose_dev_cmd()
2802 return ufshcd_compose_devman_upiu(hba, lrbp); in ufshcd_compose_dev_cmd()
2806 ufshcd_clear_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_cmd() argument
2813 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_cmd()
2814 ufshcd_utrl_clear(hba, tag); in ufshcd_clear_cmd()
2815 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_cmd()
2821 err = ufshcd_wait_for_register(hba, in ufshcd_clear_cmd()
2829 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_check_query_response() argument
2831 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_check_query_response()
2845 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_dev_cmd_completion() argument
2850 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_dev_cmd_completion()
2855 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { in ufshcd_dev_cmd_completion()
2857 dev_err(hba->dev, "%s: unexpected response %x\n", in ufshcd_dev_cmd_completion()
2862 err = ufshcd_check_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
2864 err = ufshcd_copy_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
2869 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", in ufshcd_dev_cmd_completion()
2874 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", in ufshcd_dev_cmd_completion()
2882 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, in ufshcd_wait_for_dev_cmd() argument
2889 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, in ufshcd_wait_for_dev_cmd()
2894 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
2895 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
2899 err = ufshcd_dev_cmd_completion(hba, lrbp); in ufshcd_wait_for_dev_cmd()
2901 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
2905 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", in ufshcd_wait_for_dev_cmd()
2907 if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) in ufshcd_wait_for_dev_cmd()
2915 ufshcd_outstanding_req_clear(hba, lrbp->task_tag); in ufshcd_wait_for_dev_cmd()
2930 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, in ufshcd_exec_dev_cmd() argument
2934 const u32 tag = ufs_hba_add_info(hba)->reserved_slot; in ufshcd_exec_dev_cmd()
2939 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_exec_dev_cmd()
2941 down_read(&hba->clk_scaling_lock); in ufshcd_exec_dev_cmd()
2943 lrbp = &hba->lrb[tag]; in ufshcd_exec_dev_cmd()
2945 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); in ufshcd_exec_dev_cmd()
2949 hba->dev_cmd.complete = &wait; in ufshcd_exec_dev_cmd()
2951 ufshcd_add_query_upiu_trace(hba, tag, "query_send"); in ufshcd_exec_dev_cmd()
2955 ufshcd_send_command(hba, tag); in ufshcd_exec_dev_cmd()
2956 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); in ufshcd_exec_dev_cmd()
2957 ufshcd_add_query_upiu_trace(hba, tag, in ufshcd_exec_dev_cmd()
2961 up_read(&hba->clk_scaling_lock); in ufshcd_exec_dev_cmd()
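The helpers above (ufshcd_compose_dev_cmd, ufshcd_wait_for_dev_cmd, ufshcd_exec_dev_cmd) form the device-management command path: compose a UPIU into the reserved slot, ring the doorbell, and sleep on hba->dev_cmd.complete. Since ufshcd_exec_dev_cmd() is file-local, callers inside ufshcd.c drive it under hba->dev_cmd.lock with the host held, as in this minimal sketch modeled on ufshcd_verify_dev_init() further down (the 50 ms timeout is an assumption, not the driver's constant):

        /* Sketch only: send a NOP OUT through the device-management path. */
        static int example_send_nop_out(struct ufs_hba *hba)
        {
                int err;

                ufshcd_hold(hba, false);           /* keep clocks/link active */
                mutex_lock(&hba->dev_cmd.lock);    /* serialize dev-mgmt cmds */
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, 50 /* ms, assumed */);
                mutex_unlock(&hba->dev_cmd.lock);
                ufshcd_release(hba);

                return err;
        }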
2975 static inline void ufshcd_init_query(struct ufs_hba *hba, in ufshcd_init_query() argument
2979 *request = &hba->dev_cmd.query.request; in ufshcd_init_query()
2980 *response = &hba->dev_cmd.query.response; in ufshcd_init_query()
2989 int ufshcd_query_flag_retry(struct ufs_hba *hba, in ufshcd_query_flag_retry() argument
2996 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res); in ufshcd_query_flag_retry()
2998 dev_dbg(hba->dev, in ufshcd_query_flag_retry()
3006 dev_err(hba->dev, in ufshcd_query_flag_retry()
3023 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_flag() argument
3031 BUG_ON(!hba); in ufshcd_query_flag()
3033 ufshcd_hold(hba, false); in ufshcd_query_flag()
3034 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_flag()
3035 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_flag()
3048 dev_err(hba->dev, "%s: Invalid argument for read request\n", in ufshcd_query_flag()
3055 dev_err(hba->dev, in ufshcd_query_flag()
3062 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); in ufshcd_query_flag()
3065 dev_err(hba->dev, in ufshcd_query_flag()
3076 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_flag()
3077 ufshcd_release(hba); in ufshcd_query_flag()
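ufshcd_query_flag() and its retry wrapper issue the SET/CLEAR/READ flag opcodes through that same device-management path. A typical use is fDeviceInit handling, which ufshcd_complete_dev_init() below performs; a minimal sketch of that pattern, using the flag IDN that caller uses:

        bool flag_res = true;
        int err;

        /* Ask the device to run its initialization sequence. */
        err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
                                      QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
        if (err)
                return err;

        /* fDeviceInit reads back as false once the device has finished. */
        err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
                                QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);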
3093 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_attr() argument
3100 BUG_ON(!hba); in ufshcd_query_attr()
3103 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", in ufshcd_query_attr()
3108 ufshcd_hold(hba, false); in ufshcd_query_attr()
3110 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_attr()
3111 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_attr()
3123 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", in ufshcd_query_attr()
3129 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_attr()
3132 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in ufshcd_query_attr()
3140 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_attr()
3141 ufshcd_release(hba); in ufshcd_query_attr()
3159 int ufshcd_query_attr_retry(struct ufs_hba *hba, in ufshcd_query_attr_retry() argument
3167 ret = ufshcd_query_attr(hba, opcode, idn, index, in ufshcd_query_attr_retry()
3170 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", in ufshcd_query_attr_retry()
3177 dev_err(hba->dev, in ufshcd_query_attr_retry()
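The attribute helpers mirror the flag helpers but carry a u32 value in each direction. A hedged sketch of a read, using bRefClkGatingWait since ufshcd_get_ref_clk_gating_wait() below consumes it; the QUERY_ATTR_IDN_* name is assumed from ufs.h:

        u32 gating_wait = 0;
        int err;

        err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                                      QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME,
                                      0 /* index */, 0 /* selector */,
                                      &gating_wait);
        if (err)
                dev_err(hba->dev, "bRefClkGatingWait read failed: %d\n", err);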
3184 static int __ufshcd_query_descriptor(struct ufs_hba *hba, in __ufshcd_query_descriptor() argument
3192 BUG_ON(!hba); in __ufshcd_query_descriptor()
3195 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", in __ufshcd_query_descriptor()
3201 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", in __ufshcd_query_descriptor()
3206 ufshcd_hold(hba, false); in __ufshcd_query_descriptor()
3208 mutex_lock(&hba->dev_cmd.lock); in __ufshcd_query_descriptor()
3209 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in __ufshcd_query_descriptor()
3211 hba->dev_cmd.query.descriptor = desc_buf; in __ufshcd_query_descriptor()
3222 dev_err(hba->dev, in __ufshcd_query_descriptor()
3229 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in __ufshcd_query_descriptor()
3232 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in __ufshcd_query_descriptor()
3240 hba->dev_cmd.query.descriptor = NULL; in __ufshcd_query_descriptor()
3241 mutex_unlock(&hba->dev_cmd.lock); in __ufshcd_query_descriptor()
3242 ufshcd_release(hba); in __ufshcd_query_descriptor()
3260 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, in ufshcd_query_descriptor_retry() argument
3270 err = __ufshcd_query_descriptor(hba, opcode, idn, index, in ufshcd_query_descriptor_retry()
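Descriptor reads go through the same machinery with a caller-supplied buffer; the length argument is a pointer that is updated to the size the device actually returned. A sketch that reads the device descriptor, assuming QUERY_DESC_IDN_DEVICE and QUERY_DESC_MAX_SIZE from the UFS headers:

        int buf_len = QUERY_DESC_MAX_SIZE;
        u8 *desc_buf;
        int err;

        desc_buf = kzalloc(buf_len, GFP_KERNEL);
        if (!desc_buf)
                return -ENOMEM;

        err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
                                            QUERY_DESC_IDN_DEVICE, 0, 0,
                                            desc_buf, &buf_len);
        if (!err)
                dev_dbg(hba->dev, "device descriptor: %d bytes\n", buf_len);
        kfree(desc_buf);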
3286 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, in ufshcd_map_desc_id_to_length() argument
3293 *desc_len = hba->desc_size[desc_id]; in ufshcd_map_desc_id_to_length()
3297 static void ufshcd_update_desc_length(struct ufs_hba *hba, in ufshcd_update_desc_length() argument
3301 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE && in ufshcd_update_desc_length()
3308 hba->desc_size[desc_id] = desc_len; in ufshcd_update_desc_length()
3322 int ufshcd_read_desc_param(struct ufs_hba *hba, in ufshcd_read_desc_param() argument
3339 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); in ufshcd_read_desc_param()
3341 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__); in ufshcd_read_desc_param()
3346 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", in ufshcd_read_desc_param()
3362 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, in ufshcd_read_desc_param()
3367 …dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret … in ufshcd_read_desc_param()
3374 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", in ufshcd_read_desc_param()
3382 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len); in ufshcd_read_desc_param()
3432 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, in ufshcd_read_string_desc() argument
3446 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, in ufshcd_read_string_desc()
3449 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", in ufshcd_read_string_desc()
3456 dev_dbg(hba->dev, "String Desc is of zero length\n"); in ufshcd_read_string_desc()
3511 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, in ufshcd_read_unit_desc_param() argument
3521 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset)) in ufshcd_read_unit_desc_param()
3524 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, in ufshcd_read_unit_desc_param()
3528 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) in ufshcd_get_ref_clk_gating_wait() argument
3533 if (hba->dev_info.wspecversion >= 0x300) { in ufshcd_get_ref_clk_gating_wait()
3534 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ref_clk_gating_wait()
3538 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3543 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3547 hba->dev_info.clk_gating_wait_us = gating_wait; in ufshcd_get_ref_clk_gating_wait()
3566 static int ufshcd_memory_alloc(struct ufs_hba *hba) in ufshcd_memory_alloc() argument
3571 ucdl_size = (sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs); in ufshcd_memory_alloc()
3572 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3574 &hba->ucdl_dma_addr, in ufshcd_memory_alloc()
3583 if (!hba->ucdl_base_addr || in ufshcd_memory_alloc()
3584 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3585 dev_err(hba->dev, in ufshcd_memory_alloc()
3594 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); in ufshcd_memory_alloc()
3595 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3597 &hba->utrdl_dma_addr, in ufshcd_memory_alloc()
3599 if (!hba->utrdl_base_addr || in ufshcd_memory_alloc()
3600 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3601 dev_err(hba->dev, in ufshcd_memory_alloc()
3610 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; in ufshcd_memory_alloc()
3611 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3613 &hba->utmrdl_dma_addr, in ufshcd_memory_alloc()
3615 if (!hba->utmrdl_base_addr || in ufshcd_memory_alloc()
3616 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3617 dev_err(hba->dev, in ufshcd_memory_alloc()
3623 hba->lrb = devm_kcalloc(hba->dev, in ufshcd_memory_alloc()
3624 hba->nutrs, sizeof(struct ufshcd_lrb), in ufshcd_memory_alloc()
3626 if (!hba->lrb) { in ufshcd_memory_alloc()
3627 dev_err(hba->dev, "LRB Memory allocation failed\n"); in ufshcd_memory_alloc()
3648 static void ufshcd_host_memory_configure(struct ufs_hba *hba) in ufshcd_host_memory_configure() argument
3658 utrdlp = hba->utrdl_base_addr; in ufshcd_host_memory_configure()
3665 cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba); in ufshcd_host_memory_configure()
3666 cmd_desc_dma_addr = hba->ucdl_dma_addr; in ufshcd_host_memory_configure()
3668 for (i = 0; i < hba->nutrs; i++) { in ufshcd_host_memory_configure()
3678 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { in ufshcd_host_memory_configure()
3694 ufshcd_init_lrb(hba, &hba->lrb[i], i); in ufshcd_host_memory_configure()
3709 static int ufshcd_dme_link_startup(struct ufs_hba *hba) in ufshcd_dme_link_startup() argument
3716 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_link_startup()
3718 dev_dbg(hba->dev, in ufshcd_dme_link_startup()
3731 static int ufshcd_dme_reset(struct ufs_hba *hba) in ufshcd_dme_reset() argument
3738 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_reset()
3740 dev_err(hba->dev, in ufshcd_dme_reset()
3754 static int ufshcd_dme_enable(struct ufs_hba *hba) in ufshcd_dme_enable() argument
3761 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_enable()
3763 dev_err(hba->dev, in ufshcd_dme_enable()
3769 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) in ufshcd_add_delay_before_dme_cmd() argument
3774 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) in ufshcd_add_delay_before_dme_cmd()
3781 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { in ufshcd_add_delay_before_dme_cmd()
3787 hba->last_dme_cmd_tstamp)); in ufshcd_add_delay_before_dme_cmd()
3810 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_set_attr() argument
3830 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_set_attr()
3832 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", in ufshcd_dme_set_attr()
3837 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", in ufshcd_dme_set_attr()
3854 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_get_attr() argument
3869 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { in ufshcd_dme_get_attr()
3870 orig_pwr_info = hba->pwr_info; in ufshcd_dme_get_attr()
3885 ret = ufshcd_change_power_mode(hba, &temp_pwr_info); in ufshcd_dme_get_attr()
3897 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_get_attr()
3899 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", in ufshcd_dme_get_attr()
3904 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", in ufshcd_dme_get_attr()
3911 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) in ufshcd_dme_get_attr()
3913 ufshcd_change_power_mode(hba, &orig_pwr_info); in ufshcd_dme_get_attr()
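ufshcd_dme_set_attr()/ufshcd_dme_get_attr() are normally reached through the ufshcd_dme_set/ufshcd_dme_get/ufshcd_dme_peer_get wrappers with a UIC_ARG_MIB()-encoded attribute, exactly as the power-mode code below uses them. A brief sketch; the attribute choices are purely illustrative:

        u32 tx_lanes = 0;

        /* How many TX data lanes does the local M-PHY report as connected? */
        ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &tx_lanes);

        /* Program the local TX gear (PWM gear 1 here, just as an example). */
        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_PWM_G1);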
3935 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) in ufshcd_uic_pwr_ctrl() argument
3943 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
3944 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_uic_pwr_ctrl()
3946 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
3947 if (ufshcd_is_link_broken(hba)) { in ufshcd_uic_pwr_ctrl()
3951 hba->uic_async_done = &uic_async_done; in ufshcd_uic_pwr_ctrl()
3952 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { in ufshcd_uic_pwr_ctrl()
3953 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); in ufshcd_uic_pwr_ctrl()
3961 ret = __ufshcd_send_uic_cmd(hba, cmd, false); in ufshcd_uic_pwr_ctrl()
3962 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
3964 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
3970 if (!wait_for_completion_timeout(hba->uic_async_done, in ufshcd_uic_pwr_ctrl()
3972 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
3977 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n", in ufshcd_uic_pwr_ctrl()
3987 status = ufshcd_get_upmcrs(hba); in ufshcd_uic_pwr_ctrl()
3989 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
3996 ufshcd_print_host_state(hba); in ufshcd_uic_pwr_ctrl()
3997 ufshcd_print_pwr_info(hba); in ufshcd_uic_pwr_ctrl()
3998 ufshcd_print_evt_hist(hba); in ufshcd_uic_pwr_ctrl()
4001 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4002 hba->active_uic_cmd = NULL; in ufshcd_uic_pwr_ctrl()
4003 hba->uic_async_done = NULL; in ufshcd_uic_pwr_ctrl()
4005 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); in ufshcd_uic_pwr_ctrl()
4007 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4010 ufshcd_set_link_broken(hba); in ufshcd_uic_pwr_ctrl()
4011 ufshcd_schedule_eh_work(hba); in ufshcd_uic_pwr_ctrl()
4014 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4015 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
4028 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) in ufshcd_uic_change_pwr_mode() argument
4033 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { in ufshcd_uic_change_pwr_mode()
4034 ret = ufshcd_dme_set(hba, in ufshcd_uic_change_pwr_mode()
4037 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", in ufshcd_uic_change_pwr_mode()
4046 ufshcd_hold(hba, false); in ufshcd_uic_change_pwr_mode()
4047 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_change_pwr_mode()
4048 ufshcd_release(hba); in ufshcd_uic_change_pwr_mode()
4054 int ufshcd_link_recovery(struct ufs_hba *hba) in ufshcd_link_recovery() argument
4059 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4060 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_link_recovery()
4061 ufshcd_set_eh_in_progress(hba); in ufshcd_link_recovery()
4062 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4065 ufshcd_vops_device_reset(hba); in ufshcd_link_recovery()
4067 ret = ufshcd_host_reset_and_restore(hba); in ufshcd_link_recovery()
4069 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4071 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_link_recovery()
4072 ufshcd_clear_eh_in_progress(hba); in ufshcd_link_recovery()
4073 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4076 dev_err(hba->dev, "%s: link recovery failed, err %d", in ufshcd_link_recovery()
4083 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) in ufshcd_uic_hibern8_enter() argument
4089 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); in ufshcd_uic_hibern8_enter()
4092 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_enter()
4093 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", in ufshcd_uic_hibern8_enter()
4097 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", in ufshcd_uic_hibern8_enter()
4100 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, in ufshcd_uic_hibern8_enter()
4107 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) in ufshcd_uic_hibern8_exit() argument
4113 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); in ufshcd_uic_hibern8_exit()
4116 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_exit()
4117 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", in ufshcd_uic_hibern8_exit()
4121 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", in ufshcd_uic_hibern8_exit()
4124 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, in ufshcd_uic_hibern8_exit()
4126 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); in ufshcd_uic_hibern8_exit()
4127 hba->ufs_stats.hibern8_exit_cnt++; in ufshcd_uic_hibern8_exit()
4134 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) in ufshcd_auto_hibern8_update() argument
4139 if (!ufshcd_is_auto_hibern8_supported(hba)) in ufshcd_auto_hibern8_update()
4142 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_auto_hibern8_update()
4143 if (hba->ahit != ahit) { in ufshcd_auto_hibern8_update()
4144 hba->ahit = ahit; in ufshcd_auto_hibern8_update()
4147 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_auto_hibern8_update()
4149 if (update && !pm_runtime_suspended(hba->dev)) { in ufshcd_auto_hibern8_update()
4150 pm_runtime_get_sync(hba->dev); in ufshcd_auto_hibern8_update()
4151 ufshcd_hold(hba, false); in ufshcd_auto_hibern8_update()
4152 ufshcd_auto_hibern8_enable(hba); in ufshcd_auto_hibern8_update()
4153 ufshcd_release(hba); in ufshcd_auto_hibern8_update()
4154 pm_runtime_put(hba->dev); in ufshcd_auto_hibern8_update()
4159 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) in ufshcd_auto_hibern8_enable() argument
4163 if (!ufshcd_is_auto_hibern8_supported(hba)) in ufshcd_auto_hibern8_enable()
4166 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_auto_hibern8_enable()
4167 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); in ufshcd_auto_hibern8_enable()
4168 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_auto_hibern8_enable()
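ufshcd_auto_hibern8_update() expects an already-packed Auto-Hibernate Idle Timer value, which ufshcd_auto_hibern8_enable() then writes to REG_AUTO_HIBERNATE_IDLE_TIMER. A hedged sketch of packing that value, assuming the UFSHCI_AHIBERN8_TIMER_MASK/UFSHCI_AHIBERN8_SCALE_MASK definitions from ufshci.h and the spec's timer-times-10^scale-microseconds encoding:

        u32 ahit;

        /* Roughly 1 ms of bus idle before the host auto-enters HIBERN8:
         * timer = 1, scale = 3 (10^3 us units). Values are illustrative. */
        ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 1) |
               FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
        ufshcd_auto_hibern8_update(hba, ahit);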
4176 static void ufshcd_init_pwr_info(struct ufs_hba *hba) in ufshcd_init_pwr_info() argument
4178 hba->pwr_info.gear_rx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4179 hba->pwr_info.gear_tx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4180 hba->pwr_info.lane_rx = 1; in ufshcd_init_pwr_info()
4181 hba->pwr_info.lane_tx = 1; in ufshcd_init_pwr_info()
4182 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4183 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4184 hba->pwr_info.hs_rate = 0; in ufshcd_init_pwr_info()
4191 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) in ufshcd_get_max_pwr_mode() argument
4193 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; in ufshcd_get_max_pwr_mode()
4195 if (hba->max_pwr_info.is_valid) in ufshcd_get_max_pwr_mode()
4203 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), in ufshcd_get_max_pwr_mode()
4205 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_get_max_pwr_mode()
4209 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", in ufshcd_get_max_pwr_mode()
4221 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
4223 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
4226 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4233 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), in ufshcd_get_max_pwr_mode()
4236 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
4239 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4246 hba->max_pwr_info.is_valid = true; in ufshcd_get_max_pwr_mode()
4250 static int ufshcd_change_power_mode(struct ufs_hba *hba, in ufshcd_change_power_mode() argument
4256 if (!hba->force_pmc && in ufshcd_change_power_mode()
4257 pwr_mode->gear_rx == hba->pwr_info.gear_rx && in ufshcd_change_power_mode()
4258 pwr_mode->gear_tx == hba->pwr_info.gear_tx && in ufshcd_change_power_mode()
4259 pwr_mode->lane_rx == hba->pwr_info.lane_rx && in ufshcd_change_power_mode()
4260 pwr_mode->lane_tx == hba->pwr_info.lane_tx && in ufshcd_change_power_mode()
4261 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && in ufshcd_change_power_mode()
4262 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && in ufshcd_change_power_mode()
4263 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { in ufshcd_change_power_mode()
4264 dev_dbg(hba->dev, "%s: power already configured\n", __func__); in ufshcd_change_power_mode()
4274 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); in ufshcd_change_power_mode()
4275 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufshcd_change_power_mode()
4279 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); in ufshcd_change_power_mode()
4281 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); in ufshcd_change_power_mode()
4283 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); in ufshcd_change_power_mode()
4284 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufshcd_change_power_mode()
4288 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); in ufshcd_change_power_mode()
4290 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); in ufshcd_change_power_mode()
4296 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufshcd_change_power_mode()
4299 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { in ufshcd_change_power_mode()
4300 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), in ufshcd_change_power_mode()
4302 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), in ufshcd_change_power_mode()
4304 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), in ufshcd_change_power_mode()
4306 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), in ufshcd_change_power_mode()
4308 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), in ufshcd_change_power_mode()
4310 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), in ufshcd_change_power_mode()
4313 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), in ufshcd_change_power_mode()
4315 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), in ufshcd_change_power_mode()
4317 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), in ufshcd_change_power_mode()
4321 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 in ufshcd_change_power_mode()
4325 dev_err(hba->dev, in ufshcd_change_power_mode()
4328 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, in ufshcd_change_power_mode()
4331 memcpy(&hba->pwr_info, pwr_mode, in ufshcd_change_power_mode()
4343 int ufshcd_config_pwr_mode(struct ufs_hba *hba, in ufshcd_config_pwr_mode() argument
4349 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, in ufshcd_config_pwr_mode()
4355 ret = ufshcd_change_power_mode(hba, &final_params); in ufshcd_config_pwr_mode()
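ufshcd_config_pwr_mode() is the entry point the probe path and vendor drivers use: the PRE_CHANGE vops callback may rewrite the requested parameters into final_params before ufshcd_change_power_mode() applies them. The usual probe-time pattern inside ufshcd.c looks roughly like this sketch (error strings are illustrative):

        if (ufshcd_get_max_pwr_mode(hba)) {
                dev_err(hba->dev, "%s: failed to read max supported power mode\n",
                        __func__);
        } else if (ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info)) {
                dev_err(hba->dev, "%s: failed to switch to max power mode\n",
                        __func__);
        }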
4367 static int ufshcd_complete_dev_init(struct ufs_hba *hba) in ufshcd_complete_dev_init() argument
4373 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_complete_dev_init()
4376 dev_err(hba->dev, in ufshcd_complete_dev_init()
4385 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_complete_dev_init()
4393 dev_err(hba->dev, in ufshcd_complete_dev_init()
4397 dev_err(hba->dev, in ufshcd_complete_dev_init()
4418 int ufshcd_make_hba_operational(struct ufs_hba *hba) in ufshcd_make_hba_operational() argument
4424 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); in ufshcd_make_hba_operational()
4427 if (ufshcd_is_intr_aggr_allowed(hba)) in ufshcd_make_hba_operational()
4428 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); in ufshcd_make_hba_operational()
4430 ufshcd_disable_intr_aggr(hba); in ufshcd_make_hba_operational()
4433 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4435 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4437 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4439 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4451 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); in ufshcd_make_hba_operational()
4453 ufshcd_enable_run_stop_reg(hba); in ufshcd_make_hba_operational()
4455 dev_err(hba->dev, in ufshcd_make_hba_operational()
4468 void ufshcd_hba_stop(struct ufs_hba *hba) in ufshcd_hba_stop() argument
4477 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hba_stop()
4478 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); in ufshcd_hba_stop()
4479 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hba_stop()
4481 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, in ufshcd_hba_stop()
4485 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); in ufshcd_hba_stop()
4499 static int ufshcd_hba_execute_hce(struct ufs_hba *hba) in ufshcd_hba_execute_hce() argument
4505 if (!ufshcd_is_hba_active(hba)) in ufshcd_hba_execute_hce()
4507 ufshcd_hba_stop(hba); in ufshcd_hba_execute_hce()
4510 ufshcd_set_link_off(hba); in ufshcd_hba_execute_hce()
4512 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_execute_hce()
4515 ufshcd_hba_start(hba); in ufshcd_hba_execute_hce()
4527 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); in ufshcd_hba_execute_hce()
4531 while (ufshcd_is_hba_active(hba)) { in ufshcd_hba_execute_hce()
4535 dev_err(hba->dev, in ufshcd_hba_execute_hce()
4547 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_execute_hce()
4549 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_execute_hce()
4554 int ufshcd_hba_enable(struct ufs_hba *hba) in ufshcd_hba_enable() argument
4558 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { in ufshcd_hba_enable()
4559 ufshcd_set_link_off(hba); in ufshcd_hba_enable()
4560 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_enable()
4563 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_enable()
4564 ret = ufshcd_dme_reset(hba); in ufshcd_hba_enable()
4566 ret = ufshcd_dme_enable(hba); in ufshcd_hba_enable()
4568 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_enable()
4570 dev_err(hba->dev, in ufshcd_hba_enable()
4574 ret = ufshcd_hba_execute_hce(hba); in ufshcd_hba_enable()
4581 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) in ufshcd_disable_tx_lcc() argument
4586 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
4589 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
4593 err = ufshcd_dme_set(hba, in ufshcd_disable_tx_lcc()
4598 err = ufshcd_dme_peer_set(hba, in ufshcd_disable_tx_lcc()
4603 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", in ufshcd_disable_tx_lcc()
4612 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) in ufshcd_disable_device_tx_lcc() argument
4614 return ufshcd_disable_tx_lcc(hba, true); in ufshcd_disable_device_tx_lcc()
4617 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) in ufshcd_update_evt_hist() argument
4624 e = &hba->ufs_stats.event[id]; in ufshcd_update_evt_hist()
4630 ufshcd_vops_event_notify(hba, id, &val); in ufshcd_update_evt_hist()
4640 static int ufshcd_link_startup(struct ufs_hba *hba) in ufshcd_link_startup() argument
4650 if (!ufshcd_is_ufs_dev_active(hba)) in ufshcd_link_startup()
4655 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); in ufshcd_link_startup()
4657 ret = ufshcd_dme_link_startup(hba); in ufshcd_link_startup()
4660 if (!ret && !ufshcd_is_device_present(hba)) { in ufshcd_link_startup()
4661 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4664 dev_err(hba->dev, "%s: Device not present\n", __func__); in ufshcd_link_startup()
4674 if (ret && ufshcd_hba_enable(hba)) { in ufshcd_link_startup()
4675 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4684 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4697 ufshcd_init_pwr_info(hba); in ufshcd_link_startup()
4698 ufshcd_print_pwr_info(hba); in ufshcd_link_startup()
4700 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { in ufshcd_link_startup()
4701 ret = ufshcd_disable_device_tx_lcc(hba); in ufshcd_link_startup()
4707 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); in ufshcd_link_startup()
4712 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); in ufshcd_link_startup()
4713 ret = ufshcd_make_hba_operational(hba); in ufshcd_link_startup()
4716 dev_err(hba->dev, "link startup failed %d\n", ret); in ufshcd_link_startup()
4717 ufshcd_print_host_state(hba); in ufshcd_link_startup()
4718 ufshcd_print_pwr_info(hba); in ufshcd_link_startup()
4719 ufshcd_print_evt_hist(hba); in ufshcd_link_startup()
4734 static int ufshcd_verify_dev_init(struct ufs_hba *hba) in ufshcd_verify_dev_init() argument
4739 ufshcd_hold(hba, false); in ufshcd_verify_dev_init()
4740 mutex_lock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
4742 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, in ufshcd_verify_dev_init()
4748 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_verify_dev_init()
4750 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
4751 ufshcd_release(hba); in ufshcd_verify_dev_init()
4754 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); in ufshcd_verify_dev_init()
4771 struct ufs_hba *hba; in ufshcd_set_queue_depth() local
4773 hba = shost_priv(sdev->host); in ufshcd_set_queue_depth()
4775 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
4776 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_set_queue_depth()
4787 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
4789 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); in ufshcd_set_queue_depth()
4791 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", in ufshcd_set_queue_depth()
4807 static int ufshcd_get_lu_wp(struct ufs_hba *hba, in ufshcd_get_lu_wp() argument
4820 else if (lun >= hba->dev_info.max_lu_supported) in ufshcd_get_lu_wp()
4823 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_get_lu_wp()
4838 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, in ufshcd_get_lu_power_on_wp_status() argument
4841 if (hba->dev_info.f_power_on_wp_en && in ufshcd_get_lu_power_on_wp_status()
4842 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_get_lu_power_on_wp_status()
4845 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), in ufshcd_get_lu_power_on_wp_status()
4848 hba->dev_info.is_lu_power_on_wp = true; in ufshcd_get_lu_power_on_wp_status()
4860 struct ufs_hba *hba; in ufshcd_slave_alloc() local
4862 hba = shost_priv(sdev->host); in ufshcd_slave_alloc()
4881 ufshcd_get_lu_power_on_wp_status(hba, sdev); in ufshcd_slave_alloc()
4898 static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_hpb_destroy() argument
4902 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) in ufshcd_hpb_destroy()
4905 ufshpb_destroy_lu(hba, sdev); in ufshcd_hpb_destroy()
4908 static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_hpb_configure() argument
4912 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) in ufshcd_hpb_configure()
4915 ufshpb_init_hpb_lu(hba, sdev); in ufshcd_hpb_configure()
4924 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_slave_configure() local
4927 ufshcd_hpb_configure(hba, sdev); in ufshcd_slave_configure()
4930 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) in ufshcd_slave_configure()
4933 if (ufshcd_is_rpm_autosuspend_allowed(hba)) in ufshcd_slave_configure()
4936 ufshcd_crypto_setup_rq_keyslot_manager(hba, q); in ufshcd_slave_configure()
4949 struct ufs_hba *hba; in ufshcd_slave_destroy() local
4951 hba = shost_priv(sdev->host); in ufshcd_slave_destroy()
4953 ufshcd_hpb_destroy(hba, sdev); in ufshcd_slave_destroy()
4959 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_slave_destroy()
4960 hba->sdev_ufs_device = NULL; in ufshcd_slave_destroy()
4961 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5008 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_transfer_rsp_status() argument
5017 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { in ufshcd_transfer_rsp_status()
5026 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_transfer_rsp_status()
5054 if (!hba->pm_op_in_progress && in ufshcd_transfer_rsp_status()
5055 !ufshcd_eh_in_progress(hba) && in ufshcd_transfer_rsp_status()
5057 schedule_work(&hba->eeh_work)) { in ufshcd_transfer_rsp_status()
5063 pm_runtime_get_noresume(hba->dev); in ufshcd_transfer_rsp_status()
5067 ufshpb_rsp_upiu(hba, lrbp); in ufshcd_transfer_rsp_status()
5072 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5076 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5087 dev_err_ratelimited(hba->dev, in ufshcd_transfer_rsp_status()
5104 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5107 ufshcd_print_evt_hist(hba); in ufshcd_transfer_rsp_status()
5108 ufshcd_print_host_state(hba); in ufshcd_transfer_rsp_status()
5113 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) in ufshcd_transfer_rsp_status()
5114 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); in ufshcd_transfer_rsp_status()
5118 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, in ufshcd_is_auto_hibern8_error() argument
5121 if (!ufshcd_is_auto_hibern8_supported(hba) || in ufshcd_is_auto_hibern8_error()
5122 !ufshcd_is_auto_hibern8_enabled(hba)) in ufshcd_is_auto_hibern8_error()
5128 if (hba->active_uic_cmd && in ufshcd_is_auto_hibern8_error()
5129 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || in ufshcd_is_auto_hibern8_error()
5130 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) in ufshcd_is_auto_hibern8_error()
5145 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) in ufshcd_uic_cmd_compl() argument
5149 spin_lock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5150 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) in ufshcd_uic_cmd_compl()
5151 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); in ufshcd_uic_cmd_compl()
5153 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { in ufshcd_uic_cmd_compl()
5154 hba->active_uic_cmd->argument2 |= in ufshcd_uic_cmd_compl()
5155 ufshcd_get_uic_cmd_result(hba); in ufshcd_uic_cmd_compl()
5156 hba->active_uic_cmd->argument3 = in ufshcd_uic_cmd_compl()
5157 ufshcd_get_dme_attr_val(hba); in ufshcd_uic_cmd_compl()
5158 if (!hba->uic_async_done) in ufshcd_uic_cmd_compl()
5159 hba->active_uic_cmd->cmd_active = 0; in ufshcd_uic_cmd_compl()
5160 complete(&hba->active_uic_cmd->done); in ufshcd_uic_cmd_compl()
5164 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { in ufshcd_uic_cmd_compl()
5165 hba->active_uic_cmd->cmd_active = 0; in ufshcd_uic_cmd_compl()
5166 complete(hba->uic_async_done); in ufshcd_uic_cmd_compl()
5171 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, in ufshcd_uic_cmd_compl()
5173 spin_unlock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5178 static void ufshcd_release_scsi_cmd(struct ufs_hba *hba, in ufshcd_release_scsi_cmd() argument
5184 ufshcd_crypto_clear_prdt(hba, lrbp); in ufshcd_release_scsi_cmd()
5186 ufshcd_release(hba); in ufshcd_release_scsi_cmd()
5187 ufshcd_clk_scaling_update_busy(hba); in ufshcd_release_scsi_cmd()
5195 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, in __ufshcd_transfer_req_compl() argument
5202 for_each_set_bit(index, &completed_reqs, hba->nutrs) { in __ufshcd_transfer_req_compl()
5203 if (!test_and_clear_bit(index, &hba->outstanding_reqs)) in __ufshcd_transfer_req_compl()
5205 lrbp = &hba->lrb[index]; in __ufshcd_transfer_req_compl()
5209 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) in __ufshcd_transfer_req_compl()
5210 ufshcd_update_monitor(hba, lrbp); in __ufshcd_transfer_req_compl()
5211 trace_android_vh_ufs_compl_command(hba, lrbp); in __ufshcd_transfer_req_compl()
5212 ufshcd_add_command_trace(hba, index, "complete"); in __ufshcd_transfer_req_compl()
5213 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp); in __ufshcd_transfer_req_compl()
5214 ufshcd_release_scsi_cmd(hba, lrbp); in __ufshcd_transfer_req_compl()
5219 if (hba->dev_cmd.complete) { in __ufshcd_transfer_req_compl()
5220 trace_android_vh_ufs_compl_command(hba, lrbp); in __ufshcd_transfer_req_compl()
5221 ufshcd_add_command_trace(hba, index, in __ufshcd_transfer_req_compl()
5223 complete(hba->dev_cmd.complete); in __ufshcd_transfer_req_compl()
5224 ufshcd_clk_scaling_update_busy(hba); in __ufshcd_transfer_req_compl()
5239 static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr) in ufshcd_trc_handler() argument
5250 if (ufshcd_is_intr_aggr_allowed(hba) && in ufshcd_trc_handler()
5251 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) in ufshcd_trc_handler()
5252 ufshcd_reset_intr_aggr(hba); in ufshcd_trc_handler()
5257 utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL); in ufshcd_trc_handler()
5259 ufshcd_writel(hba, utrlcnr, in ufshcd_trc_handler()
5267 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_trc_handler()
5268 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_trc_handler()
5269 completed_reqs = tr_doorbell ^ hba->outstanding_reqs; in ufshcd_trc_handler()
5270 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_trc_handler()
5274 __ufshcd_transfer_req_compl(hba, completed_reqs); in ufshcd_trc_handler()
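When UTRLCNR is not used, ufshcd_trc_handler() derives the finished tags by XORing the doorbell register with the driver's outstanding-request bitmap: any tag the driver still tracks but the controller has already cleared from the doorbell must have completed. A one-line sketch with a worked example:

        /* outstanding = 0b1011 (tags 0, 1, 3 in flight), doorbell = 0b0001
         * (only tag 0 still owned by the controller)
         *   -> completed = doorbell ^ outstanding = 0b1010 (tags 1 and 3). */
        completed_reqs = tr_doorbell ^ hba->outstanding_reqs;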
5291 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_disable_ee() argument
5296 if (!(hba->ee_ctrl_mask & mask)) in ufshcd_disable_ee()
5299 val = hba->ee_ctrl_mask & ~mask; in ufshcd_disable_ee()
5301 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_disable_ee()
5304 hba->ee_ctrl_mask &= ~mask; in ufshcd_disable_ee()
5319 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_enable_ee() argument
5324 if (hba->ee_ctrl_mask & mask) in ufshcd_enable_ee()
5327 val = hba->ee_ctrl_mask | mask; in ufshcd_enable_ee()
5329 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_enable_ee()
5332 hba->ee_ctrl_mask |= mask; in ufshcd_enable_ee()
5348 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) in ufshcd_enable_auto_bkops() argument
5352 if (hba->auto_bkops_enabled) in ufshcd_enable_auto_bkops()
5355 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_enable_auto_bkops()
5358 dev_err(hba->dev, "%s: failed to enable bkops %d\n", in ufshcd_enable_auto_bkops()
5363 hba->auto_bkops_enabled = true; in ufshcd_enable_auto_bkops()
5364 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); in ufshcd_enable_auto_bkops()
5367 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_enable_auto_bkops()
5369 dev_err(hba->dev, "%s: failed to disable exception event %d\n", in ufshcd_enable_auto_bkops()
5387 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) in ufshcd_disable_auto_bkops() argument
5391 if (!hba->auto_bkops_enabled) in ufshcd_disable_auto_bkops()
5398 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
5400 dev_err(hba->dev, "%s: failed to enable exception event %d\n", in ufshcd_disable_auto_bkops()
5405 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, in ufshcd_disable_auto_bkops()
5408 dev_err(hba->dev, "%s: failed to disable bkops %d\n", in ufshcd_disable_auto_bkops()
5410 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
5414 hba->auto_bkops_enabled = false; in ufshcd_disable_auto_bkops()
5415 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); in ufshcd_disable_auto_bkops()
5416 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_disable_auto_bkops()
5430 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) in ufshcd_force_reset_auto_bkops() argument
5432 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { in ufshcd_force_reset_auto_bkops()
5433 hba->auto_bkops_enabled = false; in ufshcd_force_reset_auto_bkops()
5434 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5435 ufshcd_enable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
5437 hba->auto_bkops_enabled = true; in ufshcd_force_reset_auto_bkops()
5438 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5439 ufshcd_disable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
5441 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; in ufshcd_force_reset_auto_bkops()
5442 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_force_reset_auto_bkops()
5445 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_bkops_status() argument
5447 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_bkops_status()
5467 int ufshcd_bkops_ctrl(struct ufs_hba *hba, in ufshcd_bkops_ctrl() argument
5473 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_ctrl()
5475 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_ctrl()
5479 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", in ufshcd_bkops_ctrl()
5486 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_ctrl()
5488 err = ufshcd_disable_auto_bkops(hba); in ufshcd_bkops_ctrl()
5504 static int ufshcd_urgent_bkops(struct ufs_hba *hba) in ufshcd_urgent_bkops() argument
5506 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); in ufshcd_urgent_bkops()
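ufshcd_bkops_ctrl() compares the device's current bBackgroundOpStatus against the requested threshold and enables or disables auto-BKOPS accordingly; ufshcd_urgent_bkops() is simply that call with the saved urgent level. A minimal sketch of requesting BKOPS whenever the device reports at least a performance impact (status names assumed from ufs.h):

        int err;

        err = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
        if (err)
                dev_err(hba->dev, "BKOPS control failed: %d\n", err);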
5509 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_ee_status() argument
5511 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ee_status()
5515 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) in ufshcd_bkops_exception_event_handler() argument
5520 if (hba->is_urgent_bkops_lvl_checked) in ufshcd_bkops_exception_event_handler()
5523 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_exception_event_handler()
5525 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_exception_event_handler()
5537 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", in ufshcd_bkops_exception_event_handler()
5540 hba->urgent_bkops_lvl = curr_status; in ufshcd_bkops_exception_event_handler()
5541 hba->is_urgent_bkops_lvl_checked = true; in ufshcd_bkops_exception_event_handler()
5545 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_exception_event_handler()
5548 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", in ufshcd_bkops_exception_event_handler()
5552 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable) in ufshcd_wb_ctrl() argument
5558 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_ctrl()
5561 if (!(enable ^ hba->wb_enabled)) in ufshcd_wb_ctrl()
5568 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_ctrl()
5569 ret = ufshcd_query_flag_retry(hba, opcode, in ufshcd_wb_ctrl()
5572 dev_err(hba->dev, "%s write booster %s failed %d\n", in ufshcd_wb_ctrl()
5577 hba->wb_enabled = enable; in ufshcd_wb_ctrl()
5578 dev_dbg(hba->dev, "%s write booster %s %d\n", in ufshcd_wb_ctrl()
5584 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set) in ufshcd_wb_toggle_flush_during_h8() argument
5594 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_toggle_flush_during_h8()
5595 return ufshcd_query_flag_retry(hba, val, in ufshcd_wb_toggle_flush_during_h8()
5600 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable) in ufshcd_wb_toggle_flush() argument
5603 ufshcd_wb_buf_flush_enable(hba); in ufshcd_wb_toggle_flush()
5605 ufshcd_wb_buf_flush_disable(hba); in ufshcd_wb_toggle_flush()
5609 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba) in ufshcd_wb_buf_flush_enable() argument
5614 if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled) in ufshcd_wb_buf_flush_enable()
5617 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_buf_flush_enable()
5618 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_wb_buf_flush_enable()
5622 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n", in ufshcd_wb_buf_flush_enable()
5625 hba->wb_buf_flush_enabled = true; in ufshcd_wb_buf_flush_enable()
5627 dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret); in ufshcd_wb_buf_flush_enable()
5631 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba) in ufshcd_wb_buf_flush_disable() argument
5636 if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled) in ufshcd_wb_buf_flush_disable()
5639 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_buf_flush_disable()
5640 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, in ufshcd_wb_buf_flush_disable()
5644 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n", in ufshcd_wb_buf_flush_disable()
5647 hba->wb_buf_flush_enabled = false; in ufshcd_wb_buf_flush_disable()
5648 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret); in ufshcd_wb_buf_flush_disable()
5654 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, in ufshcd_wb_presrv_usrspc_keep_vcc_on() argument
5661 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5662 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5666 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5672 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5677 if (avail_buf < hba->vps->wb_flush_threshold) in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5683 static bool ufshcd_wb_need_flush(struct ufs_hba *hba) in ufshcd_wb_need_flush() argument
5689 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_need_flush()
5702 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_need_flush()
5703 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_wb_need_flush()
5707 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_need_flush()
5712 if (!hba->dev_info.b_presrv_uspc_en) { in ufshcd_wb_need_flush()
5718 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); in ufshcd_wb_need_flush()
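ufshcd_wb_need_flush() answers whether the WriteBooster buffer should be flushed before powering down, based on the available-buffer attribute and the preserve-user-space mode. The suspend path roughly pairs it with the flush toggles above; a loose sketch of that decision (illustrative only, the real suspend logic also tracks additional hba->dev_info state):

        /* Keep flushing while free WriteBooster space is low, otherwise stop. */
        if (ufshcd_wb_need_flush(hba))
                ufshcd_wb_buf_flush_enable(hba);
        else
                ufshcd_wb_buf_flush_disable(hba);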
5723 struct ufs_hba *hba = container_of(to_delayed_work(work), in ufshcd_rpm_dev_flush_recheck_work() local
5732 pm_runtime_get_sync(hba->dev); in ufshcd_rpm_dev_flush_recheck_work()
5733 pm_runtime_put_sync(hba->dev); in ufshcd_rpm_dev_flush_recheck_work()
5745 struct ufs_hba *hba; in ufshcd_exception_event_handler() local
5748 hba = container_of(work, struct ufs_hba, eeh_work); in ufshcd_exception_event_handler()
5750 pm_runtime_get_sync(hba->dev); in ufshcd_exception_event_handler()
5751 ufshcd_scsi_block_requests(hba); in ufshcd_exception_event_handler()
5752 err = ufshcd_get_ee_status(hba, &status); in ufshcd_exception_event_handler()
5754 dev_err(hba->dev, "%s: failed to get exception status %d\n", in ufshcd_exception_event_handler()
5759 status &= hba->ee_ctrl_mask; in ufshcd_exception_event_handler()
5762 ufshcd_bkops_exception_event_handler(hba); in ufshcd_exception_event_handler()
5765 ufshcd_scsi_unblock_requests(hba); in ufshcd_exception_event_handler()
5772 pm_runtime_put_noidle(hba->dev); in ufshcd_exception_event_handler()
5773 pm_runtime_put(hba->dev); in ufshcd_exception_event_handler()
5778 static void ufshcd_complete_requests(struct ufs_hba *hba) in ufshcd_complete_requests() argument
5780 ufshcd_trc_handler(hba, false); in ufshcd_complete_requests()
5781 ufshcd_tmc_handler(hba); in ufshcd_complete_requests()
5791 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) in ufshcd_quirk_dl_nac_errors() argument
5796 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5801 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) in ufshcd_quirk_dl_nac_errors()
5804 if ((hba->saved_err & DEVICE_FATAL_ERROR) || in ufshcd_quirk_dl_nac_errors()
5805 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5806 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) in ufshcd_quirk_dl_nac_errors()
5809 if ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5810 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { in ufshcd_quirk_dl_nac_errors()
5815 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5817 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5823 if ((hba->saved_err & INT_FATAL_ERRORS) || in ufshcd_quirk_dl_nac_errors()
5824 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5825 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) in ufshcd_quirk_dl_nac_errors()
5835 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5836 err = ufshcd_verify_dev_init(hba); in ufshcd_quirk_dl_nac_errors()
5837 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5843 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) in ufshcd_quirk_dl_nac_errors()
5844 hba->saved_err &= ~UIC_ERROR; in ufshcd_quirk_dl_nac_errors()
5846 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; in ufshcd_quirk_dl_nac_errors()
5847 if (!hba->saved_uic_err) in ufshcd_quirk_dl_nac_errors()
5851 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5856 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) in ufshcd_is_saved_err_fatal() argument
5858 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || in ufshcd_is_saved_err_fatal()
5859 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); in ufshcd_is_saved_err_fatal()
5863 static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba) in ufshcd_schedule_eh_work() argument
5866 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { in ufshcd_schedule_eh_work()
5867 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_schedule_eh_work()
5868 ufshcd_is_saved_err_fatal(hba)) in ufshcd_schedule_eh_work()
5869 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; in ufshcd_schedule_eh_work()
5871 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; in ufshcd_schedule_eh_work()
5872 queue_work(hba->eh_wq, &hba->eh_work); in ufshcd_schedule_eh_work()
5876 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) in ufshcd_clk_scaling_allow() argument
5878 down_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
5879 hba->clk_scaling.is_allowed = allow; in ufshcd_clk_scaling_allow()
5880 up_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
5883 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) in ufshcd_clk_scaling_suspend() argument
5886 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
5887 ufshcd_suspend_clkscaling(hba); in ufshcd_clk_scaling_suspend()
5888 ufshcd_clk_scaling_allow(hba, false); in ufshcd_clk_scaling_suspend()
5890 ufshcd_clk_scaling_allow(hba, true); in ufshcd_clk_scaling_suspend()
5891 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
5892 ufshcd_resume_clkscaling(hba); in ufshcd_clk_scaling_suspend()
5896 static void ufshcd_err_handling_prepare(struct ufs_hba *hba) in ufshcd_err_handling_prepare() argument
5898 pm_runtime_get_sync(hba->dev); in ufshcd_err_handling_prepare()
5899 if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) { in ufshcd_err_handling_prepare()
5907 ufshcd_setup_hba_vreg(hba, true); in ufshcd_err_handling_prepare()
5908 ufshcd_enable_irq(hba); in ufshcd_err_handling_prepare()
5909 ufshcd_setup_vreg(hba, true); in ufshcd_err_handling_prepare()
5910 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_err_handling_prepare()
5911 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_err_handling_prepare()
5912 ufshcd_hold(hba, false); in ufshcd_err_handling_prepare()
5913 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_err_handling_prepare()
5914 ufshcd_setup_clocks(hba, true); in ufshcd_err_handling_prepare()
5915 ufshcd_release(hba); in ufshcd_err_handling_prepare()
5916 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; in ufshcd_err_handling_prepare()
5917 ufshcd_vops_resume(hba, pm_op); in ufshcd_err_handling_prepare()
5919 ufshcd_hold(hba, false); in ufshcd_err_handling_prepare()
5920 if (ufshcd_is_clkscaling_supported(hba) && in ufshcd_err_handling_prepare()
5921 hba->clk_scaling.is_enabled) in ufshcd_err_handling_prepare()
5922 ufshcd_suspend_clkscaling(hba); in ufshcd_err_handling_prepare()
5923 ufshcd_clk_scaling_allow(hba, false); in ufshcd_err_handling_prepare()
5925 ufshcd_scsi_block_requests(hba); in ufshcd_err_handling_prepare()
5927 down_write(&hba->clk_scaling_lock); in ufshcd_err_handling_prepare()
5928 up_write(&hba->clk_scaling_lock); in ufshcd_err_handling_prepare()
5929 cancel_work_sync(&hba->eeh_work); in ufshcd_err_handling_prepare()
5932 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) in ufshcd_err_handling_unprepare() argument
5934 ufshcd_scsi_unblock_requests(hba); in ufshcd_err_handling_unprepare()
5935 ufshcd_release(hba); in ufshcd_err_handling_unprepare()
5936 if (ufshcd_is_clkscaling_supported(hba)) in ufshcd_err_handling_unprepare()
5937 ufshcd_clk_scaling_suspend(hba, false); in ufshcd_err_handling_unprepare()
5938 pm_runtime_put(hba->dev); in ufshcd_err_handling_unprepare()
5941 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) in ufshcd_err_handling_should_stop() argument
5943 return (!hba->is_powered || hba->shutting_down || in ufshcd_err_handling_should_stop()
5944 hba->ufshcd_state == UFSHCD_STATE_ERROR || in ufshcd_err_handling_should_stop()
5945 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || in ufshcd_err_handling_should_stop()
5946 ufshcd_is_link_broken(hba)))); in ufshcd_err_handling_should_stop()
5950 static void ufshcd_recover_pm_error(struct ufs_hba *hba) in ufshcd_recover_pm_error() argument
5952 struct Scsi_Host *shost = hba->host; in ufshcd_recover_pm_error()
5957 hba->is_sys_suspended = false; in ufshcd_recover_pm_error()
5962 ret = pm_runtime_set_active(hba->dev); in ufshcd_recover_pm_error()
5979 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) in ufshcd_recover_pm_error() argument
5984 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) in ufshcd_is_pwr_mode_restore_needed() argument
5986 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; in ufshcd_is_pwr_mode_restore_needed()
5989 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); in ufshcd_is_pwr_mode_restore_needed()
6006 struct ufs_hba *hba; in ufshcd_err_handler() local
6014 hba = container_of(work, struct ufs_hba, eh_work); in ufshcd_err_handler()
6016 down(&hba->host_sem); in ufshcd_err_handler()
6017 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6018 if (ufshcd_err_handling_should_stop(hba)) { in ufshcd_err_handler()
6019 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6020 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6021 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6022 up(&hba->host_sem); in ufshcd_err_handler()
6025 ufshcd_set_eh_in_progress(hba); in ufshcd_err_handler()
6026 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6027 ufshcd_err_handling_prepare(hba); in ufshcd_err_handler()
6029 ufshcd_complete_requests(hba); in ufshcd_err_handler()
6030 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6031 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6032 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_err_handler()
6037 if (ufshcd_err_handling_should_stop(hba)) in ufshcd_err_handler()
6040 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { in ufshcd_err_handler()
6043 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6045 ret = ufshcd_quirk_dl_nac_errors(hba); in ufshcd_err_handler()
6046 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6047 if (!ret && ufshcd_err_handling_should_stop(hba)) in ufshcd_err_handler()
6051 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || in ufshcd_err_handler()
6052 (hba->saved_uic_err && in ufshcd_err_handler()
6053 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_err_handler()
6054 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); in ufshcd_err_handler()
6056 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6057 ufshcd_print_host_state(hba); in ufshcd_err_handler()
6058 ufshcd_print_pwr_info(hba); in ufshcd_err_handler()
6059 ufshcd_print_evt_hist(hba); in ufshcd_err_handler()
6060 ufshcd_print_tmrs(hba, hba->outstanding_tasks); in ufshcd_err_handler()
6061 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt); in ufshcd_err_handler()
6062 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6070 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_err_handler()
6071 ufshcd_is_saved_err_fatal(hba) || in ufshcd_err_handler()
6072 ((hba->saved_err & UIC_ERROR) && in ufshcd_err_handler()
6073 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | in ufshcd_err_handler()
6083 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { in ufshcd_err_handler()
6084 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_err_handler()
6085 if (!hba->saved_uic_err) in ufshcd_err_handler()
6086 hba->saved_err &= ~UIC_ERROR; in ufshcd_err_handler()
6087 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6088 if (ufshcd_is_pwr_mode_restore_needed(hba)) in ufshcd_err_handler()
6090 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6091 if (!hba->saved_err && !needs_restore) in ufshcd_err_handler()
6095 hba->silence_err_logs = true; in ufshcd_err_handler()
6097 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6099 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { in ufshcd_err_handler()
6100 if (ufshcd_try_to_abort_task(hba, tag)) { in ufshcd_err_handler()
6107 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { in ufshcd_err_handler()
6108 if (ufshcd_clear_tm_cmd(hba, tag)) { in ufshcd_err_handler()
6116 ufshcd_complete_requests(hba); in ufshcd_err_handler()
6118 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6119 hba->silence_err_logs = false; in ufshcd_err_handler()
6130 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6135 down_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6136 hba->force_pmc = true; in ufshcd_err_handler()
6137 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); in ufshcd_err_handler()
6140 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", in ufshcd_err_handler()
6143 hba->force_pmc = false; in ufshcd_err_handler()
6144 ufshcd_print_pwr_info(hba); in ufshcd_err_handler()
6145 up_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6146 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6152 hba->force_reset = false; in ufshcd_err_handler()
6153 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6154 err = ufshcd_reset_and_restore(hba); in ufshcd_err_handler()
6156 dev_err(hba->dev, "%s: reset and restore failed with err %d\n", in ufshcd_err_handler()
6159 ufshcd_recover_pm_error(hba); in ufshcd_err_handler()
6160 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6165 if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_err_handler()
6166 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6167 if (hba->saved_err || hba->saved_uic_err) in ufshcd_err_handler()
6168 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", in ufshcd_err_handler()
6169 __func__, hba->saved_err, hba->saved_uic_err); in ufshcd_err_handler()
6171 ufshcd_clear_eh_in_progress(hba); in ufshcd_err_handler()
6172 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6173 ufshcd_err_handling_unprepare(hba); in ufshcd_err_handler()
6174 up(&hba->host_sem); in ufshcd_err_handler()
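The ufshcd_err_handler() references above trace a fixed sequence: take host_sem and the host lock, bail out early when ufshcd_err_handling_should_stop() says so, move the state to RESET, dump host state for fatal or UIC errors, abort the outstanding transfer and task-management tags, optionally force a power-mode restore, and only then run reset-and-restore before clearing the in-progress flag. The fragment below is a minimal user-space C sketch of that control flow; the struct, states and helper names are illustrative stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

enum { OPERATIONAL, RESET, ERROR };

struct hba {
    bool force_reset;
    bool link_broken;
    unsigned int saved_err;          /* accumulated fatal/UIC bits */
    unsigned long outstanding_reqs;  /* one bit per transfer tag */
    int state;
};

static bool should_stop(struct hba *h) { return h->state == ERROR; }
static bool abort_tag(struct hba *h, int tag) { h->outstanding_reqs &= ~(1UL << tag); return true; }
static bool reset_and_restore(struct hba *h) { h->saved_err = 0; return true; }

/* Simplified handler flow: quiesce, abort what is pending, then reset if needed. */
static void err_handler(struct hba *h)
{
    if (should_stop(h))
        return;

    h->state = RESET;
    bool needs_reset = h->force_reset || h->link_broken || h->saved_err;

    /* Clean up every outstanding request before touching the link. */
    for (int tag = 0; tag < 32; tag++)
        if ((h->outstanding_reqs & (1UL << tag)) && !abort_tag(h, tag))
            needs_reset = true;

    if (needs_reset && !reset_and_restore(h))
        h->state = ERROR;

    if (h->state == RESET)
        h->state = OPERATIONAL;
    h->force_reset = false;
}

int main(void)
{
    struct hba h = { .force_reset = true, .outstanding_reqs = 0x5, .state = OPERATIONAL };
    err_handler(&h);
    printf("state=%d saved_err=0x%x\n", h.state, h.saved_err);
    return 0;
}

The ordering matters: pending work is cleaned up before the link is reset, and the state only returns to OPERATIONAL when the handler got through without marking an error.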
6185 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) in ufshcd_update_uic_error() argument
6191 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); in ufshcd_update_uic_error()
6194 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg); in ufshcd_update_uic_error()
6200 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", in ufshcd_update_uic_error()
6207 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6208 if (hba->uic_async_done && hba->active_uic_cmd) in ufshcd_update_uic_error()
6209 cmd = hba->active_uic_cmd; in ufshcd_update_uic_error()
6215 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6221 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); in ufshcd_update_uic_error()
6224 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg); in ufshcd_update_uic_error()
6227 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; in ufshcd_update_uic_error()
6228 else if (hba->dev_quirks & in ufshcd_update_uic_error()
6231 hba->uic_error |= in ufshcd_update_uic_error()
6234 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; in ufshcd_update_uic_error()
6240 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); in ufshcd_update_uic_error()
6243 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg); in ufshcd_update_uic_error()
6244 hba->uic_error |= UFSHCD_UIC_NL_ERROR; in ufshcd_update_uic_error()
6248 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); in ufshcd_update_uic_error()
6251 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg); in ufshcd_update_uic_error()
6252 hba->uic_error |= UFSHCD_UIC_TL_ERROR; in ufshcd_update_uic_error()
6256 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); in ufshcd_update_uic_error()
6259 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); in ufshcd_update_uic_error()
6260 hba->uic_error |= UFSHCD_UIC_DME_ERROR; in ufshcd_update_uic_error()
6264 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", in ufshcd_update_uic_error()
6265 __func__, hba->uic_error); in ufshcd_update_uic_error()
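ufshcd_update_uic_error() reads one error register per UniPro layer (PHY adapter, data link, network, transport, DME), records each non-zero value in the event history and folds a per-layer flag into hba->uic_error. A hedged sketch of that accumulate-per-layer pattern follows; the register indices, flag values and helper names are invented for illustration and do not match the real UFSHCI layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative register indices and flag bits; not the real UFSHCI layout. */
enum { REG_PA_ERR, REG_DL_ERR, REG_NL_ERR, REG_TL_ERR, REG_DME_ERR, REG_MAX };
enum { UIC_PA = 1 << 0, UIC_DL = 1 << 1, UIC_NL = 1 << 2, UIC_TL = 1 << 3, UIC_DME = 1 << 4 };

static uint32_t regs[REG_MAX];                       /* stand-in for MMIO space */

static uint32_t read_reg(int idx) { return regs[idx]; }
static void log_event(int idx, uint32_t v) { printf("evt[%d]=0x%x\n", idx, (unsigned)v); }

/* Walk the layers; any non-zero error register sets the matching flag bit. */
static uint32_t update_uic_error(void)
{
    static const struct { int idx; uint32_t flag; } layer[] = {
        { REG_PA_ERR, UIC_PA }, { REG_DL_ERR, UIC_DL }, { REG_NL_ERR, UIC_NL },
        { REG_TL_ERR, UIC_TL }, { REG_DME_ERR, UIC_DME },
    };
    uint32_t uic_error = 0;

    for (unsigned i = 0; i < sizeof(layer) / sizeof(layer[0]); i++) {
        uint32_t reg = read_reg(layer[i].idx);
        if (!reg)
            continue;
        log_event(layer[i].idx, reg);   /* plays the role of ufshcd_update_evt_hist() */
        uic_error |= layer[i].flag;     /* plays the role of hba->uic_error |= ... */
    }
    return uic_error;
}

int main(void)
{
    regs[REG_DL_ERR] = 0x80000001u;
    printf("uic_error=0x%x\n", (unsigned)update_uic_error());
    return 0;
}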
6278 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) in ufshcd_check_errors() argument
6283 spin_lock(hba->host->host_lock); in ufshcd_check_errors()
6284 hba->errors |= UFSHCD_ERROR_MASK & intr_status; in ufshcd_check_errors()
6286 if (hba->errors & INT_FATAL_ERRORS) { in ufshcd_check_errors()
6287 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, in ufshcd_check_errors()
6288 hba->errors); in ufshcd_check_errors()
6292 if (hba->errors & UIC_ERROR) { in ufshcd_check_errors()
6293 hba->uic_error = 0; in ufshcd_check_errors()
6294 retval = ufshcd_update_uic_error(hba); in ufshcd_check_errors()
6295 if (hba->uic_error) { in ufshcd_check_errors()
6296 dev_err(hba->dev, in ufshcd_check_errors()
6302 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { in ufshcd_check_errors()
6303 dev_err(hba->dev, in ufshcd_check_errors()
6305 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? in ufshcd_check_errors()
6307 hba->errors, ufshcd_get_upmcrs(hba)); in ufshcd_check_errors()
6308 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR, in ufshcd_check_errors()
6309 hba->errors); in ufshcd_check_errors()
6310 ufshcd_set_link_broken(hba); in ufshcd_check_errors()
6314 trace_android_vh_ufs_check_int_errors(hba, queue_eh_work); in ufshcd_check_errors()
6321 hba->saved_err |= hba->errors; in ufshcd_check_errors()
6322 hba->saved_uic_err |= hba->uic_error; in ufshcd_check_errors()
6325 if ((hba->saved_err & in ufshcd_check_errors()
6327 (hba->saved_uic_err && in ufshcd_check_errors()
6328 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_check_errors()
6329 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", in ufshcd_check_errors()
6330 __func__, hba->saved_err, in ufshcd_check_errors()
6331 hba->saved_uic_err); in ufshcd_check_errors()
6332 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, in ufshcd_check_errors()
6334 ufshcd_print_pwr_info(hba); in ufshcd_check_errors()
6336 ufshcd_schedule_eh_work(hba); in ufshcd_check_errors()
6345 hba->errors = 0; in ufshcd_check_errors()
6346 hba->uic_error = 0; in ufshcd_check_errors()
6347 spin_unlock(hba->host->host_lock); in ufshcd_check_errors()
6359 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) in ufshcd_tmc_handler() argument
6361 struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs; in ufshcd_tmc_handler()
6366 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_tmc_handler()
6367 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_tmc_handler()
6368 issued = hba->outstanding_tasks & ~pending; in ufshcd_tmc_handler()
6369 for_each_set_bit(tag, &issued, hba->nutmrs) { in ufshcd_tmc_handler()
6376 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_tmc_handler()
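ufshcd_tmc_handler() reads the task-management doorbell and computes issued = hba->outstanding_tasks & ~pending, so only slots that were issued and have since dropped out of the doorbell are treated as completed. A small generic-C sketch of that bitmask diff is below; the tag width and values are made up.

#include <stdio.h>

/* Slots that were issued but are no longer pending in the doorbell are done. */
static unsigned long completed_slots(unsigned long outstanding, unsigned long doorbell)
{
    return outstanding & ~doorbell;
}

int main(void)
{
    unsigned long outstanding = 0x0b;   /* tags 0, 1, 3 were issued */
    unsigned long doorbell    = 0x08;   /* tag 3 is still pending in hardware */
    unsigned long done = completed_slots(outstanding, doorbell);

    for (int tag = 0; tag < 8; tag++)   /* plays the role of for_each_set_bit() */
        if (done & (1UL << tag))
            printf("tag %d completed\n", tag);
    return 0;
}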
6390 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) in ufshcd_sl_intr() argument
6395 retval |= ufshcd_uic_cmd_compl(hba, intr_status); in ufshcd_sl_intr()
6397 if (intr_status & UFSHCD_ERROR_MASK || hba->errors) in ufshcd_sl_intr()
6398 retval |= ufshcd_check_errors(hba, intr_status); in ufshcd_sl_intr()
6401 retval |= ufshcd_tmc_handler(hba); in ufshcd_sl_intr()
6404 retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba)); in ufshcd_sl_intr()
6422 struct ufs_hba *hba = __hba; in ufshcd_intr() local
6423 int retries = hba->nutrs; in ufshcd_intr()
6425 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
6426 hba->ufs_stats.last_intr_status = intr_status; in ufshcd_intr()
6427 hba->ufs_stats.last_intr_ts = ktime_get(); in ufshcd_intr()
6437 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_intr()
6439 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); in ufshcd_intr()
6441 retval |= ufshcd_sl_intr(hba, enabled_intr_status); in ufshcd_intr()
6443 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
6447 !ufshcd_eh_in_progress(hba)) { in ufshcd_intr()
6448 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", in ufshcd_intr()
6451 hba->ufs_stats.last_intr_status, in ufshcd_intr()
6453 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); in ufshcd_intr()
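ufshcd_intr() bounds its work: it latches last_intr_status, then loops at most hba->nutrs times, on each pass acking the status bits it saw, dispatching them through ufshcd_sl_intr() and re-reading the status register; if enabled bits are still pending at the end and no error handling is in progress, it logs the unhandled interrupt and dumps the register space. The sketch below models that bounded ack-and-dispatch loop against a simulated status register; every name in it is illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t irq_status = 0x3;            /* simulated interrupt status register */
static const uint32_t irq_enable = 0xff;     /* simulated interrupt enable mask */

static uint32_t read_status(void) { return irq_status; }
static void ack(uint32_t bits) { irq_status &= ~bits; }
static int dispatch(uint32_t bits) { printf("handled 0x%x\n", (unsigned)bits); return bits != 0; }

/* Bounded loop: ack what was seen, handle it, re-read until nothing enabled remains. */
static int isr(int retries)
{
    int handled = 0;
    uint32_t status = read_status();

    while ((status & irq_enable) && retries--) {
        uint32_t enabled = status & irq_enable;

        ack(enabled);               /* clear before handling, as the driver does */
        handled |= dispatch(enabled);
        status = read_status();     /* pick up anything raised in the meantime */
    }
    if (status & irq_enable)
        printf("unhandled interrupt 0x%x\n", (unsigned)status);
    return handled;
}

int main(void) { return isr(8) ? 0 : 1; }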
6459 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_tm_cmd() argument
6465 if (!test_bit(tag, &hba->outstanding_tasks)) in ufshcd_clear_tm_cmd()
6468 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
6469 ufshcd_utmrl_clear(hba, tag); in ufshcd_clear_tm_cmd()
6470 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
6473 err = ufshcd_wait_for_register(hba, in ufshcd_clear_tm_cmd()
6480 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, in __ufshcd_issue_tm_cmd() argument
6483 struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs; in __ufshcd_issue_tm_cmd()
6484 struct request_queue *q = hba->tmf_queue; in __ufshcd_issue_tm_cmd()
6485 struct Scsi_Host *host = hba->host; in __ufshcd_issue_tm_cmd()
6499 ufshcd_hold(hba, false); in __ufshcd_issue_tm_cmd()
6507 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
6508 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); in __ufshcd_issue_tm_cmd()
6511 __set_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
6516 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); in __ufshcd_issue_tm_cmd()
6522 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send"); in __ufshcd_issue_tm_cmd()
6528 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); in __ufshcd_issue_tm_cmd()
6529 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", in __ufshcd_issue_tm_cmd()
6531 if (ufshcd_clear_tm_cmd(hba, task_tag)) in __ufshcd_issue_tm_cmd()
6532 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", in __ufshcd_issue_tm_cmd()
6537 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
6539 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); in __ufshcd_issue_tm_cmd()
6542 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6544 __clear_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
6545 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6547 ufshcd_release(hba); in __ufshcd_issue_tm_cmd()
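__ufshcd_issue_tm_cmd() copies the request into the UTMRD slot for the allocated tag, sets the bit in outstanding_tasks, rings the task-request doorbell and waits with a timeout; on timeout it clears the slot and warns, otherwise it copies the response descriptor back, and in both cases it clears the outstanding bit and releases the host. The sketch below models that issue/wait/clear shape with a polled flag instead of a real completion; all names and the timeout handling are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct tm_slot { int req; int rsp; bool done; };

static struct tm_slot slots[8];
static unsigned long outstanding_tasks;

static void ring_doorbell(int tag) { slots[tag].rsp = slots[tag].req + 1; slots[tag].done = true; }
static bool wait_done(int tag, int timeout) { return timeout > 0 && slots[tag].done; }

/* Issue into a slot, ring the doorbell, wait, and clear the slot on timeout. */
static int issue_tm_cmd(int tag, int req, int *rsp, int timeout)
{
    int err = 0;

    slots[tag].req = req;                 /* stands in for the memcpy into the UTMRD slot */
    slots[tag].done = false;
    outstanding_tasks |= 1UL << tag;

    ring_doorbell(tag);                   /* stands in for the task-request doorbell write */

    if (!wait_done(tag, timeout)) {
        printf("tm cmd timed out, clearing slot %d\n", tag);
        err = -1;                         /* the driver would also clear the doorbell bit */
    } else {
        *rsp = slots[tag].rsp;            /* copy the response descriptor back */
    }
    outstanding_tasks &= ~(1UL << tag);
    return err;
}

int main(void)
{
    int rsp = 0;
    int err = issue_tm_cmd(2, 41, &rsp, 100);
    printf("err=%d rsp=%d\n", err, rsp);
    return 0;
}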
6563 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, in ufshcd_issue_tm_cmd() argument
6585 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); in ufshcd_issue_tm_cmd()
6591 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", in ufshcd_issue_tm_cmd()
6616 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, in ufshcd_issue_devman_upiu_cmd() argument
6624 const u32 tag = ufs_hba_add_info(hba)->reserved_slot; in ufshcd_issue_devman_upiu_cmd()
6630 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_issue_devman_upiu_cmd()
6632 down_read(&hba->clk_scaling_lock); in ufshcd_issue_devman_upiu_cmd()
6634 lrbp = &hba->lrb[tag]; in ufshcd_issue_devman_upiu_cmd()
6643 hba->dev_cmd.type = cmd_type; in ufshcd_issue_devman_upiu_cmd()
6645 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_issue_devman_upiu_cmd()
6668 hba->dev_cmd.complete = &wait; in ufshcd_issue_devman_upiu_cmd()
6673 ufshcd_send_command(hba, tag); in ufshcd_issue_devman_upiu_cmd()
6679 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); in ufshcd_issue_devman_upiu_cmd()
6692 dev_warn(hba->dev, in ufshcd_issue_devman_upiu_cmd()
6700 up_read(&hba->clk_scaling_lock); in ufshcd_issue_devman_upiu_cmd()
6719 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, in ufshcd_exec_raw_upiu_cmd() argument
6737 ufshcd_hold(hba, false); in ufshcd_exec_raw_upiu_cmd()
6738 mutex_lock(&hba->dev_cmd.lock); in ufshcd_exec_raw_upiu_cmd()
6739 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, in ufshcd_exec_raw_upiu_cmd()
6742 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_exec_raw_upiu_cmd()
6743 ufshcd_release(hba); in ufshcd_exec_raw_upiu_cmd()
6752 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); in ufshcd_exec_raw_upiu_cmd()
6758 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, in ufshcd_exec_raw_upiu_cmd()
6785 struct ufs_hba *hba; in ufshcd_eh_device_reset_handler() local
6791 hba = shost_priv(host); in ufshcd_eh_device_reset_handler()
6794 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); in ufshcd_eh_device_reset_handler()
6802 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { in ufshcd_eh_device_reset_handler()
6803 if (hba->lrb[pos].lun == lun) { in ufshcd_eh_device_reset_handler()
6804 err = ufshcd_clear_cmd(hba, pos); in ufshcd_eh_device_reset_handler()
6807 __ufshcd_transfer_req_compl(hba, 1U << pos); in ufshcd_eh_device_reset_handler()
6812 hba->req_abort_count = 0; in ufshcd_eh_device_reset_handler()
6813 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); in ufshcd_eh_device_reset_handler()
6817 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_eh_device_reset_handler()
6823 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) in ufshcd_set_req_abort_skip() argument
6828 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_set_req_abort_skip()
6829 lrbp = &hba->lrb[tag]; in ufshcd_set_req_abort_skip()
6847 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) in ufshcd_try_to_abort_task() argument
6849 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_try_to_abort_task()
6856 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
6860 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", in ufshcd_try_to_abort_task()
6868 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", in ufshcd_try_to_abort_task()
6870 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_try_to_abort_task()
6877 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", in ufshcd_try_to_abort_task()
6881 dev_err(hba->dev, in ufshcd_try_to_abort_task()
6895 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
6900 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", in ufshcd_try_to_abort_task()
6906 err = ufshcd_clear_cmd(hba, tag); in ufshcd_try_to_abort_task()
6908 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", in ufshcd_try_to_abort_task()
6924 struct ufs_hba *hba; in ufshcd_abort() local
6933 hba = shost_priv(host); in ufshcd_abort()
6935 lrbp = &hba->lrb[tag]; in ufshcd_abort()
6936 if (!ufshcd_valid_tag(hba, tag)) { in ufshcd_abort()
6937 dev_err(hba->dev, in ufshcd_abort()
6943 ufshcd_hold(hba, false); in ufshcd_abort()
6944 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_abort()
6946 if (!(test_bit(tag, &hba->outstanding_reqs))) { in ufshcd_abort()
6947 dev_err(hba->dev, in ufshcd_abort()
6949 __func__, tag, hba->outstanding_reqs, reg); in ufshcd_abort()
6954 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); in ufshcd_abort()
6964 if (!hba->req_abort_count) { in ufshcd_abort()
6965 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); in ufshcd_abort()
6966 ufshcd_print_evt_hist(hba); in ufshcd_abort()
6967 ufshcd_print_host_state(hba); in ufshcd_abort()
6968 ufshcd_print_pwr_info(hba); in ufshcd_abort()
6969 ufshcd_print_trs(hba, 1 << tag, true); in ufshcd_abort()
6971 ufshcd_print_trs(hba, 1 << tag, false); in ufshcd_abort()
6973 hba->req_abort_count++; in ufshcd_abort()
6976 dev_err(hba->dev, in ufshcd_abort()
6979 __ufshcd_transfer_req_compl(hba, 1UL << tag); in ufshcd_abort()
6992 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); in ufshcd_abort()
6995 hba->force_reset = true; in ufshcd_abort()
6996 ufshcd_schedule_eh_work(hba); in ufshcd_abort()
7003 dev_err(hba->dev, "%s: skipping abort\n", __func__); in ufshcd_abort()
7004 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7008 res = ufshcd_try_to_abort_task(hba, tag); in ufshcd_abort()
7010 dev_err(hba->dev, "%s: failed with err %d\n", __func__, res); in ufshcd_abort()
7011 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7020 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs); in ufshcd_abort()
7024 ufshcd_release_scsi_cmd(hba, lrbp); in ufshcd_abort()
7030 ufshcd_release(hba); in ufshcd_abort()
7044 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) in ufshcd_host_reset_and_restore() argument
7048 ufshpb_reset_host(hba); in ufshcd_host_reset_and_restore()
7053 ufshcd_hba_stop(hba); in ufshcd_host_reset_and_restore()
7054 hba->silence_err_logs = true; in ufshcd_host_reset_and_restore()
7055 ufshcd_complete_requests(hba); in ufshcd_host_reset_and_restore()
7056 hba->silence_err_logs = false; in ufshcd_host_reset_and_restore()
7059 ufshcd_set_clk_freq(hba, true); in ufshcd_host_reset_and_restore()
7061 err = ufshcd_hba_enable(hba); in ufshcd_host_reset_and_restore()
7065 err = ufshcd_probe_hba(hba, false); in ufshcd_host_reset_and_restore()
7068 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); in ufshcd_host_reset_and_restore()
7069 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); in ufshcd_host_reset_and_restore()
7082 static int ufshcd_reset_and_restore(struct ufs_hba *hba) in ufshcd_reset_and_restore() argument
7094 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7095 saved_err = hba->saved_err; in ufshcd_reset_and_restore()
7096 saved_uic_err = hba->saved_uic_err; in ufshcd_reset_and_restore()
7097 hba->saved_err = 0; in ufshcd_reset_and_restore()
7098 hba->saved_uic_err = 0; in ufshcd_reset_and_restore()
7099 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7103 ufshcd_vops_device_reset(hba); in ufshcd_reset_and_restore()
7105 err = ufshcd_host_reset_and_restore(hba); in ufshcd_reset_and_restore()
7108 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7113 scsi_report_bus_reset(hba->host, 0); in ufshcd_reset_and_restore()
7115 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_reset_and_restore()
7116 hba->saved_err |= saved_err; in ufshcd_reset_and_restore()
7117 hba->saved_uic_err |= saved_uic_err; in ufshcd_reset_and_restore()
7119 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
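ufshcd_reset_and_restore() stashes saved_err and saved_uic_err under the host lock, clears them, performs the vendor device reset plus host reset-and-restore, and on failure marks the state ERROR and folds the saved error bits back in so nothing is lost. The sketch below reproduces that save, retry, restore-on-failure shape; the retry bound and the stub reset function are illustrative, since the retry bookkeeping itself is not part of the listed lines.

#include <stdbool.h>
#include <stdio.h>

enum { OPERATIONAL, ERROR };

struct hba { unsigned saved_err; int state; };

static int attempts_left = 2;                 /* pretend the first reset attempt fails */
static bool host_reset_and_restore(void) { return --attempts_left == 0; }

/* Stash pending errors, retry the full reset, and restore them if we give up. */
static int reset_and_restore(struct hba *h, int max_retries)
{
    unsigned saved_err = h->saved_err;        /* taken under host_lock in the driver */
    int err = -1;

    h->saved_err = 0;
    while (err && max_retries--)
        err = host_reset_and_restore() ? 0 : -1;

    if (err) {
        h->state = ERROR;
        h->saved_err |= saved_err;            /* keep the evidence for the next pass */
    }
    return err;
}

int main(void)
{
    struct hba h = { .saved_err = 0x4, .state = OPERATIONAL };
    printf("err=%d state=%d saved_err=0x%x\n",
           reset_and_restore(&h, 3), h.state, h.saved_err);
    return 0;
}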
7134 struct ufs_hba *hba; in ufshcd_eh_host_reset_handler() local
7136 hba = shost_priv(cmd->device->host); in ufshcd_eh_host_reset_handler()
7138 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7139 hba->force_reset = true; in ufshcd_eh_host_reset_handler()
7140 ufshcd_schedule_eh_work(hba); in ufshcd_eh_host_reset_handler()
7141 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); in ufshcd_eh_host_reset_handler()
7142 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7144 flush_work(&hba->eh_work); in ufshcd_eh_host_reset_handler()
7146 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7147 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) in ufshcd_eh_host_reset_handler()
7149 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7208 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, in ufshcd_find_max_sup_active_icc_level() argument
7213 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || in ufshcd_find_max_sup_active_icc_level()
7214 !hba->vreg_info.vccq2) { in ufshcd_find_max_sup_active_icc_level()
7215 dev_err(hba->dev, in ufshcd_find_max_sup_active_icc_level()
7221 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA) in ufshcd_find_max_sup_active_icc_level()
7223 hba->vreg_info.vcc->max_uA, in ufshcd_find_max_sup_active_icc_level()
7227 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA) in ufshcd_find_max_sup_active_icc_level()
7229 hba->vreg_info.vccq->max_uA, in ufshcd_find_max_sup_active_icc_level()
7233 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA) in ufshcd_find_max_sup_active_icc_level()
7235 hba->vreg_info.vccq2->max_uA, in ufshcd_find_max_sup_active_icc_level()
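ufshcd_find_max_sup_active_icc_level() refuses to report a level unless VCC, VCCQ and VCCQ2 are all described, and otherwise derives, per rail, the highest ICC level that the rail's max_uA can supply, so the result is in effect limited by the weakest rail. Below is a hedged sketch of the underlying "highest level whose draw fits the budget" scan; the per-level current table is invented purely for illustration.

#include <stdio.h>

/* Invented per-level current draw in uA, ordered by ICC level; illustration only. */
static const unsigned level_ua[] = { 1000, 5000, 20000, 50000, 100000 };
#define NLEVELS (sizeof(level_ua) / sizeof(level_ua[0]))

/* Index of the highest level whose draw still fits the regulator budget. */
static unsigned max_icc_level(unsigned max_ua)
{
    unsigned level = 0;

    for (unsigned i = 0; i < NLEVELS; i++)
        if (level_ua[i] <= max_ua)
            level = i;
    return level;
}

int main(void)
{
    unsigned vcc = max_icc_level(60000);     /* each rail reports its own max_uA */
    unsigned vccq = max_icc_level(30000);
    unsigned vccq2 = max_icc_level(100000);

    /* The usable level is limited by the weakest rail. */
    unsigned icc = vcc < vccq ? vcc : vccq;
    icc = icc < vccq2 ? icc : vccq2;
    printf("icc level = %u\n", icc);
    return 0;
}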
7242 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) in ufshcd_set_active_icc_lvl() argument
7245 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER]; in ufshcd_set_active_icc_lvl()
7253 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, in ufshcd_set_active_icc_lvl()
7256 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7262 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf, in ufshcd_set_active_icc_lvl()
7264 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); in ufshcd_set_active_icc_lvl()
7266 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_active_icc_lvl()
7270 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7304 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) in ufshcd_scsi_add_wlus() argument
7309 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7311 if (IS_ERR(hba->sdev_ufs_device)) { in ufshcd_scsi_add_wlus()
7312 ret = PTR_ERR(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
7313 hba->sdev_ufs_device = NULL; in ufshcd_scsi_add_wlus()
7316 scsi_device_put(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
7318 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7320 if (IS_ERR(hba->sdev_rpmb)) { in ufshcd_scsi_add_wlus()
7321 ret = PTR_ERR(hba->sdev_rpmb); in ufshcd_scsi_add_wlus()
7324 scsi_device_put(hba->sdev_rpmb); in ufshcd_scsi_add_wlus()
7326 sdev_boot = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7329 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); in ufshcd_scsi_add_wlus()
7335 scsi_remove_device(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
7340 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf) in ufshcd_wb_probe() argument
7342 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_wb_probe()
7346 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_probe()
7355 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) in ufshcd_wb_probe()
7358 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] < in ufshcd_wb_probe()
7390 ufshcd_read_unit_desc_param(hba, in ufshcd_wb_probe()
7407 hba->caps &= ~UFSHCD_CAP_WB_EN; in ufshcd_wb_probe()
7410 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups) in ufshcd_fixup_dev_quirks() argument
7413 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_fixup_dev_quirks()
7424 hba->dev_quirks |= f->quirk; in ufshcd_fixup_dev_quirks()
7429 static void ufs_fixup_device_setup(struct ufs_hba *hba) in ufs_fixup_device_setup() argument
7432 ufshcd_fixup_dev_quirks(hba, ufs_fixups); in ufs_fixup_device_setup()
7435 ufshcd_vops_fixup_dev_quirks(hba); in ufs_fixup_device_setup()
7438 static int ufs_get_device_desc(struct ufs_hba *hba) in ufs_get_device_desc() argument
7444 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_get_device_desc()
7452 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, in ufs_get_device_desc()
7453 hba->desc_size[QUERY_DESC_IDN_DEVICE]); in ufs_get_device_desc()
7455 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", in ufs_get_device_desc()
7478 ufshpb_get_dev_info(hba, desc_buf); in ufs_get_device_desc()
7480 if (!ufshpb_is_legacy(hba)) in ufs_get_device_desc()
7481 err = ufshcd_query_flag_retry(hba, in ufs_get_device_desc()
7486 if (ufshpb_is_legacy(hba) || (!err && hpb_en)) in ufs_get_device_desc()
7490 err = ufshcd_read_string_desc(hba, model_index, in ufs_get_device_desc()
7493 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", in ufs_get_device_desc()
7498 ufs_fixup_device_setup(hba); in ufs_get_device_desc()
7500 ufshcd_wb_probe(hba, desc_buf); in ufs_get_device_desc()
7513 static void ufs_put_device_desc(struct ufs_hba *hba) in ufs_put_device_desc() argument
7515 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_put_device_desc()
7532 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) in ufshcd_tune_pa_tactivate() argument
7537 ret = ufshcd_dme_peer_get(hba, in ufshcd_tune_pa_tactivate()
7549 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_tune_pa_tactivate()
7567 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) in ufshcd_tune_pa_hibern8time() argument
7573 ret = ufshcd_dme_get(hba, in ufshcd_tune_pa_hibern8time()
7580 ret = ufshcd_dme_peer_get(hba, in ufshcd_tune_pa_hibern8time()
7592 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), in ufshcd_tune_pa_hibern8time()
7609 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) in ufshcd_quirk_tune_host_pa_tactivate() argument
7617 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufshcd_quirk_tune_host_pa_tactivate()
7622 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufshcd_quirk_tune_host_pa_tactivate()
7629 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
7636 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
7641 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); in ufshcd_quirk_tune_host_pa_tactivate()
7645 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_quirk_tune_host_pa_tactivate()
7660 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_quirk_tune_host_pa_tactivate()
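ufshcd_quirk_tune_host_pa_tactivate() reads PA_GRANULARITY and PA_TACTIVATE from both the host and the peer, converts each PA_TACTIVATE into microseconds through its own granularity, and raises the peer value when the host side would otherwise expire first. A small sketch of that convert-then-compare arithmetic follows; the granularity-to-microseconds table used here is an assumption for illustration, not quoted from the UniPro spec.

#include <stdio.h>

/* Assumed granularity steps in microseconds; illustrative values only. */
static const unsigned gran_to_us[] = { 1, 4, 8, 16, 32, 100 };

/* PA_TACTIVATE is expressed in units of PA_GRANULARITY; normalise it to us. */
static unsigned tactivate_us(unsigned tactivate, unsigned granularity)
{
    return tactivate * gran_to_us[granularity - 1];   /* granularity is 1-based */
}

int main(void)
{
    unsigned host_gran = 1, host_tact = 16;   /* 16 us on the host side */
    unsigned peer_gran = 3, peer_tact = 1;    /* 8 us on the device side */

    unsigned host_us = tactivate_us(host_tact, host_gran);
    unsigned peer_us = tactivate_us(peer_tact, peer_gran);

    if (host_us > peer_us) {
        /* Round the difference up to whole peer granules before raising the peer. */
        unsigned granule = gran_to_us[peer_gran - 1];
        unsigned new_peer = peer_tact + (host_us - peer_us + granule - 1) / granule;

        printf("raise peer PA_TACTIVATE to %u granules\n", new_peer);
    }
    return 0;
}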
7668 static void ufshcd_tune_unipro_params(struct ufs_hba *hba) in ufshcd_tune_unipro_params() argument
7670 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { in ufshcd_tune_unipro_params()
7671 ufshcd_tune_pa_tactivate(hba); in ufshcd_tune_unipro_params()
7672 ufshcd_tune_pa_hibern8time(hba); in ufshcd_tune_unipro_params()
7675 ufshcd_vops_apply_dev_quirks(hba); in ufshcd_tune_unipro_params()
7677 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) in ufshcd_tune_unipro_params()
7679 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); in ufshcd_tune_unipro_params()
7681 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) in ufshcd_tune_unipro_params()
7682 ufshcd_quirk_tune_host_pa_tactivate(hba); in ufshcd_tune_unipro_params()
7685 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) in ufshcd_clear_dbg_ufs_stats() argument
7687 hba->ufs_stats.hibern8_exit_cnt = 0; in ufshcd_clear_dbg_ufs_stats()
7688 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_clear_dbg_ufs_stats()
7689 hba->req_abort_count = 0; in ufshcd_clear_dbg_ufs_stats()
7692 static int ufshcd_device_geo_params_init(struct ufs_hba *hba) in ufshcd_device_geo_params_init() argument
7698 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY]; in ufshcd_device_geo_params_init()
7705 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, in ufshcd_device_geo_params_init()
7708 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n", in ufshcd_device_geo_params_init()
7714 hba->dev_info.max_lu_supported = 32; in ufshcd_device_geo_params_init()
7716 hba->dev_info.max_lu_supported = 8; in ufshcd_device_geo_params_init()
7718 if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= in ufshcd_device_geo_params_init()
7720 ufshpb_get_geo_info(hba, desc_buf); in ufshcd_device_geo_params_init()
7747 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) in ufshcd_parse_dev_ref_clk_freq() argument
7753 hba->dev_ref_clk_freq = in ufshcd_parse_dev_ref_clk_freq()
7756 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) in ufshcd_parse_dev_ref_clk_freq()
7757 dev_err(hba->dev, in ufshcd_parse_dev_ref_clk_freq()
7761 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) in ufshcd_set_dev_ref_clk() argument
7765 u32 freq = hba->dev_ref_clk_freq; in ufshcd_set_dev_ref_clk()
7767 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_set_dev_ref_clk()
7771 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", in ufshcd_set_dev_ref_clk()
7779 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_dev_ref_clk()
7783 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", in ufshcd_set_dev_ref_clk()
7788 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", in ufshcd_set_dev_ref_clk()
7795 static int ufshcd_device_params_init(struct ufs_hba *hba) in ufshcd_device_params_init() argument
7802 hba->desc_size[i] = QUERY_DESC_MAX_SIZE; in ufshcd_device_params_init()
7805 ret = ufshcd_device_geo_params_init(hba); in ufshcd_device_params_init()
7810 ret = ufs_get_device_desc(hba); in ufshcd_device_params_init()
7812 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", in ufshcd_device_params_init()
7817 ufshcd_get_ref_clk_gating_wait(hba); in ufshcd_device_params_init()
7819 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_device_params_init()
7821 hba->dev_info.f_power_on_wp_en = flag; in ufshcd_device_params_init()
7824 if (ufshcd_get_max_pwr_mode(hba)) in ufshcd_device_params_init()
7825 dev_err(hba->dev, in ufshcd_device_params_init()
7836 static int ufshcd_add_lus(struct ufs_hba *hba) in ufshcd_add_lus() argument
7841 ret = ufshcd_scsi_add_wlus(hba); in ufshcd_add_lus()
7846 if (ufshcd_is_clkscaling_supported(hba)) { in ufshcd_add_lus()
7847 memcpy(&hba->clk_scaling.saved_pwr_info.info, in ufshcd_add_lus()
7848 &hba->pwr_info, in ufshcd_add_lus()
7850 hba->clk_scaling.saved_pwr_info.is_valid = true; in ufshcd_add_lus()
7851 hba->clk_scaling.is_allowed = true; in ufshcd_add_lus()
7853 ret = ufshcd_devfreq_init(hba); in ufshcd_add_lus()
7857 hba->clk_scaling.is_enabled = true; in ufshcd_add_lus()
7858 ufshcd_init_clk_scaling_sysfs(hba); in ufshcd_add_lus()
7861 ufs_bsg_probe(hba); in ufshcd_add_lus()
7862 ufshpb_init(hba); in ufshcd_add_lus()
7863 scsi_scan_host(hba->host); in ufshcd_add_lus()
7864 pm_runtime_put_sync(hba->dev); in ufshcd_add_lus()
7877 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) in ufshcd_probe_hba() argument
7883 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_probe_hba()
7885 ret = ufshcd_link_startup(hba); in ufshcd_probe_hba()
7889 if (hba->quirks & UFSHCD_QUIRK_SKIP_INTERFACE_CONFIGURATION) in ufshcd_probe_hba()
7893 ufshcd_clear_dbg_ufs_stats(hba); in ufshcd_probe_hba()
7896 ufshcd_set_link_active(hba); in ufshcd_probe_hba()
7899 ret = ufshcd_verify_dev_init(hba); in ufshcd_probe_hba()
7904 ret = ufshcd_complete_dev_init(hba); in ufshcd_probe_hba()
7913 ret = ufshcd_device_params_init(hba); in ufshcd_probe_hba()
7918 ufshcd_tune_unipro_params(hba); in ufshcd_probe_hba()
7921 ufshcd_set_ufs_dev_active(hba); in ufshcd_probe_hba()
7922 ufshcd_force_reset_auto_bkops(hba); in ufshcd_probe_hba()
7925 if (hba->max_pwr_info.is_valid) { in ufshcd_probe_hba()
7930 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) in ufshcd_probe_hba()
7931 ufshcd_set_dev_ref_clk(hba); in ufshcd_probe_hba()
7932 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); in ufshcd_probe_hba()
7934 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", in ufshcd_probe_hba()
7938 ufshcd_print_pwr_info(hba); in ufshcd_probe_hba()
7947 ufshcd_set_active_icc_lvl(hba); in ufshcd_probe_hba()
7949 ufshcd_wb_config(hba); in ufshcd_probe_hba()
7951 ufshcd_auto_hibern8_enable(hba); in ufshcd_probe_hba()
7953 ufshpb_reset(hba); in ufshcd_probe_hba()
7955 trace_android_rvh_ufs_complete_init(hba); in ufshcd_probe_hba()
7957 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_probe_hba()
7959 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_probe_hba()
7960 else if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_probe_hba()
7961 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_probe_hba()
7962 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_probe_hba()
7964 trace_ufshcd_init(dev_name(hba->dev), ret, in ufshcd_probe_hba()
7966 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_probe_hba()
7977 struct ufs_hba *hba = (struct ufs_hba *)data; in ufshcd_async_scan() local
7980 down(&hba->host_sem); in ufshcd_async_scan()
7982 ret = ufshcd_probe_hba(hba, true); in ufshcd_async_scan()
7983 up(&hba->host_sem); in ufshcd_async_scan()
7988 ret = ufshcd_add_lus(hba); in ufshcd_async_scan()
7995 pm_runtime_put_sync(hba->dev); in ufshcd_async_scan()
7996 ufshcd_hba_exit(hba); in ufshcd_async_scan()
8070 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, in ufshcd_config_vreg_lpm() argument
8073 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); in ufshcd_config_vreg_lpm()
8076 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, in ufshcd_config_vreg_hpm() argument
8082 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); in ufshcd_config_vreg_hpm()
8158 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_vreg() argument
8161 struct device *dev = hba->dev; in ufshcd_setup_vreg()
8162 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_vreg()
8183 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_hba_vreg() argument
8185 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_hba_vreg()
8187 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); in ufshcd_setup_hba_vreg()
8207 static int ufshcd_init_vreg(struct ufs_hba *hba) in ufshcd_init_vreg() argument
8210 struct device *dev = hba->dev; in ufshcd_init_vreg()
8211 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_vreg()
8224 static int ufshcd_init_hba_vreg(struct ufs_hba *hba) in ufshcd_init_hba_vreg() argument
8226 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_hba_vreg()
8229 return ufshcd_get_vreg(hba->dev, info->vdd_hba); in ufshcd_init_hba_vreg()
8234 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) in ufshcd_setup_clocks() argument
8238 struct list_head *head = &hba->clk_list_head; in ufshcd_setup_clocks()
8246 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); in ufshcd_setup_clocks()
8256 if (ufshcd_is_link_active(hba) && in ufshcd_setup_clocks()
8264 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", in ufshcd_setup_clocks()
8272 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, in ufshcd_setup_clocks()
8277 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); in ufshcd_setup_clocks()
8288 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_setup_clocks()
8289 hba->clk_gating.state = CLKS_ON; in ufshcd_setup_clocks()
8290 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
8291 hba->clk_gating.state); in ufshcd_setup_clocks()
8292 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_setup_clocks()
8296 trace_ufshcd_profile_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
8302 static int ufshcd_init_clocks(struct ufs_hba *hba) in ufshcd_init_clocks() argument
8306 struct device *dev = hba->dev; in ufshcd_init_clocks()
8307 struct list_head *head = &hba->clk_list_head; in ufshcd_init_clocks()
8330 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); in ufshcd_init_clocks()
8335 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_init_clocks()
8349 static int ufshcd_variant_hba_init(struct ufs_hba *hba) in ufshcd_variant_hba_init() argument
8353 if (!hba->vops) in ufshcd_variant_hba_init()
8356 err = ufshcd_vops_init(hba); in ufshcd_variant_hba_init()
8360 err = ufshcd_vops_setup_regulators(hba, true); in ufshcd_variant_hba_init()
8362 ufshcd_vops_exit(hba); in ufshcd_variant_hba_init()
8365 dev_err(hba->dev, "%s: variant %s init failed err %d\n", in ufshcd_variant_hba_init()
8366 __func__, ufshcd_get_var_name(hba), err); in ufshcd_variant_hba_init()
8370 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) in ufshcd_variant_hba_exit() argument
8372 if (!hba->vops) in ufshcd_variant_hba_exit()
8375 ufshcd_vops_setup_regulators(hba, false); in ufshcd_variant_hba_exit()
8377 ufshcd_vops_exit(hba); in ufshcd_variant_hba_exit()
8380 static int ufshcd_hba_init(struct ufs_hba *hba) in ufshcd_hba_init() argument
8391 err = ufshcd_init_hba_vreg(hba); in ufshcd_hba_init()
8395 err = ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_init()
8399 err = ufshcd_init_clocks(hba); in ufshcd_hba_init()
8403 err = ufshcd_setup_clocks(hba, true); in ufshcd_hba_init()
8407 err = ufshcd_init_vreg(hba); in ufshcd_hba_init()
8411 err = ufshcd_setup_vreg(hba, true); in ufshcd_hba_init()
8415 err = ufshcd_variant_hba_init(hba); in ufshcd_hba_init()
8419 ufs_debugfs_hba_init(hba); in ufshcd_hba_init()
8421 hba->is_powered = true; in ufshcd_hba_init()
8425 ufshcd_setup_vreg(hba, false); in ufshcd_hba_init()
8427 ufshcd_setup_clocks(hba, false); in ufshcd_hba_init()
8429 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_init()
8434 static void ufshcd_hba_exit(struct ufs_hba *hba) in ufshcd_hba_exit() argument
8436 if (hba->is_powered) { in ufshcd_hba_exit()
8437 ufshcd_exit_clk_scaling(hba); in ufshcd_hba_exit()
8438 ufshcd_exit_clk_gating(hba); in ufshcd_hba_exit()
8439 if (hba->eh_wq) in ufshcd_hba_exit()
8440 destroy_workqueue(hba->eh_wq); in ufshcd_hba_exit()
8441 ufs_debugfs_hba_exit(hba); in ufshcd_hba_exit()
8442 ufshcd_variant_hba_exit(hba); in ufshcd_hba_exit()
8443 ufshcd_setup_vreg(hba, false); in ufshcd_hba_exit()
8444 ufshcd_setup_clocks(hba, false); in ufshcd_hba_exit()
8445 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_exit()
8446 hba->is_powered = false; in ufshcd_hba_exit()
8447 ufs_put_device_desc(hba); in ufshcd_hba_exit()
8460 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, in ufshcd_set_dev_pwr_mode() argument
8469 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
8470 sdp = hba->sdev_ufs_device; in ufshcd_set_dev_pwr_mode()
8480 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
8491 hba->host->eh_noresume = 1; in ufshcd_set_dev_pwr_mode()
8517 hba->curr_dev_pwr_mode = pwr_mode; in ufshcd_set_dev_pwr_mode()
8520 hba->host->eh_noresume = 0; in ufshcd_set_dev_pwr_mode()
8524 static int ufshcd_link_state_transition(struct ufs_hba *hba, in ufshcd_link_state_transition() argument
8530 if (req_link_state == hba->uic_link_state) in ufshcd_link_state_transition()
8534 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
8536 ufshcd_set_link_hibern8(hba); in ufshcd_link_state_transition()
8538 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
8548 (!check_for_bkops || !hba->auto_bkops_enabled)) { in ufshcd_link_state_transition()
8556 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
8558 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
8566 ufshcd_hba_stop(hba); in ufshcd_link_state_transition()
8571 ufshcd_set_link_off(hba); in ufshcd_link_state_transition()
8578 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_vreg_set_lpm() argument
8588 if (!ufshcd_is_link_active(hba) && in ufshcd_vreg_set_lpm()
8589 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) in ufshcd_vreg_set_lpm()
8607 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_lpm()
8608 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_lpm()
8609 ufshcd_setup_vreg(hba, false); in ufshcd_vreg_set_lpm()
8611 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_lpm()
8612 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_lpm()
8614 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { in ufshcd_vreg_set_lpm()
8615 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_lpm()
8616 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_lpm()
8623 if (vcc_off && hba->vreg_info.vcc && in ufshcd_vreg_set_lpm()
8624 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) in ufshcd_vreg_set_lpm()
8628 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_vreg_set_hpm() argument
8632 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_hpm()
8633 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_hpm()
8634 ret = ufshcd_setup_vreg(hba, true); in ufshcd_vreg_set_hpm()
8635 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_hpm()
8636 if (!ufshcd_is_link_active(hba)) { in ufshcd_vreg_set_hpm()
8637 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
8640 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_hpm()
8644 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); in ufshcd_vreg_set_hpm()
8649 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
8651 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_hpm()
8656 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_lpm() argument
8658 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) in ufshcd_hba_vreg_set_lpm()
8659 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_vreg_set_lpm()
8662 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_hpm() argument
8664 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) in ufshcd_hba_vreg_set_hpm()
8665 ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_vreg_set_hpm()
8684 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufshcd_suspend() argument
8691 hba->pm_op_in_progress = 1; in ufshcd_suspend()
8694 hba->rpm_lvl : hba->spm_lvl; in ufshcd_suspend()
8702 ufshpb_suspend(hba); in ufshcd_suspend()
8708 ufshcd_hold(hba, false); in ufshcd_suspend()
8709 hba->clk_gating.is_suspended = true; in ufshcd_suspend()
8711 if (ufshcd_is_clkscaling_supported(hba)) in ufshcd_suspend()
8712 ufshcd_clk_scaling_suspend(hba, true); in ufshcd_suspend()
8719 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && in ufshcd_suspend()
8720 (req_link_state == hba->uic_link_state)) in ufshcd_suspend()
8724 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { in ufshcd_suspend()
8730 if (ufshcd_can_autobkops_during_suspend(hba)) { in ufshcd_suspend()
8736 ret = ufshcd_urgent_bkops(hba); in ufshcd_suspend()
8741 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
8748 hba->dev_info.b_rpm_dev_flush_capable = in ufshcd_suspend()
8749 hba->auto_bkops_enabled || in ufshcd_suspend()
8752 ufshcd_is_auto_hibern8_enabled(hba))) && in ufshcd_suspend()
8753 ufshcd_wb_need_flush(hba)); in ufshcd_suspend()
8756 flush_work(&hba->eeh_work); in ufshcd_suspend()
8758 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { in ufshcd_suspend()
8759 if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || in ufshcd_suspend()
8762 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
8765 if (!hba->dev_info.b_rpm_dev_flush_capable) { in ufshcd_suspend()
8766 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); in ufshcd_suspend()
8772 ret = ufshcd_link_state_transition(hba, req_link_state, 1); in ufshcd_suspend()
8782 ret = ufshcd_vops_suspend(hba, pm_op); in ufshcd_suspend()
8789 ufshcd_disable_irq(hba); in ufshcd_suspend()
8791 ufshcd_setup_clocks(hba, false); in ufshcd_suspend()
8793 if (ufshcd_is_clkgating_allowed(hba)) { in ufshcd_suspend()
8794 hba->clk_gating.state = CLKS_OFF; in ufshcd_suspend()
8795 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_suspend()
8796 hba->clk_gating.state); in ufshcd_suspend()
8799 ufshcd_vreg_set_lpm(hba); in ufshcd_suspend()
8802 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_suspend()
8806 ufshcd_vreg_set_hpm(hba); in ufshcd_suspend()
8807 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) in ufshcd_suspend()
8808 ufshcd_set_link_active(hba); in ufshcd_suspend()
8809 else if (ufshcd_is_link_off(hba)) in ufshcd_suspend()
8810 ufshcd_host_reset_and_restore(hba); in ufshcd_suspend()
8812 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) in ufshcd_suspend()
8813 ufshcd_disable_auto_bkops(hba); in ufshcd_suspend()
8815 if (ufshcd_is_clkscaling_supported(hba)) in ufshcd_suspend()
8816 ufshcd_clk_scaling_suspend(hba, false); in ufshcd_suspend()
8818 hba->clk_gating.is_suspended = false; in ufshcd_suspend()
8819 hba->dev_info.b_rpm_dev_flush_capable = false; in ufshcd_suspend()
8820 ufshcd_release(hba); in ufshcd_suspend()
8821 ufshpb_resume(hba); in ufshcd_suspend()
8823 if (hba->dev_info.b_rpm_dev_flush_capable) { in ufshcd_suspend()
8824 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, in ufshcd_suspend()
8828 hba->pm_op_in_progress = 0; in ufshcd_suspend()
8831 ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret); in ufshcd_suspend()
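ufshcd_suspend() is a long ladder: hold the clocks and freeze clock scaling, pick the device and link targets from the PM level, flush the exception-event work, move the device power mode, transition the link, run the vendor suspend hook, then drop the IRQ, clocks and regulators; each later failure unwinds back through the earlier steps (bring the link and device power mode back up, release the host) before returning. The sketch below models that do-steps-in-order, undo-on-failure ladder with stand-in step functions and a deliberately failing vendor hook.

#include <stdbool.h>
#include <stdio.h>

static bool set_dev_pwr(bool sleep) { printf("dev pwr %s\n", sleep ? "sleep" : "active"); return true; }
static bool set_link(bool low)      { printf("link %s\n", low ? "hibern8" : "active"); return true; }
static bool vendor_suspend(void)    { printf("vendor suspend\n"); return false; /* force the rollback */ }
static void clocks(bool on)         { printf("clocks %s\n", on ? "on" : "off"); }

/* Ordered suspend with unwind: whatever already succeeded is undone on a later failure. */
static int do_suspend(void)
{
    if (!set_dev_pwr(true))
        return -1;
    if (!set_link(true))
        goto restore_dev;
    if (!vendor_suspend())
        goto restore_link;

    clocks(false);
    return 0;

restore_link:
    set_link(false);
restore_dev:
    set_dev_pwr(false);
    return -1;
}

int main(void)
{
    printf("suspend %s\n", do_suspend() ? "failed" : "done");
    return 0;
}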
8845 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufshcd_resume() argument
8850 hba->pm_op_in_progress = 1; in ufshcd_resume()
8851 old_link_state = hba->uic_link_state; in ufshcd_resume()
8853 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_resume()
8854 ret = ufshcd_vreg_set_hpm(hba); in ufshcd_resume()
8859 ret = ufshcd_setup_clocks(hba, true); in ufshcd_resume()
8864 ufshcd_enable_irq(hba); in ufshcd_resume()
8871 ret = ufshcd_vops_resume(hba, pm_op); in ufshcd_resume()
8875 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_resume()
8876 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_resume()
8878 ufshcd_set_link_active(hba); in ufshcd_resume()
8880 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_resume()
8884 } else if (ufshcd_is_link_off(hba)) { in ufshcd_resume()
8889 ret = ufshcd_reset_and_restore(hba); in ufshcd_resume()
8894 if (ret || !ufshcd_is_link_active(hba)) in ufshcd_resume()
8898 if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_resume()
8899 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); in ufshcd_resume()
8904 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) in ufshcd_resume()
8905 ufshcd_enable_auto_bkops(hba); in ufshcd_resume()
8911 ufshcd_urgent_bkops(hba); in ufshcd_resume()
8913 hba->clk_gating.is_suspended = false; in ufshcd_resume()
8915 if (ufshcd_is_clkscaling_supported(hba)) in ufshcd_resume()
8916 ufshcd_clk_scaling_suspend(hba, false); in ufshcd_resume()
8919 ufshcd_auto_hibern8_enable(hba); in ufshcd_resume()
8921 ufshpb_resume(hba); in ufshcd_resume()
8923 if (hba->dev_info.b_rpm_dev_flush_capable) { in ufshcd_resume()
8924 hba->dev_info.b_rpm_dev_flush_capable = false; in ufshcd_resume()
8925 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); in ufshcd_resume()
8929 ufshcd_release(hba); in ufshcd_resume()
8934 ufshcd_link_state_transition(hba, old_link_state, 0); in ufshcd_resume()
8936 ufshcd_vops_suspend(hba, pm_op); in ufshcd_resume()
8938 ufshcd_disable_irq(hba); in ufshcd_resume()
8939 ufshcd_setup_clocks(hba, false); in ufshcd_resume()
8940 if (ufshcd_is_clkgating_allowed(hba)) { in ufshcd_resume()
8941 hba->clk_gating.state = CLKS_OFF; in ufshcd_resume()
8942 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_resume()
8943 hba->clk_gating.state); in ufshcd_resume()
8946 ufshcd_vreg_set_lpm(hba); in ufshcd_resume()
8948 hba->pm_op_in_progress = 0; in ufshcd_resume()
8950 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret); in ufshcd_resume()
8962 int ufshcd_system_suspend(struct ufs_hba *hba) in ufshcd_system_suspend() argument
8967 down(&hba->host_sem); in ufshcd_system_suspend()
8969 if (!hba->is_powered) in ufshcd_system_suspend()
8972 cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work); in ufshcd_system_suspend()
8974 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == in ufshcd_system_suspend()
8975 hba->curr_dev_pwr_mode) && in ufshcd_system_suspend()
8976 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) == in ufshcd_system_suspend()
8977 hba->uic_link_state) && in ufshcd_system_suspend()
8978 pm_runtime_suspended(hba->dev) && in ufshcd_system_suspend()
8979 !hba->dev_info.b_rpm_dev_flush_capable) in ufshcd_system_suspend()
8982 if (pm_runtime_suspended(hba->dev)) { in ufshcd_system_suspend()
8991 ret = ufshcd_runtime_resume(hba); in ufshcd_system_suspend()
8996 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); in ufshcd_system_suspend()
8998 trace_ufshcd_system_suspend(dev_name(hba->dev), ret, in ufshcd_system_suspend()
9000 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_suspend()
9002 hba->is_sys_suspended = true; in ufshcd_system_suspend()
9004 up(&hba->host_sem); in ufshcd_system_suspend()
9016 int ufshcd_system_resume(struct ufs_hba *hba) in ufshcd_system_resume() argument
9021 if (!hba->is_powered || pm_runtime_suspended(hba->dev)) in ufshcd_system_resume()
9028 ret = ufshcd_resume(hba, UFS_SYSTEM_PM); in ufshcd_system_resume()
9030 trace_ufshcd_system_resume(dev_name(hba->dev), ret, in ufshcd_system_resume()
9032 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_resume()
9034 hba->is_sys_suspended = false; in ufshcd_system_resume()
9035 up(&hba->host_sem); in ufshcd_system_resume()
9048 int ufshcd_runtime_suspend(struct ufs_hba *hba) in ufshcd_runtime_suspend() argument
9053 if (!hba->is_powered) in ufshcd_runtime_suspend()
9056 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM); in ufshcd_runtime_suspend()
9058 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, in ufshcd_runtime_suspend()
9060 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_suspend()
9086 int ufshcd_runtime_resume(struct ufs_hba *hba) in ufshcd_runtime_resume() argument
9091 if (!hba->is_powered) in ufshcd_runtime_resume()
9094 ret = ufshcd_resume(hba, UFS_RUNTIME_PM); in ufshcd_runtime_resume()
9096 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, in ufshcd_runtime_resume()
9098 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_resume()
9103 int ufshcd_runtime_idle(struct ufs_hba *hba) in ufshcd_runtime_idle() argument
9117 int ufshcd_shutdown(struct ufs_hba *hba) in ufshcd_shutdown() argument
9121 down(&hba->host_sem); in ufshcd_shutdown()
9122 hba->shutting_down = true; in ufshcd_shutdown()
9123 up(&hba->host_sem); in ufshcd_shutdown()
9125 if (!hba->is_powered) in ufshcd_shutdown()
9128 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) in ufshcd_shutdown()
9131 pm_runtime_get_sync(hba->dev); in ufshcd_shutdown()
9133 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_shutdown()
9136 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); in ufshcd_shutdown()
9137 hba->is_powered = false; in ufshcd_shutdown()
9148 void ufshcd_remove(struct ufs_hba *hba) in ufshcd_remove() argument
9150 ufs_bsg_remove(hba); in ufshcd_remove()
9151 ufshpb_remove(hba); in ufshcd_remove()
9152 ufs_sysfs_remove_nodes(hba->dev); in ufshcd_remove()
9153 blk_cleanup_queue(hba->tmf_queue); in ufshcd_remove()
9154 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_remove()
9155 blk_cleanup_queue(hba->cmd_queue); in ufshcd_remove()
9156 scsi_remove_host(hba->host); in ufshcd_remove()
9158 ufshcd_disable_intr(hba, hba->intr_mask); in ufshcd_remove()
9159 ufshcd_hba_stop(hba); in ufshcd_remove()
9160 ufshcd_hba_exit(hba); in ufshcd_remove()
9168 void ufshcd_dealloc_host(struct ufs_hba *hba) in ufshcd_dealloc_host() argument
9170 scsi_host_put(hba->host); in ufshcd_dealloc_host()
9181 static int ufshcd_set_dma_mask(struct ufs_hba *hba) in ufshcd_set_dma_mask() argument
9183 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { in ufshcd_set_dma_mask()
9184 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) in ufshcd_set_dma_mask()
9187 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); in ufshcd_set_dma_mask()
9199 struct ufs_hba *hba; in ufshcd_alloc_host() local
9216 hba = shost_priv(host); in ufshcd_alloc_host()
9217 hba->host = host; in ufshcd_alloc_host()
9218 hba->dev = dev; in ufshcd_alloc_host()
9219 *hba_handle = hba; in ufshcd_alloc_host()
9220 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; in ufshcd_alloc_host()
9221 hba->sg_entry_size = sizeof(struct ufshcd_sg_entry); in ufshcd_alloc_host()
9223 INIT_LIST_HEAD(&hba->clk_list_head); in ufshcd_alloc_host()
9249 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) in ufshcd_init() argument
9251 struct request ***tmf_rqs = &ufs_hba_add_info(hba)->tmf_rqs; in ufshcd_init()
9253 struct Scsi_Host *host = hba->host; in ufshcd_init()
9254 struct device *dev = hba->dev; in ufshcd_init()
9262 dev_set_drvdata(dev, hba); in ufshcd_init()
9265 dev_err(hba->dev, in ufshcd_init()
9271 hba->mmio_base = mmio_base; in ufshcd_init()
9272 hba->irq = irq; in ufshcd_init()
9273 hba->vps = &ufs_hba_vps; in ufshcd_init()
9275 err = ufshcd_hba_init(hba); in ufshcd_init()
9280 err = ufshcd_hba_capabilities(hba); in ufshcd_init()
9285 hba->ufs_version = ufshcd_get_ufs_version(hba); in ufshcd_init()
9287 if (hba->ufs_version < ufshci_version(1, 0)) in ufshcd_init()
9288 dev_err(hba->dev, "invalid UFS version 0x%x\n", in ufshcd_init()
9289 hba->ufs_version); in ufshcd_init()
9292 hba->intr_mask = ufshcd_get_intr_mask(hba); in ufshcd_init()
9294 err = ufshcd_set_dma_mask(hba); in ufshcd_init()
9296 dev_err(hba->dev, "set dma mask failed\n"); in ufshcd_init()
9301 err = ufshcd_memory_alloc(hba); in ufshcd_init()
9303 dev_err(hba->dev, "Memory allocation failed\n"); in ufshcd_init()
9308 ufshcd_host_memory_configure(hba); in ufshcd_init()
9310 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
9311 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
9318 hba->max_pwr_info.is_valid = false; in ufshcd_init()
9322 hba->host->host_no); in ufshcd_init()
9323 hba->eh_wq = create_singlethread_workqueue(eh_wq_name); in ufshcd_init()
9324 if (!hba->eh_wq) { in ufshcd_init()
9325 dev_err(hba->dev, "%s: failed to create eh workqueue\n", in ufshcd_init()
9330 INIT_WORK(&hba->eh_work, ufshcd_err_handler); in ufshcd_init()
9331 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); in ufshcd_init()
9333 sema_init(&hba->host_sem, 1); in ufshcd_init()
9336 mutex_init(&hba->uic_cmd_mutex); in ufshcd_init()
9339 mutex_init(&hba->dev_cmd.lock); in ufshcd_init()
9341 init_rwsem(&hba->clk_scaling_lock); in ufshcd_init()
9343 ufshcd_init_clk_gating(hba); in ufshcd_init()
9345 ufshcd_init_clk_scaling(hba); in ufshcd_init()
9352 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), in ufshcd_init()
9354 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); in ufshcd_init()
9362 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); in ufshcd_init()
9364 dev_err(hba->dev, "request irq failed\n"); in ufshcd_init()
9367 hba->is_irq_enabled = true; in ufshcd_init()
9370 err = scsi_add_host(host, hba->dev); in ufshcd_init()
9372 dev_err(hba->dev, "scsi_add_host failed\n"); in ufshcd_init()
9376 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set); in ufshcd_init()
9377 if (IS_ERR(hba->cmd_queue)) { in ufshcd_init()
9378 err = PTR_ERR(hba->cmd_queue); in ufshcd_init()
9382 hba->tmf_tag_set = (struct blk_mq_tag_set) { in ufshcd_init()
9384 .queue_depth = hba->nutmrs, in ufshcd_init()
9388 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); in ufshcd_init()
9391 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); in ufshcd_init()
9392 if (IS_ERR(hba->tmf_queue)) { in ufshcd_init()
9393 err = PTR_ERR(hba->tmf_queue); in ufshcd_init()
9396 *tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, sizeof(**tmf_rqs), in ufshcd_init()
9404 ufshcd_vops_device_reset(hba); in ufshcd_init()
9406 ufshcd_init_crypto(hba); in ufshcd_init()
9409 err = ufshcd_hba_enable(hba); in ufshcd_init()
9411 dev_err(hba->dev, "Host controller enable failed\n"); in ufshcd_init()
9412 ufshcd_print_evt_hist(hba); in ufshcd_init()
9413 ufshcd_print_host_state(hba); in ufshcd_init()
9422 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
9425 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
9429 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, in ufshcd_init()
9433 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) { in ufshcd_init()
9434 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) | in ufshcd_init()
9440 atomic_set(&hba->scsi_block_reqs_cnt, 0); in ufshcd_init()
9447 ufshcd_set_ufs_dev_active(hba); in ufshcd_init()
9449 async_schedule(ufshcd_async_scan, hba); in ufshcd_init()
9450 ufs_sysfs_add_nodes(hba); in ufshcd_init()
9456 blk_cleanup_queue(hba->tmf_queue); in ufshcd_init()
9458 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_init()
9460 blk_cleanup_queue(hba->cmd_queue); in ufshcd_init()
9462 scsi_remove_host(hba->host); in ufshcd_init()
9464 hba->is_irq_enabled = false; in ufshcd_init()
9465 ufshcd_hba_exit(hba); in ufshcd_init()
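ufshcd_init() acquires its resources in a strict order (hba init, capabilities, DMA mask, descriptor memory, error-handling workqueue, locks, IRQ, SCSI host, command and TMF queues, crypto, controller enable), and the tail of this listing shows the matching unwind labels that release them in reverse when a later step fails. The classic goto-cleanup shape it relies on is sketched below with generic stand-in resources.

#include <stdio.h>
#include <stdlib.h>

/* Generic stand-ins for the resources acquired during initialisation. */
static void *get(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void put(const char *what, void *p) { printf("release %s\n", what); free(p); }

/* Acquire in order; each failure label releases exactly what came before it. */
static int init(void)
{
    void *mem = get("descriptor memory");
    if (!mem)
        return -1;

    void *irq = get("irq");
    if (!irq)
        goto out_mem;

    void *queue = get("tmf queue");
    if (!queue)
        goto out_irq;

    /* Success: in the driver these stay held until the remove path tears them down. */
    put("tmf queue", queue);
    put("irq", irq);
    put("descriptor memory", mem);
    return 0;

out_irq:
    put("irq", irq);
out_mem:
    put("descriptor memory", mem);
    return -1;
}

int main(void) { return init(); }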