Lines Matching refs:hba

43 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
51 static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, in ufs_qcom_dump_regs_wrapper() argument
54 ufshcd_dump_regs(hba, offset, len * 4, prefix); in ufs_qcom_dump_regs_wrapper()
57 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) in ufs_qcom_get_connected_tx_lanes() argument
61 err = ufshcd_dme_get(hba, in ufs_qcom_get_connected_tx_lanes()
64 dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n", in ufs_qcom_get_connected_tx_lanes()
123 struct device *dev = host->hba->dev; in ufs_qcom_enable_lane_clks()
164 struct device *dev = host->hba->dev; in ufs_qcom_init_lane_clks()
180 if (host->hba->lanes_per_direction > 1) { in ufs_qcom_init_lane_clks()
193 static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) in ufs_qcom_link_startup_post_change() argument
197 return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); in ufs_qcom_link_startup_post_change()
200 static int ufs_qcom_check_hibern8(struct ufs_hba *hba) in ufs_qcom_check_hibern8() argument
207 err = ufshcd_dme_get(hba, in ufs_qcom_check_hibern8()
223 err = ufshcd_dme_get(hba, in ufs_qcom_check_hibern8()
229 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", in ufs_qcom_check_hibern8()
233 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", in ufs_qcom_check_hibern8()
242 ufshcd_rmwl(host->hba, QUNIPRO_SEL, in ufs_qcom_select_unipro_mode()
252 static int ufs_qcom_host_reset(struct ufs_hba *hba) in ufs_qcom_host_reset() argument
255 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_host_reset()
259 dev_warn(hba->dev, "%s: reset control not set\n", __func__); in ufs_qcom_host_reset()
263 reenable_intr = hba->is_irq_enabled; in ufs_qcom_host_reset()
264 disable_irq(hba->irq); in ufs_qcom_host_reset()
265 hba->is_irq_enabled = false; in ufs_qcom_host_reset()
269 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", in ufs_qcom_host_reset()
283 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", in ufs_qcom_host_reset()
289 enable_irq(hba->irq); in ufs_qcom_host_reset()
290 hba->is_irq_enabled = true; in ufs_qcom_host_reset()
297 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) in ufs_qcom_power_up_sequence() argument
299 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_power_up_sequence()
306 ret = ufs_qcom_host_reset(hba); in ufs_qcom_power_up_sequence()
308 dev_warn(hba->dev, "%s: host reset returned %d\n", in ufs_qcom_power_up_sequence()
317 dev_err(hba->dev, "%s: phy init failed, ret = %d\n", in ufs_qcom_power_up_sequence()
325 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", in ufs_qcom_power_up_sequence()
348 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) in ufs_qcom_enable_hw_clk_gating() argument
350 ufshcd_writel(hba, in ufs_qcom_enable_hw_clk_gating()
351 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, in ufs_qcom_enable_hw_clk_gating()
358 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, in ufs_qcom_hce_enable_notify() argument
361 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_hce_enable_notify()
366 ufs_qcom_power_up_sequence(hba); in ufs_qcom_hce_enable_notify()
376 err = ufs_qcom_check_hibern8(hba); in ufs_qcom_hce_enable_notify()
377 ufs_qcom_enable_hw_clk_gating(hba); in ufs_qcom_hce_enable_notify()
381 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); in ufs_qcom_hce_enable_notify()
391 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, in ufs_qcom_cfg_timers() argument
395 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_cfg_timers()
428 if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) in ufs_qcom_cfg_timers()
432 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); in ufs_qcom_cfg_timers()
436 list_for_each_entry(clki, &hba->clk_list_head, list) { in ufs_qcom_cfg_timers()
446 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { in ufs_qcom_cfg_timers()
447 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); in ufs_qcom_cfg_timers()
467 dev_err(hba->dev, in ufs_qcom_cfg_timers()
476 dev_err(hba->dev, in ufs_qcom_cfg_timers()
484 dev_err(hba->dev, "%s: invalid rate = %d\n", in ufs_qcom_cfg_timers()
492 dev_err(hba->dev, in ufs_qcom_cfg_timers()
502 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); in ufs_qcom_cfg_timers()
506 if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) != in ufs_qcom_cfg_timers()
509 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, in ufs_qcom_cfg_timers()
519 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100), in ufs_qcom_cfg_timers()
535 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, in ufs_qcom_link_startup_notify() argument
539 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_link_startup_notify()
543 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, in ufs_qcom_link_startup_notify()
545 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", in ufs_qcom_link_startup_notify()
556 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, in ufs_qcom_link_startup_notify()
566 if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) in ufs_qcom_link_startup_notify()
567 err = ufshcd_disable_host_tx_lcc(hba); in ufs_qcom_link_startup_notify()
571 ufs_qcom_link_startup_post_change(hba); in ufs_qcom_link_startup_notify()
581 static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) in ufs_qcom_device_reset_ctrl() argument
583 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_device_reset_ctrl()
592 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufs_qcom_suspend() argument
594 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_suspend()
597 if (ufs_qcom_is_link_off(hba)) { in ufs_qcom_suspend()
607 ufs_qcom_device_reset_ctrl(hba, true); in ufs_qcom_suspend()
609 } else if (!ufs_qcom_is_link_active(hba)) { in ufs_qcom_suspend()
616 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufs_qcom_resume() argument
618 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_resume()
622 if (ufs_qcom_is_link_off(hba)) { in ufs_qcom_resume()
625 dev_err(hba->dev, "%s: failed PHY power on: %d\n", in ufs_qcom_resume()
634 } else if (!ufs_qcom_is_link_active(hba)) { in ufs_qcom_resume()
663 gating_wait = host->hba->dev_info.clk_gating_wait_us; in ufs_qcom_dev_ref_clk_ctrl()
699 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, in ufs_qcom_pwr_change_notify() argument
704 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_pwr_change_notify()
754 if (!ufshcd_is_hs_mode(&hba->pwr_info) && in ufs_qcom_pwr_change_notify()
761 ufshcd_dme_set(hba, in ufs_qcom_pwr_change_notify()
766 ufshcd_dme_set(hba, in ufs_qcom_pwr_change_notify()
773 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, in ufs_qcom_pwr_change_notify()
776 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", in ufs_qcom_pwr_change_notify()
791 if (ufshcd_is_hs_mode(&hba->pwr_info) && in ufs_qcom_pwr_change_notify()
803 static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) in ufs_qcom_quirk_host_pa_saveconfigtime() argument
808 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), in ufs_qcom_quirk_host_pa_saveconfigtime()
814 err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), in ufs_qcom_quirk_host_pa_saveconfigtime()
821 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) in ufs_qcom_apply_dev_quirks() argument
825 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) in ufs_qcom_apply_dev_quirks()
826 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); in ufs_qcom_apply_dev_quirks()
828 if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) in ufs_qcom_apply_dev_quirks()
829 hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; in ufs_qcom_apply_dev_quirks()
834 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) in ufs_qcom_get_ufs_hci_version() argument
836 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_get_ufs_hci_version()
853 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) in ufs_qcom_advertise_quirks() argument
855 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_advertise_quirks()
858 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS in ufs_qcom_advertise_quirks()
863 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR; in ufs_qcom_advertise_quirks()
865 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; in ufs_qcom_advertise_quirks()
869 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; in ufs_qcom_advertise_quirks()
873 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS in ufs_qcom_advertise_quirks()
879 static void ufs_qcom_set_caps(struct ufs_hba *hba) in ufs_qcom_set_caps() argument
881 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_set_caps()
883 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; in ufs_qcom_set_caps()
884 hba->caps |= UFSHCD_CAP_CLK_SCALING; in ufs_qcom_set_caps()
885 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; in ufs_qcom_set_caps()
886 hba->caps |= UFSHCD_CAP_WB_EN; in ufs_qcom_set_caps()
887 hba->caps |= UFSHCD_CAP_CRYPTO; in ufs_qcom_set_caps()
903 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, in ufs_qcom_setup_clocks() argument
906 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_setup_clocks()
920 if (!ufs_qcom_is_link_active(hba)) { in ufs_qcom_setup_clocks()
929 if (ufshcd_is_hs_mode(&hba->pwr_info)) in ufs_qcom_setup_clocks()
945 ufs_qcom_assert_reset(host->hba); in ufs_qcom_reset_assert()
958 ufs_qcom_deassert_reset(host->hba); in ufs_qcom_reset_deassert()
995 static int ufs_qcom_init(struct ufs_hba *hba) in ufs_qcom_init() argument
998 struct device *dev = hba->dev; in ufs_qcom_init()
1014 host->hba = hba; in ufs_qcom_init()
1015 ufshcd_set_variant(hba, host); in ufs_qcom_init()
1018 host->core_reset = devm_reset_control_get(hba->dev, "rst"); in ufs_qcom_init()
1072 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, in ufs_qcom_init()
1080 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; in ufs_qcom_init()
1104 ufs_qcom_set_caps(hba); in ufs_qcom_init()
1105 ufs_qcom_advertise_quirks(hba); in ufs_qcom_init()
1111 ufs_qcom_setup_clocks(hba, true, POST_CHANGE); in ufs_qcom_init()
1113 if (hba->dev->id < MAX_UFS_QCOM_HOSTS) in ufs_qcom_init()
1114 ufs_qcom_hosts[hba->dev->id] = host; in ufs_qcom_init()
1128 ufshcd_set_variant(hba, NULL); in ufs_qcom_init()
1133 static void ufs_qcom_exit(struct ufs_hba *hba) in ufs_qcom_exit() argument
1135 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_exit()
1142 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, in ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div() argument
1151 err = ufshcd_dme_get(hba, in ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div()
1163 err = ufshcd_dme_set(hba, in ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div()
1170 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) in ufs_qcom_clk_scale_up_pre_change() argument
1176 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) in ufs_qcom_clk_scale_up_post_change() argument
1178 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_up_post_change()
1184 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); in ufs_qcom_clk_scale_up_post_change()
1187 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) in ufs_qcom_clk_scale_down_pre_change() argument
1189 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_down_pre_change()
1196 err = ufshcd_dme_get(hba, in ufs_qcom_clk_scale_down_pre_change()
1204 err = ufshcd_dme_set(hba, in ufs_qcom_clk_scale_down_pre_change()
1212 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) in ufs_qcom_clk_scale_down_post_change() argument
1214 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_down_post_change()
1220 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75); in ufs_qcom_clk_scale_down_post_change()
1223 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, in ufs_qcom_clk_scale_notify() argument
1226 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_notify()
1231 err = ufshcd_uic_hibern8_enter(hba); in ufs_qcom_clk_scale_notify()
1235 err = ufs_qcom_clk_scale_up_pre_change(hba); in ufs_qcom_clk_scale_notify()
1237 err = ufs_qcom_clk_scale_down_pre_change(hba); in ufs_qcom_clk_scale_notify()
1239 ufshcd_uic_hibern8_exit(hba); in ufs_qcom_clk_scale_notify()
1243 err = ufs_qcom_clk_scale_up_post_change(hba); in ufs_qcom_clk_scale_notify()
1245 err = ufs_qcom_clk_scale_down_post_change(hba); in ufs_qcom_clk_scale_notify()
1249 ufshcd_uic_hibern8_exit(hba); in ufs_qcom_clk_scale_notify()
1253 ufs_qcom_cfg_timers(hba, in ufs_qcom_clk_scale_notify()
1258 ufshcd_uic_hibern8_exit(hba); in ufs_qcom_clk_scale_notify()
1265 static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, in ufs_qcom_print_hw_debug_reg_all() argument
1266 void *priv, void (*print_fn)(struct ufs_hba *hba, in ufs_qcom_print_hw_debug_reg_all() argument
1272 if (unlikely(!hba)) { in ufs_qcom_print_hw_debug_reg_all()
1277 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); in ufs_qcom_print_hw_debug_reg_all()
1281 host = ufshcd_get_variant(hba); in ufs_qcom_print_hw_debug_reg_all()
1286 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); in ufs_qcom_print_hw_debug_reg_all()
1288 reg = ufshcd_readl(hba, REG_UFS_CFG1); in ufs_qcom_print_hw_debug_reg_all()
1290 ufshcd_writel(hba, reg, REG_UFS_CFG1); in ufs_qcom_print_hw_debug_reg_all()
1293 print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv); in ufs_qcom_print_hw_debug_reg_all()
1296 print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv); in ufs_qcom_print_hw_debug_reg_all()
1299 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); in ufs_qcom_print_hw_debug_reg_all()
1302 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1); in ufs_qcom_print_hw_debug_reg_all()
1305 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); in ufs_qcom_print_hw_debug_reg_all()
1308 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv); in ufs_qcom_print_hw_debug_reg_all()
1311 print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv); in ufs_qcom_print_hw_debug_reg_all()
1314 print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv); in ufs_qcom_print_hw_debug_reg_all()
1317 print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv); in ufs_qcom_print_hw_debug_reg_all()
1320 print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv); in ufs_qcom_print_hw_debug_reg_all()
1323 print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv); in ufs_qcom_print_hw_debug_reg_all()
1329 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, in ufs_qcom_enable_test_bus()
1331 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); in ufs_qcom_enable_test_bus()
1333 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1); in ufs_qcom_enable_test_bus()
1334 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); in ufs_qcom_enable_test_bus()
1348 dev_err(host->hba->dev, in ufs_qcom_testbus_cfg_is_ok()
1426 ufshcd_rmwl(host->hba, TEST_BUS_SEL, in ufs_qcom_testbus_config()
1429 ufshcd_rmwl(host->hba, mask, in ufs_qcom_testbus_config()
1442 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) in ufs_qcom_dump_dbg_regs() argument
1444 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, in ufs_qcom_dump_dbg_regs()
1447 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); in ufs_qcom_dump_dbg_regs()
1456 static int ufs_qcom_device_reset(struct ufs_hba *hba) in ufs_qcom_device_reset() argument
1458 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_device_reset()
1468 ufs_qcom_device_reset_ctrl(hba, true); in ufs_qcom_device_reset()
1471 ufs_qcom_device_reset_ctrl(hba, false); in ufs_qcom_device_reset()
1478 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, in ufs_qcom_config_scaling_param() argument
1493 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, in ufs_qcom_config_scaling_param() argument
1552 struct ufs_hba *hba = platform_get_drvdata(pdev); in ufs_qcom_remove() local
1555 ufshcd_remove(hba); in ufs_qcom_remove()
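
The matches above repeat one pattern: each vendor hook receives the generic struct ufs_hba, recovers the Qualcomm-specific state with ufshcd_get_variant(), and then touches vendor registers or UniPro attributes through the ufshcd helpers that take hba. A minimal sketch of that pattern follows; it is illustrative only (example_select_qunipro() is a hypothetical helper, not part of the matched file) and assumes the drivers/scsi/ufs headers the real driver uses.

/* Illustrative sketch, not from the matched file. */
#include "ufshcd.h"	/* struct ufs_hba, ufshcd_rmwl(), ufshcd_dme_get() */
#include "ufshci.h"	/* UIC_ARG_MIB() */
#include "unipro.h"	/* PA_CONNECTEDTXDATALANES */
#include "ufs-qcom.h"	/* struct ufs_qcom_host, QUNIPRO_SEL, REG_UFS_CFG1 */

static int example_select_qunipro(struct ufs_hba *hba, bool enable)
{
	/* hba's private pointer holds the vendor host set up in ufs_qcom_init(). */
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 tx_lanes = 0;
	int err;

	dev_dbg(hba->dev, "%s: controller major rev %d\n",
		__func__, host->hw_ver.major);

	/* Vendor MMIO space is reached through hba's register helpers. */
	ufshcd_rmwl(hba, QUNIPRO_SEL, enable ? QUNIPRO_SEL : 0, REG_UFS_CFG1);

	/* UniPro attributes go through the DME helpers, again keyed on hba. */
	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			     &tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
			__func__, err);

	return err;
}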