// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include "ufshcd.h"
#include "ufshcd-crypto.h"
#include "ufshcd-pltfrm.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

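/*
 * MediaTek SIP (Silicon Provider) service helpers: each wrapper issues
 * an SMC call into the secure firmware with MTK_SIP_UFS_CONTROL as the
 * function ID and a sub-command selecting the operation. The firmware
 * status is returned in res.a0; the callers below treat a non-zero
 * value as failure.
 */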
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

#define ufs_mtk_va09_pwr_ctrl(res, on) \
	ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)

#define ufs_mtk_crypto_ctrl(res, enable) \
	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)

#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)

static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

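/*
 * ufs_mtk_cfg_unipro_cg - enable or disable UniPro local clock gating
 *
 * Toggles the RX-symbol/SYS/TX clock-gate enables in the
 * vendor-specific VS_SAVEPOWERCONTROL attribute, and clears or sets
 * TX_SYMBOL_CLK_REQ_FORCE in VS_DEBUGCLOCKENABLE to match.
 */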
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

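/*
 * ufs_mtk_host_reset - perform a full host controller reset by
 * asserting the HCI, crypto and UniPro reset lines, holding them for
 * about 100 us, then releasing them in reverse order.
 */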
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	unsigned long flags;

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			spin_lock_irqsave(hba->host->host_lock, flags);
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			spin_unlock_irqrestore(hba->host->host_lock,
					       flags);

			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, return -EPROBE_DEFER so probing is
		 * retried once the phy driver is ready.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow an unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}

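/*
 * ufs_mtk_setup_ref_clk - gate or ungate the device reference clock
 *
 * Writes a request to REG_UFS_REFCLK_CTRL and polls until the hardware
 * ack bit matches the request bit, timing out after
 * REFCLK_REQ_TIMEOUT_US. The secure firmware is notified via
 * ufs_mtk_ref_clk_notify() before ungating and after gating.
 */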
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until the ack bit equals the req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us, u16 ungating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = ungating_us;
}

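/*
 * ufs_mtk_wait_link_state - poll the vendor debug probe for a link
 * state
 *
 * Selects the link-state probe through REG_UFS_DEBUG_SEL and polls the
 * top nibble of REG_UFS_PROBE until it matches @state or @max_wait_ms
 * elapses.
 */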
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for at most 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	if (val == state)
		return 0;

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

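/*
 * ufs_mtk_boost_crypt - move the crypto engine between performance and
 * low-power operating points
 *
 * When boosting, the vcore request is raised to the configured minimum
 * before the crypto clock mux is reparented to the performance clock;
 * when unboosting, the mux is switched to the low-power clock before
 * the vcore request is dropped.
 */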
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

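/*
 * ufs_mtk_init_host_caps - derive optional host capabilities from
 * device-tree properties. An illustrative node (hypothetical unit
 * address and voltage; property names are taken from the code below):
 *
 *	ufshci@11270000 {
 *		compatible = "mediatek,mt8183-ufshci";
 *		mediatek,ufs-boost-crypt;
 *		boost-crypt-vcore-min = <800000>;
 *		mediatek,ufs-support-va09;
 *		mediatek,ufs-disable-ah8;
 *	};
 */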
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

/**
 * ufs_mtk_setup_clocks - enable or disable clocks
 * @hba: host controller instance
 * @on: if true, enable clocks, else disable them
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * If ufs_mtk_init() is not yet done, simply ignore.
	 * ufs_mtk_setup_clocks() will be called again from
	 * ufs_mtk_init() once initialization is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate the ref-clk and power off the mphy if the
			 * link state is OFF or Hibern8, entered by either
			 * Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off) {
			ufs_mtk_boost_crypt(hba, on);
			ufs_mtk_setup_ref_clk(hba, on);
			phy_power_off(host->mphy);
		}
	} else if (on && status == POST_CHANGE) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		ufs_mtk_boost_crypt(hba, on);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix the HCI version for platforms that report
			 * an incorrect version.
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init(), thus
	 * phy clock setup is skipped there.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	u32 adapt_val;
	int ret;

	host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
	host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
	host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
	host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
	host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
	host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
	host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
	host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
	host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
	host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
	host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
	host_cap.desired_working_mode =
				UFS_MTK_LIMIT_DESIRED_MODE;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (host->hw_ver.major >= 3) {
		if (dev_req_params->gear_tx == UFS_HS_G4)
			adapt_val = PA_INITIAL_ADAPT;
		else
			adapt_val = PA_NO_ADAPT;
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       adapt_val);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

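/*
 * ufs_mtk_unipro_set_pm - enter or leave UniPro low-power mode via the
 * vendor-specific VS_UNIPROPOWERDOWNCONTROL attribute. The resulting
 * state is cached in host->unipro_lpm so that the HCE-enable path can
 * pick the matching re-enable delay.
 */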
static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm);
	if (!ret || !lpm) {
		/*
		 * Forcibly set non-LPM mode if the UIC command failed, so
		 * that the default hba_enable_delay_us value is used when
		 * re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_pm(hba, false);
	if (ret)
		return ret;

	/*
	 * Set PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

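/*
 * ufs_mtk_setup_clk_gating - align the clock-gating delay with the
 * Auto-Hibern8 idle timer: clk_gating.delay_ms is set to the AH8 idle
 * time (default 10 ms) plus 5 ms of margin, presumably so the link has
 * entered Hibern8 before clocks are gated.
 */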
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	unsigned long flags;
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.delay_ms = ah_ms + 5;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* configure auto-hibern8 timer to 10ms */
	if (ufshcd_is_auto_hibern8_supported(hba)) {
		ufshcd_auto_hibern8_update(hba,
			FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
	}

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on the safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_pm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	err = ufs_mtk_unipro_set_pm(hba, true);
	if (err) {
		/* Resume the UniPro state for the following error recovery */
		ufs_mtk_unipro_set_pm(hba, false);
		return err;
	}

	return 0;
}

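/*
 * ufs_mtk_vreg_set_lpm - switch VCCQ2 between idle and normal
 * regulator modes. VCCQ2 is put into idle mode only while VCC is off;
 * leaving low-power mode always restores normal mode.
 */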
static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
		return;

	if (lpm && !hba->vreg_info.vcc->enabled)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_IDLE);
	else if (!lpm)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_NORMAL);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned, to prevent
		 * ufshcd_suspend() from re-enabling regulators while vreg
		 * is still in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	return 0;
fail:
	/*
	 * Forcibly set the link to the off state to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for a complete host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	ufs_mtk_vreg_set_lpm(hba, false);

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_UFS_PROBE */
	ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);

	/*
	 * Decide the waiting time before gating the reference clock and
	 * after ungating the reference clock according to vendors'
	 * requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);

	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on, thus no delay is needed
		 * during regulator operations.
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;

	trace_ufs_mtk_event(evt, val);
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to the platform device handle
 *
 * Returns zero for success and non-zero for failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "failed to find ti,syscon-reset node\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "failed to find reset_pdev\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
		DL_FLAG_AUTOPROBE_CONSUMER);
	if (!link) {
		dev_notice(dev, "failed to add reset device_link\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to the platform device handle
 *
 * Always returns 0.
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	.suspend         = ufshcd_pltfrm_suspend,
	.resume          = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);