// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller Platform bus based glue driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"

#define UFSHCD_DEFAULT_LANES_PER_DIRECTION		2

static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
	int ret = 0;
	int cnt;
	int i;
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	char *name;
	u32 *clkfreq = NULL;
	struct ufs_clk_info *clki;
	int len = 0;
	size_t sz = 0;

	if (!np)
		goto out;

	cnt = of_property_count_strings(np, "clock-names");
	if (!cnt || (cnt == -EINVAL)) {
		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
				__func__);
	} else if (cnt < 0) {
		dev_err(dev, "%s: count clock strings failed, err %d\n",
				__func__, cnt);
		ret = cnt;
	}

	if (cnt <= 0)
		goto out;

	if (!of_get_property(np, "freq-table-hz", &len)) {
		dev_info(dev, "freq-table-hz property not specified\n");
		goto out;
	}

	if (len <= 0)
		goto out;

	sz = len / sizeof(*clkfreq);
	if (sz != 2 * cnt) {
		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
		ret = -EINVAL;
		goto out;
	}

	clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
			       GFP_KERNEL);
	if (!clkfreq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = of_property_read_u32_array(np, "freq-table-hz",
			clkfreq, sz);
	if (ret && (ret != -EINVAL)) {
		dev_err(dev, "%s: error reading array %d\n",
				"freq-table-hz", ret);
		return ret;
	}

	for (i = 0; i < sz; i += 2) {
		ret = of_property_read_string_index(np,
				"clock-names", i/2, (const char **)&name);
		if (ret)
			goto out;

		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
		if (!clki) {
			ret = -ENOMEM;
			goto out;
		}

		clki->min_freq = clkfreq[i];
		clki->max_freq = clkfreq[i+1];
		clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
		if (!clki->name) {
			ret = -ENOMEM;
			goto out;
		}

		if (!strcmp(name, "ref_clk"))
			clki->keep_link_active = true;
		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
				clki->min_freq, clki->max_freq, clki->name);
		list_add_tail(&clki->list, &hba->clk_list_head);
	}
out:
	return ret;
}
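
/*
 * Illustrative only: a device-tree fragment of the shape the parser above
 * expects, with one "freq-table-hz" <min max> pair per entry in
 * "clock-names". The node name, clock phandles and frequencies below are
 * hypothetical and not taken from any particular platform:
 *
 *	ufshc@fe8a0000 {
 *		clocks = <&cru ACLK_UFS>, <&cru SCLK_UFS_REF>;
 *		clock-names = "core_clk", "ref_clk";
 *		freq-table-hz = <100000000 200000000>, <19200000 19200000>;
 *	};
 *
 * An entry named "ref_clk" additionally gets keep_link_active set, so that
 * clock is not gated while the link remains active.
 */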

static bool phandle_exists(const struct device_node *np,
			   const char *phandle_name, int index)
{
	struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);

	if (parse_np)
		of_node_put(parse_np);

	return parse_np != NULL;
}

#define MAX_PROP_SIZE 32
static int ufshcd_populate_vreg(struct device *dev, const char *name,
				struct ufs_vreg **out_vreg)
{
	int ret = 0;
	char prop_name[MAX_PROP_SIZE];
	struct ufs_vreg *vreg = NULL;
	struct device_node *np = dev->of_node;

	if (!np) {
		dev_err(dev, "%s: non DT initialization\n", __func__);
		goto out;
	}

	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
	if (!phandle_exists(np, prop_name, 0)) {
		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
				__func__, prop_name);
		goto out;
	}

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg)
		return -ENOMEM;

	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!vreg->name)
		return -ENOMEM;

	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
		dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
		vreg->max_uA = 0;
	}
out:
	if (!ret)
		*out_vreg = vreg;
	return ret;
}

/**
 * ufshcd_parse_regulator_info - get regulator info from device tree
 * @hba: per adapter instance
 *
 * Get regulator info from the device tree for the vdd-hba, vcc, vccq and
 * vccq2 power supplies. If any of the supplies is not defined, it is assumed
 * to be always-on and zero is returned. If a supply property is defined but
 * cannot be parsed, the corresponding error is returned.
 */
static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
out:
	return err;
}
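
/*
 * Illustrative only: the supply properties ufshcd_populate_vreg() looks for,
 * with hypothetical regulator phandles and a hypothetical load value. Any
 * supply that is absent is simply treated as always-on:
 *
 *	ufshc@fe8a0000 {
 *		vdd-hba-supply = <&vdd_ufs_hba>;
 *		vcc-supply = <&vcc_flash>;
 *		vcc-max-microamp = <500000>;
 *		vccq-supply = <&vccq_ufs>;
 *		vccq2-supply = <&vccq2_ufs>;
 *	};
 */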

#ifdef CONFIG_PM
/**
 * ufshcd_pltfrm_suspend - suspend power management function
 * @dev: pointer to device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
int ufshcd_pltfrm_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend);

/**
 * ufshcd_pltfrm_resume - resume power management function
 * @dev: pointer to device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
int ufshcd_pltfrm_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume);

int ufshcd_pltfrm_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend);

int ufshcd_pltfrm_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume);

int ufshcd_pltfrm_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle);

#endif /* CONFIG_PM */
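
/*
 * A minimal sketch (not taken from any particular glue driver) of how the
 * callbacks above are typically wired into a vendor driver's dev_pm_ops;
 * "ufs_example_pm_ops" is a placeholder name:
 *
 *	static const struct dev_pm_ops ufs_example_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pltfrm_suspend,
 *					ufshcd_pltfrm_resume)
 *		SET_RUNTIME_PM_OPS(ufshcd_pltfrm_runtime_suspend,
 *				   ufshcd_pltfrm_runtime_resume,
 *				   ufshcd_pltfrm_runtime_idle)
 *	};
 *
 * The resulting ops struct is then referenced from the platform_driver's
 * .driver.pm field.
 */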

void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);

static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	int ret;

	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
		&hba->lanes_per_direction);
	if (ret) {
		dev_dbg(hba->dev,
			"%s: failed to read lanes-per-direction, ret=%d\n",
			__func__, ret);
		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
	}
}
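
/*
 * Illustrative only: the optional property read above, e.g.
 *
 *	lanes-per-direction = <1>;
 *
 * When the property is absent, hba->lanes_per_direction falls back to
 * UFSHCD_DEFAULT_LANES_PER_DIRECTION (2).
 */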

/**
 * ufshcd_get_pwr_dev_param - get the final agreed attributes for
 *                            a power mode change
 * @pltfrm_param: pointer to the platform (host) parameters
 * @dev_max: pointer to the maximum attributes supported by the device
 * @agreed_pwr: returned agreed attributes
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
			     struct ufs_pa_layer_attr *dev_max,
			     struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_pltfrm_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_pltfrm_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
		is_pltfrm_max_hs = true;
		min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
					pltfrm_param->hs_tx_gear);
	} else {
		min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
					pltfrm_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but
	 * pltfrm_param->desired_working_mode is HS,
	 * thus device and pltfrm_param don't agree
	 */
	if (!is_dev_sup_hs && is_pltfrm_max_hs) {
		pr_info("%s: device doesn't support HS\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_pltfrm_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since pltfrm_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to pltfrm_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	} else {
		/*
		 * here pltfrm_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases pltfrm_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    pltfrm_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    pltfrm_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * If the device capabilities and the vendor pre-defined preferences
	 * are both HS or both PWM, then the chosen working gear is the
	 * minimum of the two gears.
	 * If one is PWM and the other is HS, the PWM side gets to decide the
	 * gear, as it is also the side that decided above which power mode
	 * the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
	    (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
		agreed_pwr->gear_rx =
			min_t(u32, min_dev_gear, min_pltfrm_gear);
	} else if (!is_dev_sup_hs) {
		agreed_pwr->gear_rx = min_dev_gear;
	} else {
		agreed_pwr->gear_rx = min_pltfrm_gear;
	}
	agreed_pwr->gear_tx = agreed_pwr->gear_rx;

	agreed_pwr->hs_rate = pltfrm_param->hs_rate;

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
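
/*
 * A hedged usage sketch: a vendor driver's ->pwr_change_notify() typically
 * calls this helper on PRE_CHANGE, passing a struct ufs_dev_params that
 * describes the host's own limits together with the dev_max_params and
 * dev_req_params pointers it received from the ufshcd core. The limit values
 * below are placeholders, not a recommendation for any specific controller:
 *
 *	struct ufs_dev_params host_cap = {
 *		.tx_lanes = 2,
 *		.rx_lanes = 2,
 *		.hs_rx_gear = UFS_HS_G3,
 *		.hs_tx_gear = UFS_HS_G3,
 *		.pwm_rx_gear = UFS_PWM_G4,
 *		.pwm_tx_gear = UFS_PWM_G4,
 *		.rx_pwr_hs = FAST_MODE,
 *		.rx_pwr_pwm = SLOW_MODE,
 *		.hs_rate = PA_HS_MODE_B,
 *		.desired_working_mode = UFS_HS_MODE,
 *	};
 *
 *	ret = ufshcd_get_pwr_dev_param(&host_cap, dev_max_params,
 *				       dev_req_params);
 */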

/**
 * ufshcd_pltfrm_init - probe routine of the driver
 * @pdev: pointer to Platform device handle
 * @vops: pointer to variant ops
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_pltfrm_init(struct platform_device *pdev,
		       const struct ufs_hba_variant_ops *vops)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int irq, err;
	struct device *dev = &pdev->dev;

	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base)) {
		err = PTR_ERR(mmio_base);
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto out;
	}

	err = ufshcd_alloc_host(dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		goto out;
	}

	hba->vops = vops;

	err = ufshcd_parse_clock_info(hba);
	if (err) {
		dev_err(&pdev->dev, "%s: clock parse failed %d\n",
				__func__, err);
		goto dealloc_host;
	}
	err = ufshcd_parse_regulator_info(hba);
	if (err) {
		dev_err(&pdev->dev, "%s: regulator init failed %d\n",
				__func__, err);
		goto dealloc_host;
	}

	ufshcd_init_lanes_per_dir(hba);

	err = ufshcd_init(hba, mmio_base, irq);
	if (err) {
		dev_err(dev, "Initialization failed\n");
		goto dealloc_host;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

dealloc_host:
	ufshcd_dealloc_host(hba);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
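
/*
 * A minimal, hypothetical sketch of a platform glue driver built on top of
 * ufshcd_pltfrm_init(); the ufs_example_* names are placeholders and do not
 * exist in this file:
 *
 *	static const struct ufs_hba_variant_ops ufs_example_vops = {
 *		.name = "ufs-example",
 *	};
 *
 *	static int ufs_example_probe(struct platform_device *pdev)
 *	{
 *		return ufshcd_pltfrm_init(pdev, &ufs_example_vops);
 *	}
 *
 *	static int ufs_example_remove(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba = platform_get_drvdata(pdev);
 *
 *		pm_runtime_get_sync(&pdev->dev);
 *		ufshcd_remove(hba);
 *		return 0;
 *	}
 *
 * The probe/remove pair, ufshcd_pltfrm_shutdown() and a dev_pm_ops instance
 * are then registered through a struct platform_driver.
 */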

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);