// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller Platform bus based glue driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"

#define UFSHCD_DEFAULT_LANES_PER_DIRECTION		2

static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
	int ret = 0;
	int cnt;
	int i;
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	char *name;
	u32 *clkfreq = NULL;
	struct ufs_clk_info *clki;
	int len = 0;
	size_t sz = 0;

	if (!np)
		goto out;

	cnt = of_property_count_strings(np, "clock-names");
	if (!cnt || (cnt == -EINVAL)) {
		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
				__func__);
	} else if (cnt < 0) {
		dev_err(dev, "%s: count clock strings failed, err %d\n",
				__func__, cnt);
		ret = cnt;
	}

	if (cnt <= 0)
		goto out;

	if (!of_get_property(np, "freq-table-hz", &len)) {
		dev_info(dev, "freq-table-hz property not specified\n");
		goto out;
	}

	if (len <= 0)
		goto out;

	sz = len / sizeof(*clkfreq);
	if (sz != 2 * cnt) {
		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
		ret = -EINVAL;
		goto out;
	}

	clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
			       GFP_KERNEL);
	if (!clkfreq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = of_property_read_u32_array(np, "freq-table-hz",
			clkfreq, sz);
	if (ret && (ret != -EINVAL)) {
		dev_err(dev, "%s: error reading array %d\n",
				"freq-table-hz", ret);
		return ret;
	}

	for (i = 0; i < sz; i += 2) {
		ret = of_property_read_string_index(np,
				"clock-names", i/2, (const char **)&name);
		if (ret)
			goto out;

		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
		if (!clki) {
			ret = -ENOMEM;
			goto out;
		}

		clki->min_freq = clkfreq[i];
		clki->max_freq = clkfreq[i+1];
		clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
		if (!clki->name) {
			ret = -ENOMEM;
			goto out;
		}

		if (!strcmp(name, "ref_clk"))
			clki->keep_link_active = true;
		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
				clki->min_freq, clki->max_freq, clki->name);
		list_add_tail(&clki->list, &hba->clk_list_head);
	}
out:
	return ret;
}

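/*
 * Example (illustrative only): the parser above expects the host DT node to
 * carry matching "clock-names" and "freq-table-hz" entries, one <min max>
 * pair per named clock.  The clock provider, names and rates below are
 * hypothetical:
 *
 *	clocks = <&cru ACLK_UFS>, <&cru SCLK_UFS_REF>;
 *	clock-names = "core_clk", "ref_clk";
 *	freq-table-hz = <100000000 200000000>, <19200000 19200000>;
 *
 * A clock named "ref_clk" is additionally flagged keep_link_active by the
 * code above, so the core keeps it running while the link must stay active.
 */
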
static bool phandle_exists(const struct device_node *np,
			   const char *phandle_name, int index)
{
	struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);

	if (parse_np)
		of_node_put(parse_np);

	return parse_np != NULL;
}

#define MAX_PROP_SIZE 32
static int ufshcd_populate_vreg(struct device *dev, const char *name,
				struct ufs_vreg **out_vreg)
{
	int ret = 0;
	char prop_name[MAX_PROP_SIZE];
	struct ufs_vreg *vreg = NULL;
	struct device_node *np = dev->of_node;

	if (!np) {
		dev_err(dev, "%s: non DT initialization\n", __func__);
		goto out;
	}

	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
	if (!phandle_exists(np, prop_name, 0)) {
		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
				__func__, prop_name);
		goto out;
	}

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg)
		return -ENOMEM;

	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!vreg->name)
		return -ENOMEM;

	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
		dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
		vreg->max_uA = 0;
	}
out:
	if (!ret)
		*out_vreg = vreg;
	return ret;
}

/**
 * ufshcd_parse_regulator_info - get regulator info from device tree
 * @hba: per adapter instance
 *
 * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
 * If any of the supplies is not defined, it is assumed to be always-on and
 * zero is returned. If a property is defined but parsing fails, the
 * corresponding error is returned.
 */
static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
out:
	return err;
}

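/*
 * Example (illustrative only): the host DT node may reference the supplies
 * looked up above.  The regulator phandles and current limit below are
 * hypothetical:
 *
 *	vcc-supply = <&vreg_l20a_2p95>;
 *	vcc-max-microamp = <800000>;
 *	vccq2-supply = <&vreg_s4a_1p8>;
 *
 * A supply that is absent from the DT is simply treated as always-on.
 */
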
#ifdef CONFIG_PM
/**
 * ufshcd_pltfrm_suspend - suspend power management function
 * @dev: pointer to device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
int ufshcd_pltfrm_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend);

/**
 * ufshcd_pltfrm_resume - resume power management function
 * @dev: pointer to device handle
 *
 * Returns 0 if successful
 * Returns non-zero otherwise
 */
int ufshcd_pltfrm_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume);

int ufshcd_pltfrm_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend);

int ufshcd_pltfrm_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume);

int ufshcd_pltfrm_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle);

#endif /* CONFIG_PM */

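/*
 * Example (sketch, not compiled here): a vendor glue driver typically wires
 * the exported callbacks above into its dev_pm_ops.  The structure name is
 * illustrative:
 *
 *	static const struct dev_pm_ops ufs_vendor_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pltfrm_suspend,
 *					ufshcd_pltfrm_resume)
 *		SET_RUNTIME_PM_OPS(ufshcd_pltfrm_runtime_suspend,
 *				   ufshcd_pltfrm_runtime_resume,
 *				   ufshcd_pltfrm_runtime_idle)
 *	};
 */
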
void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);

static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	int ret;

	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
		&hba->lanes_per_direction);
	if (ret) {
		dev_dbg(hba->dev,
			"%s: failed to read lanes-per-direction, ret=%d\n",
			__func__, ret);
		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
	}
}

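/*
 * Example (illustrative only): a single-lane board would set
 *
 *	lanes-per-direction = <1>;
 *
 * in the host DT node; when the property is missing, the default of
 * UFSHCD_DEFAULT_LANES_PER_DIRECTION (2) is used.
 */
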
/**
 * ufshcd_get_pwr_dev_param - get the final agreed attributes for
 *                            power mode change
 * @pltfrm_param: pointer to platform parameters
 * @dev_max: pointer to device attributes
 * @agreed_pwr: returned agreed attributes
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
			     struct ufs_pa_layer_attr *dev_max,
			     struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_pltfrm_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_pltfrm_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
		is_pltfrm_max_hs = true;
		min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
					pltfrm_param->hs_tx_gear);
	} else {
		min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
					pltfrm_param->pwm_tx_gear);
	}

	/*
	 * The device doesn't support HS but
	 * pltfrm_param->desired_working_mode is HS,
	 * thus the device and pltfrm_param don't agree.
	 */
	if (!is_dev_sup_hs && is_pltfrm_max_hs) {
		pr_info("%s: device doesn't support HS\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_pltfrm_max_hs) {
		/*
		 * Since the device supports HS, it supports FAST_MODE.
		 * Since pltfrm_param->desired_working_mode is also HS,
		 * the final decision (FAST/FASTAUTO) is made according
		 * to pltfrm_param, as it is the restricting factor.
		 */
		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	} else {
		/*
		 * Here pltfrm_param->desired_working_mode is PWM.
		 * It doesn't matter whether the device supports HS or PWM;
		 * in both cases pltfrm_param->desired_working_mode
		 * determines the mode.
		 */
		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	}

	/*
	 * We would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences;
	 * the same decision is made for rx.
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    pltfrm_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    pltfrm_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * If the device capabilities and the vendor pre-defined preferences
	 * are both HS or both PWM, the chosen working gear is the minimum of
	 * the two gears.
	 * If one is PWM and one is HS, the PWM side gets to decide the gear,
	 * as it is the one that also decided above which power mode the
	 * device will be configured to.
	 */
	if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
	    (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
		agreed_pwr->gear_rx =
			min_t(u32, min_dev_gear, min_pltfrm_gear);
	} else if (!is_dev_sup_hs) {
		agreed_pwr->gear_rx = min_dev_gear;
	} else {
		agreed_pwr->gear_rx = min_pltfrm_gear;
	}
	agreed_pwr->gear_tx = agreed_pwr->gear_rx;

	agreed_pwr->hs_rate = pltfrm_param->hs_rate;

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);

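/*
 * Example (sketch, not compiled here): a vendor driver usually calls this
 * helper from its pwr_change_notify variant op at PRE_CHANGE, capping the
 * device capabilities with its own platform limits.  The limits below are
 * purely illustrative:
 *
 *	struct ufs_dev_params host_cap = {
 *		.tx_lanes = UFS_LANE_2,
 *		.rx_lanes = UFS_LANE_2,
 *		.hs_rx_gear = UFS_HS_G3,
 *		.hs_tx_gear = UFS_HS_G3,
 *		.pwm_rx_gear = UFS_PWM_G4,
 *		.pwm_tx_gear = UFS_PWM_G4,
 *		.rx_pwr_pwm = SLOW_MODE,
 *		.tx_pwr_pwm = SLOW_MODE,
 *		.rx_pwr_hs = FAST_MODE,
 *		.tx_pwr_hs = FAST_MODE,
 *		.hs_rate = PA_HS_MODE_B,
 *		.desired_working_mode = UFS_HS_MODE,
 *	};
 *
 *	err = ufshcd_get_pwr_dev_param(&host_cap, dev_max_params,
 *				       dev_req_params);
 */
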
/**
 * ufshcd_pltfrm_init - probe routine of the driver
 * @pdev: pointer to Platform device handle
 * @vops: pointer to variant ops
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_pltfrm_init(struct platform_device *pdev,
		       const struct ufs_hba_variant_ops *vops)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int irq, err;
	struct device *dev = &pdev->dev;

	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base)) {
		err = PTR_ERR(mmio_base);
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto out;
	}

	err = ufshcd_alloc_host(dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		goto out;
	}

	hba->vops = vops;

	err = ufshcd_parse_clock_info(hba);
	if (err) {
		dev_err(&pdev->dev, "%s: clock parse failed %d\n",
				__func__, err);
		goto dealloc_host;
	}
	err = ufshcd_parse_regulator_info(hba);
	if (err) {
		dev_err(&pdev->dev, "%s: regulator init failed %d\n",
				__func__, err);
		goto dealloc_host;
	}

	ufshcd_init_lanes_per_dir(hba);

	err = ufshcd_init(hba, mmio_base, irq);
	if (err) {
		dev_err(dev, "Initialization failed\n");
		goto dealloc_host;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

dealloc_host:
	ufshcd_dealloc_host(hba);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);

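/*
 * Example (sketch, not compiled here): a minimal vendor glue driver built on
 * top of this file registers a platform driver whose probe hands its variant
 * ops to ufshcd_pltfrm_init().  All "vendor" names below are illustrative,
 * and remove/of_match_table handling is omitted for brevity:
 *
 *	static const struct ufs_hba_variant_ops ufs_vendor_vops = {
 *		.name = "ufs-vendor",
 *	};
 *
 *	static int ufs_vendor_probe(struct platform_device *pdev)
 *	{
 *		return ufshcd_pltfrm_init(pdev, &ufs_vendor_vops);
 *	}
 *
 *	static struct platform_driver ufs_vendor_driver = {
 *		.probe = ufs_vendor_probe,
 *		.shutdown = ufshcd_pltfrm_shutdown,
 *		.driver = {
 *			.name = "ufshcd-vendor",
 *			.pm = &ufs_vendor_pm_ops,
 *		},
 *	};
 */
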
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);