// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

static struct opp_device *_find_opp_dev(const struct device *dev,
					struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return opp_dev;

	return NULL;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;
	bool found;

	list_for_each_entry(opp_table, &opp_tables, node) {
		mutex_lock(&opp_table->lock);
		found = !!_find_opp_dev(dev, opp_table);
		mutex_unlock(&opp_table->lock);

		if (found) {
			_get_opp_table_kref(opp_table);

			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp: opp for which frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rate;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
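
/*
 * Example (illustrative sketch, not part of this file): a driver that already
 * holds a valid 'opp' reference could read its properties like this; 'dev' and
 * 'opp' are assumed to be provided by the caller.
 *
 *	unsigned long volt = dev_pm_opp_get_voltage(opp);
 *	unsigned long freq = dev_pm_opp_get_freq(opp);
 *
 *	dev_info(dev, "OPP: %lu Hz @ %lu uV\n", freq, volt);
 */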

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp: opp for which level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		goto put_opp_table;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev: device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(opp_table);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
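
/*
 * Example (illustrative sketch, not part of this file): a devfreq/cpufreq
 * style consumer could size its frequency table from the OPP count; 'dev' is
 * assumed to have a registered OPP table and 'freq_table' is a hypothetical
 * caller-owned array.
 *
 *	int count = dev_pm_opp_get_opp_count(dev);
 *
 *	if (count <= 0)
 *		return count ? count : -ENODEV;
 *
 *	freq_table = kcalloc(count, sizeof(*freq_table), GFP_KERNEL);
 *	if (!freq_table)
 *		return -ENOMEM;
 */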

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. If false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
		    temp_opp->rate == freq) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
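
/*
 * Example (illustrative sketch, not part of this file): look up the OPP for a
 * known, exact rate and read its voltage; 'dev' and 'rate_hz' are assumed to
 * come from the caller.
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long volt_uv;
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, rate_hz, true);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *
 *	volt_uv = dev_pm_opp_get_voltage(opp);
 *	dev_pm_opp_put(opp);
 */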

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->level == level) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	opp = _find_freq_ceil(opp_table, freq);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
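
/*
 * Example (illustrative sketch, not part of this file): the usual pattern of
 * rounding a requested rate up to a supported OPP before switching to it;
 * 'dev' and 'target' are assumed to come from the caller.
 *
 *	unsigned long freq = target;
 *	struct dev_pm_opp *opp;
 *
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *
 *	dev_pm_opp_put(opp);
 *	return dev_pm_opp_set_rate(dev, freq);
 *
 * On success 'freq' holds the exact rate of the chosen OPP.
 */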

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
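
/*
 * Example (illustrative sketch, not part of this file): walk all available
 * OPPs from the highest to the lowest frequency using the floor helper; 'dev'
 * is assumed to have a registered OPP table.
 *
 *	unsigned long freq = ULONG_MAX;
 *	struct dev_pm_opp *opp;
 *
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_floor(dev, &freq))) {
 *		pr_info("OPP: %lu Hz\n", freq);
 *		dev_pm_opp_put(opp);
 *		if (!freq)
 *			break;
 *		freq--;
 *	}
 */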

/**
 * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
 *					 target voltage.
 * @dev: Device for which we do this operation.
 * @u_volt: Target voltage.
 *
 * Search for OPP with highest (ceil) frequency and has voltage <= u_volt.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error which should be
 * handled using IS_ERR.
 *
 * Error return values can be:
 * EINVAL: bad parameters
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
						     unsigned long u_volt)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !u_volt) {
		dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
			u_volt);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			if (temp_opp->supplies[0].u_volt > u_volt)
				break;
			opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
					    unsigned long freq)
{
	int ret;

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	}

	return ret;
}

static int _generic_set_opp_regulator(struct opp_table *opp_table,
				      struct device *dev,
				      unsigned long old_freq,
				      unsigned long freq,
				      struct dev_pm_opp_supply *old_supply,
				      struct dev_pm_opp_supply *new_supply)
{
	struct regulator *reg = opp_table->regulators[0];
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(opp_table->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq >= old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d", ret);
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev, bool remove)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (remove) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				remove ? "remove" : "set", i, ret);
			return ret;
		}
	}

	return 0;
}

static int _set_opp_custom(const struct opp_table *opp_table,
			   struct device *dev, unsigned long old_freq,
			   unsigned long freq,
			   struct dev_pm_opp_supply *old_supply,
			   struct dev_pm_opp_supply *new_supply)
{
	struct dev_pm_set_opp_data *data;
	int size;

	data = opp_table->set_opp_data;
	data->regulators = opp_table->regulators;
	data->regulator_count = opp_table->regulator_count;
	data->clk = opp_table->clk;
	data->dev = dev;

	data->old_opp.rate = old_freq;
	size = sizeof(*old_supply) * opp_table->regulator_count;
	if (!old_supply)
		memset(data->old_opp.supplies, 0, size);
	else
		memcpy(data->old_opp.supplies, old_supply, size);

	data->new_opp.rate = freq;
	memcpy(data->new_opp.supplies, new_supply, size);

	return opp_table->set_opp(data);
}

static int _set_required_opp(struct device *dev, struct device *pd_dev,
			     struct dev_pm_opp *opp, int i)
{
	unsigned int pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
	int ret;

	if (!pd_dev)
		return 0;

	ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
	if (ret) {
		dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
			dev_name(pd_dev), pstate, ret);
	}

	return ret;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev,
			      struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
	int i, ret = 0;

	if (!required_opp_tables)
		return 0;

	/* Single genpd case */
	if (!genpd_virt_devs)
		return _set_required_opp(dev, dev, opp, 0);

	/* Multiple genpd case */

	/*
	 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
	 * after it is freed from another thread.
	 */
	mutex_lock(&opp_table->genpd_virt_dev_lock);

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (up) {
		for (i = 0; i < opp_table->required_opp_count; i++) {
			ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	} else {
		for (i = opp_table->required_opp_count - 1; i >= 0; i--) {
			ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	}

	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	return ret;
}

/**
 * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp
 * @dev: device for which we do this operation
 * @opp: opp based on which the bandwidth levels are to be configured
 *
 * This configures the bandwidth to the levels specified by the OPP. However
 * if the OPP specified is NULL the bandwidth levels are cleared out.
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (opp)
		ret = _set_opp_bw(opp_table, opp, dev, false);
	else
		ret = _set_opp_bw(opp_table, NULL, dev, true);

	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw);
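
/*
 * Example (illustrative sketch, not part of this file): drop the interconnect
 * bandwidth vote on runtime suspend and restore it from the active OPP on
 * resume; 'dev' and 'cur_opp' are assumed to be caller-maintained state.
 *
 *	In the suspend path:
 *		dev_pm_opp_set_bw(dev, NULL);
 *
 *	In the resume path:
 *		dev_pm_opp_set_bw(dev, cur_opp);
 */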

static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev, true);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_required_opps(dev, opp_table, NULL, false);

	opp_table->enabled = false;
	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
 * fmax provided by an OPP should have already rounded target_freq to that
 * OPP's frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq, temp_freq;
	struct dev_pm_opp *old_opp, *opp;
	struct clk *clk;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (unlikely(!target_freq)) {
		ret = _opp_set_rate_zero(dev, opp_table);
		goto put_opp_table;
	}

	clk = opp_table->clk;
	if (IS_ERR(clk)) {
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);
		ret = PTR_ERR(clk);
		goto put_opp_table;
	}

	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (opp_table->enabled && old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		ret = 0;
		goto put_opp_table;
	}

	/*
	 * For IO devices which require an OPP on some platforms/SoCs
	 * while just needing to scale the clock on some others
	 * we look for empty OPP tables with just a clock handle and
	 * scale only the clk. This makes dev_pm_opp_set_rate()
	 * equivalent to a clk_set_rate()
	 */
	if (!_get_opp_count(opp_table)) {
		ret = _generic_set_opp_clk_only(dev, clk, freq);
		goto put_opp_table;
	}

	temp_freq = old_freq;
	old_opp = _find_freq_ceil(opp_table, &temp_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	temp_freq = freq;
	opp = _find_freq_ceil(opp_table, &temp_freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		goto put_old_opp;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	/* Scaling up? Configure required OPPs before frequency */
	if (freq >= old_freq) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret)
			goto put_opp;
	}

	if (opp_table->set_opp) {
		ret = _set_opp_custom(opp_table, dev, old_freq, freq,
				      IS_ERR(old_opp) ? NULL : old_opp->supplies,
				      opp->supplies);
	} else if (opp_table->regulators) {
		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
						 opp->supplies);
	} else {
		/* Only frequency scaling */
		ret = _generic_set_opp_clk_only(dev, clk, freq);
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (!ret && freq < old_freq) {
		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret)
			dev_err(dev, "Failed to set required opps: %d\n", ret);
	}

	if (!ret) {
		ret = _set_opp_bw(opp_table, opp, dev, false);
		if (!ret)
			opp_table->enabled = true;
	}

put_opp:
	dev_pm_opp_put(opp);
put_old_opp:
	if (!IS_ERR(old_opp))
		dev_pm_opp_put(old_opp);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
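
/*
 * Example (illustrative sketch, not part of this file): a driver scaling its
 * device only needs to pass the target rate; clock, regulator, required-OPP
 * and bandwidth updates are all handled above. 'dev' and 'target_hz' are
 * assumed to come from the caller.
 *
 *	ret = dev_pm_opp_set_rate(dev, target_hz);
 *	if (ret)
 *		dev_err(dev, "failed to switch OPP: %d\n", ret);
 *
 * Passing a target of 0 drops the device's votes (bandwidth, regulator,
 * required OPPs) without touching the clock, as done in _opp_set_rate_zero().
 */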

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
						struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	list_add(&opp_dev->node, &opp_table->dev_list);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	mutex_lock(&opp_table->lock);
	opp_dev = _add_opp_dev_unlocked(dev, opp_table);
	mutex_unlock(&opp_table->lock);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	mutex_init(&opp_table->genpd_virt_dev_lock);
	INIT_LIST_HEAD(&opp_table->dev_list);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret == -EPROBE_DEFER)
			goto remove_opp_dev;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
	}

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto put_clk;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	/* Secure the device table modification */
	list_add(&opp_table->node, &opp_tables);
	return opp_table;

put_clk:
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);
remove_opp_dev:
	_remove_opp_dev(opp_dev, opp_table);
err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}
1142*4882a593Smuzhiyun
_opp_get_opp_table(struct device * dev,int index)1143*4882a593Smuzhiyun static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun struct opp_table *opp_table;
1146*4882a593Smuzhiyun
1147*4882a593Smuzhiyun /* Hold our table modification lock here */
1148*4882a593Smuzhiyun mutex_lock(&opp_table_lock);
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun opp_table = _find_opp_table_unlocked(dev);
1151*4882a593Smuzhiyun if (!IS_ERR(opp_table))
1152*4882a593Smuzhiyun goto unlock;
1153*4882a593Smuzhiyun
1154*4882a593Smuzhiyun opp_table = _managed_opp(dev, index);
1155*4882a593Smuzhiyun if (opp_table) {
1156*4882a593Smuzhiyun if (!_add_opp_dev_unlocked(dev, opp_table)) {
1157*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1158*4882a593Smuzhiyun opp_table = ERR_PTR(-ENOMEM);
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun goto unlock;
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun
1163*4882a593Smuzhiyun opp_table = _allocate_opp_table(dev, index);
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun unlock:
1166*4882a593Smuzhiyun mutex_unlock(&opp_table_lock);
1167*4882a593Smuzhiyun
1168*4882a593Smuzhiyun return opp_table;
1169*4882a593Smuzhiyun }
1170*4882a593Smuzhiyun
dev_pm_opp_get_opp_table(struct device * dev)1171*4882a593Smuzhiyun struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
1172*4882a593Smuzhiyun {
1173*4882a593Smuzhiyun return _opp_get_opp_table(dev, 0);
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
1176*4882a593Smuzhiyun
dev_pm_opp_get_opp_table_indexed(struct device * dev,int index)1177*4882a593Smuzhiyun struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
1178*4882a593Smuzhiyun int index)
1179*4882a593Smuzhiyun {
1180*4882a593Smuzhiyun return _opp_get_opp_table(dev, index);
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun static void _opp_table_kref_release(struct kref *kref)
1184*4882a593Smuzhiyun {
1185*4882a593Smuzhiyun struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
1186*4882a593Smuzhiyun struct opp_device *opp_dev, *temp;
1187*4882a593Smuzhiyun int i;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun /* Drop the lock as soon as we can */
1190*4882a593Smuzhiyun list_del(&opp_table->node);
1191*4882a593Smuzhiyun mutex_unlock(&opp_table_lock);
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun _of_clear_opp_table(opp_table);
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun /* Release clk */
1196*4882a593Smuzhiyun if (!IS_ERR(opp_table->clk))
1197*4882a593Smuzhiyun clk_put(opp_table->clk);
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun if (opp_table->paths) {
1200*4882a593Smuzhiyun for (i = 0; i < opp_table->path_count; i++)
1201*4882a593Smuzhiyun icc_put(opp_table->paths[i]);
1202*4882a593Smuzhiyun kfree(opp_table->paths);
1203*4882a593Smuzhiyun }
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun WARN_ON(!list_empty(&opp_table->opp_list));
1206*4882a593Smuzhiyun
1207*4882a593Smuzhiyun list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
1208*4882a593Smuzhiyun /*
1209*4882a593Smuzhiyun * The OPP table is getting removed, drop the performance state
1210*4882a593Smuzhiyun * constraints.
1211*4882a593Smuzhiyun */
1212*4882a593Smuzhiyun if (opp_table->genpd_performance_state)
1213*4882a593Smuzhiyun dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun _remove_opp_dev(opp_dev, opp_table);
1216*4882a593Smuzhiyun }
1217*4882a593Smuzhiyun
1218*4882a593Smuzhiyun mutex_destroy(&opp_table->genpd_virt_dev_lock);
1219*4882a593Smuzhiyun mutex_destroy(&opp_table->lock);
1220*4882a593Smuzhiyun kfree(opp_table);
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
1226*4882a593Smuzhiyun &opp_table_lock);
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun void _opp_free(struct dev_pm_opp *opp)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun kfree(opp);
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun
1235*4882a593Smuzhiyun static void _opp_kref_release(struct dev_pm_opp *opp,
1236*4882a593Smuzhiyun struct opp_table *opp_table)
1237*4882a593Smuzhiyun {
1238*4882a593Smuzhiyun /*
1239*4882a593Smuzhiyun * Notify the changes in the availability of the operable
1240*4882a593Smuzhiyun * frequency/voltage list.
1241*4882a593Smuzhiyun */
1242*4882a593Smuzhiyun blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
1243*4882a593Smuzhiyun _of_opp_free_required_opps(opp_table, opp);
1244*4882a593Smuzhiyun opp_debug_remove_one(opp);
1245*4882a593Smuzhiyun list_del(&opp->node);
1246*4882a593Smuzhiyun kfree(opp);
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun static void _opp_kref_release_unlocked(struct kref *kref)
1250*4882a593Smuzhiyun {
1251*4882a593Smuzhiyun struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1252*4882a593Smuzhiyun struct opp_table *opp_table = opp->opp_table;
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun _opp_kref_release(opp, opp_table);
1255*4882a593Smuzhiyun }
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun static void _opp_kref_release_locked(struct kref *kref)
1258*4882a593Smuzhiyun {
1259*4882a593Smuzhiyun struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1260*4882a593Smuzhiyun struct opp_table *opp_table = opp->opp_table;
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun _opp_kref_release(opp, opp_table);
1263*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun void dev_pm_opp_get(struct dev_pm_opp *opp)
1267*4882a593Smuzhiyun {
1268*4882a593Smuzhiyun kref_get(&opp->kref);
1269*4882a593Smuzhiyun }
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun void dev_pm_opp_put(struct dev_pm_opp *opp)
1272*4882a593Smuzhiyun {
1273*4882a593Smuzhiyun kref_put_mutex(&opp->kref, _opp_kref_release_locked,
1274*4882a593Smuzhiyun &opp->opp_table->lock);
1275*4882a593Smuzhiyun }
1276*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_put);
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
1279*4882a593Smuzhiyun {
1280*4882a593Smuzhiyun kref_put(&opp->kref, _opp_kref_release_unlocked);
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun /**
1284*4882a593Smuzhiyun * dev_pm_opp_remove() - Remove an OPP from OPP table
1285*4882a593Smuzhiyun * @dev: device for which we do this operation
1286*4882a593Smuzhiyun  * @freq: Frequency (in Hz) of the OPP to remove
1287*4882a593Smuzhiyun *
1288*4882a593Smuzhiyun * This function removes an opp from the opp table.
1289*4882a593Smuzhiyun */
1290*4882a593Smuzhiyun void dev_pm_opp_remove(struct device *dev, unsigned long freq)
1291*4882a593Smuzhiyun {
1292*4882a593Smuzhiyun struct dev_pm_opp *opp;
1293*4882a593Smuzhiyun struct opp_table *opp_table;
1294*4882a593Smuzhiyun bool found = false;
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun opp_table = _find_opp_table(dev);
1297*4882a593Smuzhiyun if (IS_ERR(opp_table))
1298*4882a593Smuzhiyun return;
1299*4882a593Smuzhiyun
1300*4882a593Smuzhiyun mutex_lock(&opp_table->lock);
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun list_for_each_entry(opp, &opp_table->opp_list, node) {
1303*4882a593Smuzhiyun if (opp->rate == freq) {
1304*4882a593Smuzhiyun found = true;
1305*4882a593Smuzhiyun break;
1306*4882a593Smuzhiyun }
1307*4882a593Smuzhiyun }
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun if (found) {
1312*4882a593Smuzhiyun dev_pm_opp_put(opp);
1313*4882a593Smuzhiyun
1314*4882a593Smuzhiyun /* Drop the reference taken by dev_pm_opp_add() */
1315*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1316*4882a593Smuzhiyun } else {
1317*4882a593Smuzhiyun dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
1318*4882a593Smuzhiyun __func__, freq);
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun /* Drop the reference taken by _find_opp_table() */
1322*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1323*4882a593Smuzhiyun }
1324*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
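
/*
 * Illustrative usage sketch (not part of this file; "dev" and the numeric
 * values are hypothetical): a driver that registered a dynamic OPP with
 * dev_pm_opp_add() drops it again with a matching dev_pm_opp_remove():
 *
 *	ret = dev_pm_opp_add(dev, 1200000000, 1100000);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_opp_remove(dev, 1200000000);
 */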
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun bool _opp_remove_all_static(struct opp_table *opp_table)
1327*4882a593Smuzhiyun {
1328*4882a593Smuzhiyun struct dev_pm_opp *opp, *tmp;
1329*4882a593Smuzhiyun bool ret = true;
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun mutex_lock(&opp_table->lock);
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun if (!opp_table->parsed_static_opps) {
1334*4882a593Smuzhiyun ret = false;
1335*4882a593Smuzhiyun goto unlock;
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun if (--opp_table->parsed_static_opps)
1339*4882a593Smuzhiyun goto unlock;
1340*4882a593Smuzhiyun
1341*4882a593Smuzhiyun list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
1342*4882a593Smuzhiyun if (!opp->dynamic)
1343*4882a593Smuzhiyun dev_pm_opp_put_unlocked(opp);
1344*4882a593Smuzhiyun }
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun unlock:
1347*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun return ret;
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun /**
1353*4882a593Smuzhiyun * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
1354*4882a593Smuzhiyun * @dev: device for which we do this operation
1355*4882a593Smuzhiyun *
1356*4882a593Smuzhiyun * This function removes all dynamically created OPPs from the opp table.
1357*4882a593Smuzhiyun */
1358*4882a593Smuzhiyun void dev_pm_opp_remove_all_dynamic(struct device *dev)
1359*4882a593Smuzhiyun {
1360*4882a593Smuzhiyun struct opp_table *opp_table;
1361*4882a593Smuzhiyun struct dev_pm_opp *opp, *temp;
1362*4882a593Smuzhiyun int count = 0;
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun opp_table = _find_opp_table(dev);
1365*4882a593Smuzhiyun if (IS_ERR(opp_table))
1366*4882a593Smuzhiyun return;
1367*4882a593Smuzhiyun
1368*4882a593Smuzhiyun mutex_lock(&opp_table->lock);
1369*4882a593Smuzhiyun list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
1370*4882a593Smuzhiyun if (opp->dynamic) {
1371*4882a593Smuzhiyun dev_pm_opp_put_unlocked(opp);
1372*4882a593Smuzhiyun count++;
1373*4882a593Smuzhiyun }
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun /* Drop the references taken by dev_pm_opp_add() */
1378*4882a593Smuzhiyun while (count--)
1379*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun /* Drop the reference taken by _find_opp_table() */
1382*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
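
/*
 * Illustrative usage sketch (hypothetical teardown path, frequencies and
 * voltages made up): rather than removing each dynamically added OPP with
 * dev_pm_opp_remove(), a driver can drop them all at once:
 *
 *	dev_pm_opp_add(dev, 800000000, 900000);
 *	dev_pm_opp_add(dev, 1200000000, 1100000);
 *	...
 *	dev_pm_opp_remove_all_dynamic(dev);
 */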
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun struct dev_pm_opp *_opp_allocate(struct opp_table *table)
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun struct dev_pm_opp *opp;
1389*4882a593Smuzhiyun int supply_count, supply_size, icc_size;
1390*4882a593Smuzhiyun
1391*4882a593Smuzhiyun /* Allocate space for at least one supply */
1392*4882a593Smuzhiyun supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
1393*4882a593Smuzhiyun supply_size = sizeof(*opp->supplies) * supply_count;
1394*4882a593Smuzhiyun icc_size = sizeof(*opp->bandwidth) * table->path_count;
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun /* allocate new OPP node and supplies structures */
1397*4882a593Smuzhiyun opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun if (!opp)
1400*4882a593Smuzhiyun return NULL;
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun 	/* Point supplies (and bandwidth) at the memory right after the OPP structure */
1403*4882a593Smuzhiyun opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
1404*4882a593Smuzhiyun if (icc_size)
1405*4882a593Smuzhiyun opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
1406*4882a593Smuzhiyun INIT_LIST_HEAD(&opp->node);
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun return opp;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
1412*4882a593Smuzhiyun struct opp_table *opp_table)
1413*4882a593Smuzhiyun {
1414*4882a593Smuzhiyun struct regulator *reg;
1415*4882a593Smuzhiyun int i;
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun if (!opp_table->regulators)
1418*4882a593Smuzhiyun return true;
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun for (i = 0; i < opp_table->regulator_count; i++) {
1421*4882a593Smuzhiyun reg = opp_table->regulators[i];
1422*4882a593Smuzhiyun
1423*4882a593Smuzhiyun if (!regulator_is_supported_voltage(reg,
1424*4882a593Smuzhiyun opp->supplies[i].u_volt_min,
1425*4882a593Smuzhiyun opp->supplies[i].u_volt_max)) {
1426*4882a593Smuzhiyun pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
1427*4882a593Smuzhiyun __func__, opp->supplies[i].u_volt_min,
1428*4882a593Smuzhiyun opp->supplies[i].u_volt_max);
1429*4882a593Smuzhiyun return false;
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun return true;
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
1437*4882a593Smuzhiyun {
1438*4882a593Smuzhiyun if (opp1->rate != opp2->rate)
1439*4882a593Smuzhiyun return opp1->rate < opp2->rate ? -1 : 1;
1440*4882a593Smuzhiyun if (opp1->bandwidth && opp2->bandwidth &&
1441*4882a593Smuzhiyun opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
1442*4882a593Smuzhiyun return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
1443*4882a593Smuzhiyun if (opp1->level != opp2->level)
1444*4882a593Smuzhiyun return opp1->level < opp2->level ? -1 : 1;
1445*4882a593Smuzhiyun return 0;
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
1449*4882a593Smuzhiyun struct opp_table *opp_table,
1450*4882a593Smuzhiyun struct list_head **head)
1451*4882a593Smuzhiyun {
1452*4882a593Smuzhiyun struct dev_pm_opp *opp;
1453*4882a593Smuzhiyun int opp_cmp;
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun /*
1456*4882a593Smuzhiyun * Insert new OPP in order of increasing frequency and discard if
1457*4882a593Smuzhiyun * already present.
1458*4882a593Smuzhiyun *
1459*4882a593Smuzhiyun * Need to use &opp_table->opp_list in the condition part of the 'for'
1460*4882a593Smuzhiyun * loop, don't replace it with head otherwise it will become an infinite
1461*4882a593Smuzhiyun * loop.
1462*4882a593Smuzhiyun */
1463*4882a593Smuzhiyun list_for_each_entry(opp, &opp_table->opp_list, node) {
1464*4882a593Smuzhiyun opp_cmp = _opp_compare_key(new_opp, opp);
1465*4882a593Smuzhiyun if (opp_cmp > 0) {
1466*4882a593Smuzhiyun *head = &opp->node;
1467*4882a593Smuzhiyun continue;
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun if (opp_cmp < 0)
1471*4882a593Smuzhiyun return 0;
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun /* Duplicate OPPs */
1474*4882a593Smuzhiyun dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1475*4882a593Smuzhiyun __func__, opp->rate, opp->supplies[0].u_volt,
1476*4882a593Smuzhiyun opp->available, new_opp->rate,
1477*4882a593Smuzhiyun new_opp->supplies[0].u_volt, new_opp->available);
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun /* Should we compare voltages for all regulators here ? */
1480*4882a593Smuzhiyun return opp->available &&
1481*4882a593Smuzhiyun new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun return 0;
1485*4882a593Smuzhiyun }
1486*4882a593Smuzhiyun
1487*4882a593Smuzhiyun /*
1488*4882a593Smuzhiyun * Returns:
1489*4882a593Smuzhiyun  * 0: On success. An appropriate error message is printed for duplicate OPPs.
1490*4882a593Smuzhiyun * -EBUSY: For OPP with same freq/volt and is available. The callers of
1491*4882a593Smuzhiyun * _opp_add() must return 0 if they receive -EBUSY from it. This is to make
1492*4882a593Smuzhiyun * sure we don't print error messages unnecessarily if different parts of
1493*4882a593Smuzhiyun * kernel try to initialize the OPP table.
1494*4882a593Smuzhiyun  * -EEXIST: For an OPP with the same freq but a different volt, or one that is
1495*4882a593Smuzhiyun  * unavailable. This should be considered an error by the callers of _opp_add().
1496*4882a593Smuzhiyun */
1497*4882a593Smuzhiyun int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
1498*4882a593Smuzhiyun struct opp_table *opp_table, bool rate_not_available)
1499*4882a593Smuzhiyun {
1500*4882a593Smuzhiyun struct list_head *head;
1501*4882a593Smuzhiyun int ret;
1502*4882a593Smuzhiyun
1503*4882a593Smuzhiyun mutex_lock(&opp_table->lock);
1504*4882a593Smuzhiyun head = &opp_table->opp_list;
1505*4882a593Smuzhiyun
1506*4882a593Smuzhiyun if (likely(!rate_not_available)) {
1507*4882a593Smuzhiyun ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
1508*4882a593Smuzhiyun if (ret) {
1509*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
1510*4882a593Smuzhiyun return ret;
1511*4882a593Smuzhiyun }
1512*4882a593Smuzhiyun }
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun list_add(&new_opp->node, head);
1515*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
1516*4882a593Smuzhiyun
1517*4882a593Smuzhiyun new_opp->opp_table = opp_table;
1518*4882a593Smuzhiyun kref_init(&new_opp->kref);
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun opp_debug_create_one(new_opp, opp_table);
1521*4882a593Smuzhiyun
1522*4882a593Smuzhiyun if (!_opp_supported_by_regulators(new_opp, opp_table)) {
1523*4882a593Smuzhiyun new_opp->available = false;
1524*4882a593Smuzhiyun dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1525*4882a593Smuzhiyun __func__, new_opp->rate);
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun return 0;
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun /**
1532*4882a593Smuzhiyun  * _opp_add_v1() - Allocate an OPP based on v1 bindings.
1533*4882a593Smuzhiyun * @opp_table: OPP table
1534*4882a593Smuzhiyun * @dev: device for which we do this operation
1535*4882a593Smuzhiyun * @freq: Frequency in Hz for this OPP
1536*4882a593Smuzhiyun * @u_volt: Voltage in uVolts for this OPP
1537*4882a593Smuzhiyun * @dynamic: Dynamically added OPPs.
1538*4882a593Smuzhiyun *
1539*4882a593Smuzhiyun * This function adds an opp definition to the opp table and returns status.
1540*4882a593Smuzhiyun * The opp is made available by default and it can be controlled using
1541*4882a593Smuzhiyun * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1542*4882a593Smuzhiyun *
1543*4882a593Smuzhiyun  * NOTE: The "dynamic" parameter distinguishes these OPPs from the static ones
1544*4882a593Smuzhiyun  * added by dev_pm_opp_of_add_table and freed by dev_pm_opp_of_remove_table.
1545*4882a593Smuzhiyun *
1546*4882a593Smuzhiyun * Return:
1547*4882a593Smuzhiyun * 0 On success OR
1548*4882a593Smuzhiyun * Duplicate OPPs (both freq and volt are same) and opp->available
1549*4882a593Smuzhiyun  * -EEXIST	Freq is the same and volt is different OR
1550*4882a593Smuzhiyun * Duplicate OPPs (both freq and volt are same) and !opp->available
1551*4882a593Smuzhiyun * -ENOMEM Memory allocation failure
1552*4882a593Smuzhiyun */
1553*4882a593Smuzhiyun int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
1554*4882a593Smuzhiyun unsigned long freq, long u_volt, bool dynamic)
1555*4882a593Smuzhiyun {
1556*4882a593Smuzhiyun struct dev_pm_opp *new_opp;
1557*4882a593Smuzhiyun unsigned long tol;
1558*4882a593Smuzhiyun int ret;
1559*4882a593Smuzhiyun
1560*4882a593Smuzhiyun new_opp = _opp_allocate(opp_table);
1561*4882a593Smuzhiyun if (!new_opp)
1562*4882a593Smuzhiyun return -ENOMEM;
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun /* populate the opp table */
1565*4882a593Smuzhiyun new_opp->rate = freq;
1566*4882a593Smuzhiyun tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
1567*4882a593Smuzhiyun new_opp->supplies[0].u_volt = u_volt;
1568*4882a593Smuzhiyun new_opp->supplies[0].u_volt_min = u_volt - tol;
1569*4882a593Smuzhiyun new_opp->supplies[0].u_volt_max = u_volt + tol;
1570*4882a593Smuzhiyun new_opp->available = true;
1571*4882a593Smuzhiyun new_opp->dynamic = dynamic;
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun ret = _opp_add(dev, new_opp, opp_table, false);
1574*4882a593Smuzhiyun if (ret) {
1575*4882a593Smuzhiyun /* Don't return error for duplicate OPPs */
1576*4882a593Smuzhiyun if (ret == -EBUSY)
1577*4882a593Smuzhiyun ret = 0;
1578*4882a593Smuzhiyun goto free_opp;
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun /*
1582*4882a593Smuzhiyun * Notify the changes in the availability of the operable
1583*4882a593Smuzhiyun * frequency/voltage list.
1584*4882a593Smuzhiyun */
1585*4882a593Smuzhiyun blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
1586*4882a593Smuzhiyun return 0;
1587*4882a593Smuzhiyun
1588*4882a593Smuzhiyun free_opp:
1589*4882a593Smuzhiyun _opp_free(new_opp);
1590*4882a593Smuzhiyun
1591*4882a593Smuzhiyun return ret;
1592*4882a593Smuzhiyun }
1593*4882a593Smuzhiyun
1594*4882a593Smuzhiyun /**
1595*4882a593Smuzhiyun * dev_pm_opp_set_supported_hw() - Set supported platforms
1596*4882a593Smuzhiyun * @dev: Device for which supported-hw has to be set.
1597*4882a593Smuzhiyun * @versions: Array of hierarchy of versions to match.
1598*4882a593Smuzhiyun * @count: Number of elements in the array.
1599*4882a593Smuzhiyun *
1600*4882a593Smuzhiyun * This is required only for the V2 bindings, and it enables a platform to
1601*4882a593Smuzhiyun  * specify the hierarchy of versions it supports. The OPP layer will then only
1602*4882a593Smuzhiyun  * enable those OPPs whose 'opp-supported-hw' property matches one of the
1603*4882a593Smuzhiyun  * supported versions.
1604*4882a593Smuzhiyun */
1605*4882a593Smuzhiyun struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
1606*4882a593Smuzhiyun const u32 *versions, unsigned int count)
1607*4882a593Smuzhiyun {
1608*4882a593Smuzhiyun struct opp_table *opp_table;
1609*4882a593Smuzhiyun
1610*4882a593Smuzhiyun opp_table = dev_pm_opp_get_opp_table(dev);
1611*4882a593Smuzhiyun if (IS_ERR(opp_table))
1612*4882a593Smuzhiyun return opp_table;
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun /* Make sure there are no concurrent readers while updating opp_table */
1615*4882a593Smuzhiyun WARN_ON(!list_empty(&opp_table->opp_list));
1616*4882a593Smuzhiyun
1617*4882a593Smuzhiyun /* Another CPU that shares the OPP table has set the property ? */
1618*4882a593Smuzhiyun if (opp_table->supported_hw)
1619*4882a593Smuzhiyun return opp_table;
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1622*4882a593Smuzhiyun GFP_KERNEL);
1623*4882a593Smuzhiyun if (!opp_table->supported_hw) {
1624*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1625*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun opp_table->supported_hw_count = count;
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun return opp_table;
1631*4882a593Smuzhiyun }
1632*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
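
/*
 * Illustrative usage sketch (hypothetical platform code, arbitrary version
 * values): the versions passed here are matched against the 'opp-supported-hw'
 * DT property of each OPP when the static OPPs are parsed, and the table is
 * released again with dev_pm_opp_put_supported_hw():
 *
 *	static const u32 hw_version[] = { 0x2, 0x1 };
 *	struct opp_table *opp_table;
 *
 *	opp_table = dev_pm_opp_set_supported_hw(dev, hw_version,
 *						ARRAY_SIZE(hw_version));
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	...
 *	dev_pm_opp_put_supported_hw(opp_table);
 */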
1633*4882a593Smuzhiyun
1634*4882a593Smuzhiyun /**
1635*4882a593Smuzhiyun * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1636*4882a593Smuzhiyun * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
1637*4882a593Smuzhiyun *
1638*4882a593Smuzhiyun * This is required only for the V2 bindings, and is called for a matching
1639*4882a593Smuzhiyun * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1640*4882a593Smuzhiyun * will not be freed.
1641*4882a593Smuzhiyun */
1642*4882a593Smuzhiyun void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
1643*4882a593Smuzhiyun {
1644*4882a593Smuzhiyun /* Make sure there are no concurrent readers while updating opp_table */
1645*4882a593Smuzhiyun WARN_ON(!list_empty(&opp_table->opp_list));
1646*4882a593Smuzhiyun
1647*4882a593Smuzhiyun kfree(opp_table->supported_hw);
1648*4882a593Smuzhiyun opp_table->supported_hw = NULL;
1649*4882a593Smuzhiyun opp_table->supported_hw_count = 0;
1650*4882a593Smuzhiyun
1651*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun /**
1656*4882a593Smuzhiyun * dev_pm_opp_set_prop_name() - Set prop-extn name
1657*4882a593Smuzhiyun * @dev: Device for which the prop-name has to be set.
1658*4882a593Smuzhiyun  * @name: name to append to the property names.
1659*4882a593Smuzhiyun *
1660*4882a593Smuzhiyun * This is required only for the V2 bindings, and it enables a platform to
1661*4882a593Smuzhiyun  * specify the extension to be used for certain property names. The properties
1662*4882a593Smuzhiyun  * to which the extension applies are opp-microvolt and opp-microamp. The OPP
1663*4882a593Smuzhiyun  * core will append -<name> to these property names while looking them up.
1664*4882a593Smuzhiyun */
1665*4882a593Smuzhiyun struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1666*4882a593Smuzhiyun {
1667*4882a593Smuzhiyun struct opp_table *opp_table;
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun opp_table = dev_pm_opp_get_opp_table(dev);
1670*4882a593Smuzhiyun if (IS_ERR(opp_table))
1671*4882a593Smuzhiyun return opp_table;
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun /* Make sure there are no concurrent readers while updating opp_table */
1674*4882a593Smuzhiyun WARN_ON(!list_empty(&opp_table->opp_list));
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun /* Another CPU that shares the OPP table has set the property ? */
1677*4882a593Smuzhiyun if (opp_table->prop_name)
1678*4882a593Smuzhiyun return opp_table;
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1681*4882a593Smuzhiyun if (!opp_table->prop_name) {
1682*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1683*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
1684*4882a593Smuzhiyun }
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun return opp_table;
1687*4882a593Smuzhiyun }
1688*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
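
/*
 * Illustrative usage sketch (the "speed0" name is hypothetical): with this
 * prop-name set, the OPP core looks up 'opp-microvolt-speed0' and
 * 'opp-microamp-speed0' for each OPP instead of the plain property names:
 *
 *	opp_table = dev_pm_opp_set_prop_name(dev, "speed0");
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	...
 *	dev_pm_opp_put_prop_name(opp_table);
 */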
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun /**
1691*4882a593Smuzhiyun * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1692*4882a593Smuzhiyun * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
1693*4882a593Smuzhiyun *
1694*4882a593Smuzhiyun * This is required only for the V2 bindings, and is called for a matching
1695*4882a593Smuzhiyun * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1696*4882a593Smuzhiyun * will not be freed.
1697*4882a593Smuzhiyun */
1698*4882a593Smuzhiyun void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
1699*4882a593Smuzhiyun {
1700*4882a593Smuzhiyun /* Make sure there are no concurrent readers while updating opp_table */
1701*4882a593Smuzhiyun WARN_ON(!list_empty(&opp_table->opp_list));
1702*4882a593Smuzhiyun
1703*4882a593Smuzhiyun kfree(opp_table->prop_name);
1704*4882a593Smuzhiyun opp_table->prop_name = NULL;
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1707*4882a593Smuzhiyun }
1708*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1709*4882a593Smuzhiyun
1710*4882a593Smuzhiyun static int _allocate_set_opp_data(struct opp_table *opp_table)
1711*4882a593Smuzhiyun {
1712*4882a593Smuzhiyun struct dev_pm_set_opp_data *data;
1713*4882a593Smuzhiyun int len, count = opp_table->regulator_count;
1714*4882a593Smuzhiyun
1715*4882a593Smuzhiyun if (WARN_ON(!opp_table->regulators))
1716*4882a593Smuzhiyun return -EINVAL;
1717*4882a593Smuzhiyun
1718*4882a593Smuzhiyun /* space for set_opp_data */
1719*4882a593Smuzhiyun len = sizeof(*data);
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun /* space for old_opp.supplies and new_opp.supplies */
1722*4882a593Smuzhiyun len += 2 * sizeof(struct dev_pm_opp_supply) * count;
1723*4882a593Smuzhiyun
1724*4882a593Smuzhiyun data = kzalloc(len, GFP_KERNEL);
1725*4882a593Smuzhiyun if (!data)
1726*4882a593Smuzhiyun return -ENOMEM;
1727*4882a593Smuzhiyun
1728*4882a593Smuzhiyun data->old_opp.supplies = (void *)(data + 1);
1729*4882a593Smuzhiyun data->new_opp.supplies = data->old_opp.supplies + count;
1730*4882a593Smuzhiyun
1731*4882a593Smuzhiyun opp_table->set_opp_data = data;
1732*4882a593Smuzhiyun
1733*4882a593Smuzhiyun return 0;
1734*4882a593Smuzhiyun }
1735*4882a593Smuzhiyun
1736*4882a593Smuzhiyun static void _free_set_opp_data(struct opp_table *opp_table)
1737*4882a593Smuzhiyun {
1738*4882a593Smuzhiyun kfree(opp_table->set_opp_data);
1739*4882a593Smuzhiyun opp_table->set_opp_data = NULL;
1740*4882a593Smuzhiyun }
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun /**
1743*4882a593Smuzhiyun * dev_pm_opp_set_regulators() - Set regulator names for the device
1744*4882a593Smuzhiyun * @dev: Device for which regulator name is being set.
1745*4882a593Smuzhiyun * @names: Array of pointers to the names of the regulator.
1746*4882a593Smuzhiyun * @count: Number of regulators.
1747*4882a593Smuzhiyun *
1748*4882a593Smuzhiyun * In order to support OPP switching, OPP layer needs to know the name of the
1749*4882a593Smuzhiyun * device's regulators, as the core would be required to switch voltages as
1750*4882a593Smuzhiyun * well.
1751*4882a593Smuzhiyun *
1752*4882a593Smuzhiyun * This must be called before any OPPs are initialized for the device.
1753*4882a593Smuzhiyun */
1754*4882a593Smuzhiyun struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
1755*4882a593Smuzhiyun const char * const names[],
1756*4882a593Smuzhiyun unsigned int count)
1757*4882a593Smuzhiyun {
1758*4882a593Smuzhiyun struct opp_table *opp_table;
1759*4882a593Smuzhiyun struct regulator *reg;
1760*4882a593Smuzhiyun int ret, i;
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun opp_table = dev_pm_opp_get_opp_table(dev);
1763*4882a593Smuzhiyun if (IS_ERR(opp_table))
1764*4882a593Smuzhiyun return opp_table;
1765*4882a593Smuzhiyun
1766*4882a593Smuzhiyun /* This should be called before OPPs are initialized */
1767*4882a593Smuzhiyun if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1768*4882a593Smuzhiyun ret = -EBUSY;
1769*4882a593Smuzhiyun goto err;
1770*4882a593Smuzhiyun }
1771*4882a593Smuzhiyun
1772*4882a593Smuzhiyun /* Another CPU that shares the OPP table has set the regulators ? */
1773*4882a593Smuzhiyun if (opp_table->regulators)
1774*4882a593Smuzhiyun return opp_table;
1775*4882a593Smuzhiyun
1776*4882a593Smuzhiyun opp_table->regulators = kmalloc_array(count,
1777*4882a593Smuzhiyun sizeof(*opp_table->regulators),
1778*4882a593Smuzhiyun GFP_KERNEL);
1779*4882a593Smuzhiyun if (!opp_table->regulators) {
1780*4882a593Smuzhiyun ret = -ENOMEM;
1781*4882a593Smuzhiyun goto err;
1782*4882a593Smuzhiyun }
1783*4882a593Smuzhiyun
1784*4882a593Smuzhiyun for (i = 0; i < count; i++) {
1785*4882a593Smuzhiyun reg = regulator_get_optional(dev, names[i]);
1786*4882a593Smuzhiyun if (IS_ERR(reg)) {
1787*4882a593Smuzhiyun ret = PTR_ERR(reg);
1788*4882a593Smuzhiyun if (ret != -EPROBE_DEFER)
1789*4882a593Smuzhiyun dev_err(dev, "%s: no regulator (%s) found: %d\n",
1790*4882a593Smuzhiyun __func__, names[i], ret);
1791*4882a593Smuzhiyun goto free_regulators;
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun opp_table->regulators[i] = reg;
1795*4882a593Smuzhiyun }
1796*4882a593Smuzhiyun
1797*4882a593Smuzhiyun opp_table->regulator_count = count;
1798*4882a593Smuzhiyun
1799*4882a593Smuzhiyun /* Allocate block only once to pass to set_opp() routines */
1800*4882a593Smuzhiyun ret = _allocate_set_opp_data(opp_table);
1801*4882a593Smuzhiyun if (ret)
1802*4882a593Smuzhiyun goto free_regulators;
1803*4882a593Smuzhiyun
1804*4882a593Smuzhiyun return opp_table;
1805*4882a593Smuzhiyun
1806*4882a593Smuzhiyun free_regulators:
1807*4882a593Smuzhiyun while (i != 0)
1808*4882a593Smuzhiyun regulator_put(opp_table->regulators[--i]);
1809*4882a593Smuzhiyun
1810*4882a593Smuzhiyun kfree(opp_table->regulators);
1811*4882a593Smuzhiyun opp_table->regulators = NULL;
1812*4882a593Smuzhiyun opp_table->regulator_count = -1;
1813*4882a593Smuzhiyun err:
1814*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1815*4882a593Smuzhiyun
1816*4882a593Smuzhiyun return ERR_PTR(ret);
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
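
/*
 * Illustrative usage sketch (hypothetical supply names, error handling
 * abbreviated): a device whose OPP transitions involve two supplies registers
 * them before any OPPs are added:
 *
 *	static const char * const reg_names[] = { "vdd-cpu", "vdd-mem" };
 *	struct opp_table *opp_table;
 *
 *	opp_table = dev_pm_opp_set_regulators(dev, reg_names,
 *					      ARRAY_SIZE(reg_names));
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	...
 *	dev_pm_opp_put_regulators(opp_table);
 */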
1819*4882a593Smuzhiyun
1820*4882a593Smuzhiyun /**
1821*4882a593Smuzhiyun * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
1822*4882a593Smuzhiyun * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
1823*4882a593Smuzhiyun */
1824*4882a593Smuzhiyun void dev_pm_opp_put_regulators(struct opp_table *opp_table)
1825*4882a593Smuzhiyun {
1826*4882a593Smuzhiyun int i;
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun if (!opp_table->regulators)
1829*4882a593Smuzhiyun goto put_opp_table;
1830*4882a593Smuzhiyun
1831*4882a593Smuzhiyun /* Make sure there are no concurrent readers while updating opp_table */
1832*4882a593Smuzhiyun WARN_ON(!list_empty(&opp_table->opp_list));
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun if (opp_table->enabled) {
1835*4882a593Smuzhiyun for (i = opp_table->regulator_count - 1; i >= 0; i--)
1836*4882a593Smuzhiyun regulator_disable(opp_table->regulators[i]);
1837*4882a593Smuzhiyun }
1838*4882a593Smuzhiyun
1839*4882a593Smuzhiyun for (i = opp_table->regulator_count - 1; i >= 0; i--)
1840*4882a593Smuzhiyun regulator_put(opp_table->regulators[i]);
1841*4882a593Smuzhiyun
1842*4882a593Smuzhiyun _free_set_opp_data(opp_table);
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun kfree(opp_table->regulators);
1845*4882a593Smuzhiyun opp_table->regulators = NULL;
1846*4882a593Smuzhiyun opp_table->regulator_count = -1;
1847*4882a593Smuzhiyun
1848*4882a593Smuzhiyun put_opp_table:
1849*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1850*4882a593Smuzhiyun }
1851*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun /**
1854*4882a593Smuzhiyun * dev_pm_opp_set_clkname() - Set clk name for the device
1855*4882a593Smuzhiyun * @dev: Device for which clk name is being set.
1856*4882a593Smuzhiyun * @name: Clk name.
1857*4882a593Smuzhiyun *
1858*4882a593Smuzhiyun * In order to support OPP switching, OPP layer needs to get pointer to the
1859*4882a593Smuzhiyun * clock for the device. Simple cases work fine without using this routine (i.e.
1860*4882a593Smuzhiyun * by passing connection-id as NULL), but for a device with multiple clocks
1861*4882a593Smuzhiyun * available, the OPP core needs to know the exact name of the clk to use.
1862*4882a593Smuzhiyun *
1863*4882a593Smuzhiyun * This must be called before any OPPs are initialized for the device.
1864*4882a593Smuzhiyun */
1865*4882a593Smuzhiyun struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
1866*4882a593Smuzhiyun {
1867*4882a593Smuzhiyun struct opp_table *opp_table;
1868*4882a593Smuzhiyun int ret;
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun opp_table = dev_pm_opp_get_opp_table(dev);
1871*4882a593Smuzhiyun if (IS_ERR(opp_table))
1872*4882a593Smuzhiyun return opp_table;
1873*4882a593Smuzhiyun
1874*4882a593Smuzhiyun /* This should be called before OPPs are initialized */
1875*4882a593Smuzhiyun if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1876*4882a593Smuzhiyun ret = -EBUSY;
1877*4882a593Smuzhiyun goto err;
1878*4882a593Smuzhiyun }
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun /* Already have default clk set, free it */
1881*4882a593Smuzhiyun if (!IS_ERR(opp_table->clk))
1882*4882a593Smuzhiyun clk_put(opp_table->clk);
1883*4882a593Smuzhiyun
1884*4882a593Smuzhiyun /* Find clk for the device */
1885*4882a593Smuzhiyun opp_table->clk = clk_get(dev, name);
1886*4882a593Smuzhiyun if (IS_ERR(opp_table->clk)) {
1887*4882a593Smuzhiyun ret = PTR_ERR(opp_table->clk);
1888*4882a593Smuzhiyun if (ret != -EPROBE_DEFER) {
1889*4882a593Smuzhiyun dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
1890*4882a593Smuzhiyun ret);
1891*4882a593Smuzhiyun }
1892*4882a593Smuzhiyun goto err;
1893*4882a593Smuzhiyun }
1894*4882a593Smuzhiyun
1895*4882a593Smuzhiyun return opp_table;
1896*4882a593Smuzhiyun
1897*4882a593Smuzhiyun err:
1898*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1899*4882a593Smuzhiyun
1900*4882a593Smuzhiyun return ERR_PTR(ret);
1901*4882a593Smuzhiyun }
1902*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
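
/*
 * Illustrative usage sketch (the "core" connection id is hypothetical): a
 * device with several clocks names the one the OPP core should scale, and
 * releases it with dev_pm_opp_put_clkname() on teardown:
 *
 *	opp_table = dev_pm_opp_set_clkname(dev, "core");
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	...
 *	dev_pm_opp_put_clkname(opp_table);
 */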
1903*4882a593Smuzhiyun
1904*4882a593Smuzhiyun /**
1905*4882a593Smuzhiyun * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
1906*4882a593Smuzhiyun * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
1907*4882a593Smuzhiyun */
1908*4882a593Smuzhiyun void dev_pm_opp_put_clkname(struct opp_table *opp_table)
1909*4882a593Smuzhiyun {
1910*4882a593Smuzhiyun /* Make sure there are no concurrent readers while updating opp_table */
1911*4882a593Smuzhiyun WARN_ON(!list_empty(&opp_table->opp_list));
1912*4882a593Smuzhiyun
1913*4882a593Smuzhiyun clk_put(opp_table->clk);
1914*4882a593Smuzhiyun opp_table->clk = ERR_PTR(-EINVAL);
1915*4882a593Smuzhiyun
1916*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1917*4882a593Smuzhiyun }
1918*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun /**
1921*4882a593Smuzhiyun * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
1922*4882a593Smuzhiyun * @dev: Device for which the helper is getting registered.
1923*4882a593Smuzhiyun * @set_opp: Custom set OPP helper.
1924*4882a593Smuzhiyun *
1925*4882a593Smuzhiyun * This is useful to support complex platforms (like platforms with multiple
1926*4882a593Smuzhiyun * regulators per device), instead of the generic OPP set rate helper.
1927*4882a593Smuzhiyun *
1928*4882a593Smuzhiyun * This must be called before any OPPs are initialized for the device.
1929*4882a593Smuzhiyun */
1930*4882a593Smuzhiyun struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
1931*4882a593Smuzhiyun int (*set_opp)(struct dev_pm_set_opp_data *data))
1932*4882a593Smuzhiyun {
1933*4882a593Smuzhiyun struct opp_table *opp_table;
1934*4882a593Smuzhiyun
1935*4882a593Smuzhiyun if (!set_opp)
1936*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun opp_table = dev_pm_opp_get_opp_table(dev);
1939*4882a593Smuzhiyun if (IS_ERR(opp_table))
1940*4882a593Smuzhiyun return opp_table;
1941*4882a593Smuzhiyun
1942*4882a593Smuzhiyun /* This should be called before OPPs are initialized */
1943*4882a593Smuzhiyun if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1944*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1945*4882a593Smuzhiyun return ERR_PTR(-EBUSY);
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun /* Another CPU that shares the OPP table has set the helper ? */
1949*4882a593Smuzhiyun if (!opp_table->set_opp)
1950*4882a593Smuzhiyun opp_table->set_opp = set_opp;
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun return opp_table;
1953*4882a593Smuzhiyun }
1954*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
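
/*
 * Illustrative usage sketch (my_set_opp() and my_platform_switch() are
 * hypothetical): a platform that must sequence its clock and regulators itself
 * plugs in a custom transition routine before adding any OPPs:
 *
 *	static int my_set_opp(struct dev_pm_set_opp_data *data)
 *	{
 *		return my_platform_switch(data);
 *	}
 *
 *	opp_table = dev_pm_opp_register_set_opp_helper(dev, my_set_opp);
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	...
 *	dev_pm_opp_unregister_set_opp_helper(opp_table);
 */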
1955*4882a593Smuzhiyun
1956*4882a593Smuzhiyun /**
1957*4882a593Smuzhiyun * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
1958*4882a593Smuzhiyun * set_opp helper
1959*4882a593Smuzhiyun * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
1960*4882a593Smuzhiyun *
1961*4882a593Smuzhiyun * Release resources blocked for platform specific set_opp helper.
1962*4882a593Smuzhiyun */
1963*4882a593Smuzhiyun void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
1964*4882a593Smuzhiyun {
1965*4882a593Smuzhiyun /* Make sure there are no concurrent readers while updating opp_table */
1966*4882a593Smuzhiyun WARN_ON(!list_empty(&opp_table->opp_list));
1967*4882a593Smuzhiyun
1968*4882a593Smuzhiyun opp_table->set_opp = NULL;
1969*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
1970*4882a593Smuzhiyun }
1971*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
1972*4882a593Smuzhiyun
1973*4882a593Smuzhiyun static void _opp_detach_genpd(struct opp_table *opp_table)
1974*4882a593Smuzhiyun {
1975*4882a593Smuzhiyun int index;
1976*4882a593Smuzhiyun
1977*4882a593Smuzhiyun if (!opp_table->genpd_virt_devs)
1978*4882a593Smuzhiyun return;
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun for (index = 0; index < opp_table->required_opp_count; index++) {
1981*4882a593Smuzhiyun if (!opp_table->genpd_virt_devs[index])
1982*4882a593Smuzhiyun continue;
1983*4882a593Smuzhiyun
1984*4882a593Smuzhiyun dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
1985*4882a593Smuzhiyun opp_table->genpd_virt_devs[index] = NULL;
1986*4882a593Smuzhiyun }
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun kfree(opp_table->genpd_virt_devs);
1989*4882a593Smuzhiyun opp_table->genpd_virt_devs = NULL;
1990*4882a593Smuzhiyun }
1991*4882a593Smuzhiyun
1992*4882a593Smuzhiyun /**
1993*4882a593Smuzhiyun * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
1994*4882a593Smuzhiyun * @dev: Consumer device for which the genpd is getting attached.
1995*4882a593Smuzhiyun * @names: Null terminated array of pointers containing names of genpd to attach.
1996*4882a593Smuzhiyun * @virt_devs: Pointer to return the array of virtual devices.
1997*4882a593Smuzhiyun *
1998*4882a593Smuzhiyun * Multiple generic power domains for a device are supported with the help of
1999*4882a593Smuzhiyun * virtual genpd devices, which are created for each consumer device - genpd
2000*4882a593Smuzhiyun * pair. These are the device structures which are attached to the power domain
2001*4882a593Smuzhiyun * and are required by the OPP core to set the performance state of the genpd.
2002*4882a593Smuzhiyun  * The same API also works for the case where a single genpd is available, so
2003*4882a593Smuzhiyun  * that case doesn't need to be supported separately.
2004*4882a593Smuzhiyun *
2005*4882a593Smuzhiyun * This helper will normally be called by the consumer driver of the device
2006*4882a593Smuzhiyun * "dev", as only that has details of the genpd names.
2007*4882a593Smuzhiyun *
2008*4882a593Smuzhiyun * This helper needs to be called once with a list of all genpd to attach.
2009*4882a593Smuzhiyun * Otherwise the original device structure will be used instead by the OPP core.
2010*4882a593Smuzhiyun *
2011*4882a593Smuzhiyun * The order of entries in the names array must match the order in which
2012*4882a593Smuzhiyun * "required-opps" are added in DT.
2013*4882a593Smuzhiyun */
2014*4882a593Smuzhiyun struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
2015*4882a593Smuzhiyun const char **names, struct device ***virt_devs)
2016*4882a593Smuzhiyun {
2017*4882a593Smuzhiyun struct opp_table *opp_table;
2018*4882a593Smuzhiyun struct device *virt_dev;
2019*4882a593Smuzhiyun int index = 0, ret = -EINVAL;
2020*4882a593Smuzhiyun const char **name = names;
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun opp_table = dev_pm_opp_get_opp_table(dev);
2023*4882a593Smuzhiyun if (IS_ERR(opp_table))
2024*4882a593Smuzhiyun return opp_table;
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun if (opp_table->genpd_virt_devs)
2027*4882a593Smuzhiyun return opp_table;
2028*4882a593Smuzhiyun
2029*4882a593Smuzhiyun /*
2030*4882a593Smuzhiyun * If the genpd's OPP table isn't already initialized, parsing of the
2031*4882a593Smuzhiyun  * required-opps fails for dev. We should retry this after the genpd's OPP
2032*4882a593Smuzhiyun  * table has been added.
2033*4882a593Smuzhiyun */
2034*4882a593Smuzhiyun if (!opp_table->required_opp_count) {
2035*4882a593Smuzhiyun ret = -EPROBE_DEFER;
2036*4882a593Smuzhiyun goto put_table;
2037*4882a593Smuzhiyun }
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun mutex_lock(&opp_table->genpd_virt_dev_lock);
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
2042*4882a593Smuzhiyun sizeof(*opp_table->genpd_virt_devs),
2043*4882a593Smuzhiyun GFP_KERNEL);
2044*4882a593Smuzhiyun 	if (!opp_table->genpd_virt_devs) {
2045*4882a593Smuzhiyun 		ret = -ENOMEM;
		goto unlock;
	}
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun while (*name) {
2048*4882a593Smuzhiyun if (index >= opp_table->required_opp_count) {
2049*4882a593Smuzhiyun dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
2050*4882a593Smuzhiyun *name, opp_table->required_opp_count, index);
2051*4882a593Smuzhiyun goto err;
2052*4882a593Smuzhiyun }
2053*4882a593Smuzhiyun
2054*4882a593Smuzhiyun virt_dev = dev_pm_domain_attach_by_name(dev, *name);
2055*4882a593Smuzhiyun if (IS_ERR_OR_NULL(virt_dev)) {
2056*4882a593Smuzhiyun ret = PTR_ERR(virt_dev) ? : -ENODEV;
2057*4882a593Smuzhiyun dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
2058*4882a593Smuzhiyun goto err;
2059*4882a593Smuzhiyun }
2060*4882a593Smuzhiyun
2061*4882a593Smuzhiyun opp_table->genpd_virt_devs[index] = virt_dev;
2062*4882a593Smuzhiyun index++;
2063*4882a593Smuzhiyun name++;
2064*4882a593Smuzhiyun }
2065*4882a593Smuzhiyun
2066*4882a593Smuzhiyun if (virt_devs)
2067*4882a593Smuzhiyun *virt_devs = opp_table->genpd_virt_devs;
2068*4882a593Smuzhiyun mutex_unlock(&opp_table->genpd_virt_dev_lock);
2069*4882a593Smuzhiyun
2070*4882a593Smuzhiyun return opp_table;
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyun err:
2073*4882a593Smuzhiyun _opp_detach_genpd(opp_table);
2074*4882a593Smuzhiyun unlock:
2075*4882a593Smuzhiyun mutex_unlock(&opp_table->genpd_virt_dev_lock);
2076*4882a593Smuzhiyun
2077*4882a593Smuzhiyun put_table:
2078*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
2079*4882a593Smuzhiyun
2080*4882a593Smuzhiyun return ERR_PTR(ret);
2081*4882a593Smuzhiyun }
2082*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
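
/*
 * Illustrative usage sketch (the "perf-core" / "perf-mx" domain names are
 * hypothetical): the names array is NULL terminated and must follow the order
 * of the "required-opps" entries in DT; the virtual devices returned through
 * @virt_devs can then be used to control the domains:
 *
 *	const char *pd_names[] = { "perf-core", "perf-mx", NULL };
 *	struct device **virt_devs;
 *
 *	opp_table = dev_pm_opp_attach_genpd(dev, pd_names, &virt_devs);
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	...
 *	dev_pm_opp_detach_genpd(opp_table);
 */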
2083*4882a593Smuzhiyun
2084*4882a593Smuzhiyun /**
2085*4882a593Smuzhiyun * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
2086*4882a593Smuzhiyun * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
2087*4882a593Smuzhiyun *
2088*4882a593Smuzhiyun * This detaches the genpd(s), resets the virtual device pointers, and puts the
2089*4882a593Smuzhiyun * OPP table.
2090*4882a593Smuzhiyun */
2091*4882a593Smuzhiyun void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
2092*4882a593Smuzhiyun {
2093*4882a593Smuzhiyun /*
2094*4882a593Smuzhiyun * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
2095*4882a593Smuzhiyun * used in parallel.
2096*4882a593Smuzhiyun */
2097*4882a593Smuzhiyun mutex_lock(&opp_table->genpd_virt_dev_lock);
2098*4882a593Smuzhiyun _opp_detach_genpd(opp_table);
2099*4882a593Smuzhiyun mutex_unlock(&opp_table->genpd_virt_dev_lock);
2100*4882a593Smuzhiyun
2101*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
2102*4882a593Smuzhiyun }
2103*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun /**
2106*4882a593Smuzhiyun * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
2107*4882a593Smuzhiyun * @src_table: OPP table which has dst_table as one of its required OPP table.
2108*4882a593Smuzhiyun * @dst_table: Required OPP table of the src_table.
2109*4882a593Smuzhiyun * @pstate: Current performance state of the src_table.
2110*4882a593Smuzhiyun *
2111*4882a593Smuzhiyun  * This returns the pstate of the OPP (present in @dst_table) pointed to by the
2112*4882a593Smuzhiyun * "required-opps" property of the OPP (present in @src_table) which has
2113*4882a593Smuzhiyun * performance state set to @pstate.
2114*4882a593Smuzhiyun *
2115*4882a593Smuzhiyun * Return: Zero or positive performance state on success, otherwise negative
2116*4882a593Smuzhiyun * value on errors.
2117*4882a593Smuzhiyun */
2118*4882a593Smuzhiyun int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
2119*4882a593Smuzhiyun struct opp_table *dst_table,
2120*4882a593Smuzhiyun unsigned int pstate)
2121*4882a593Smuzhiyun {
2122*4882a593Smuzhiyun struct dev_pm_opp *opp;
2123*4882a593Smuzhiyun int dest_pstate = -EINVAL;
2124*4882a593Smuzhiyun int i;
2125*4882a593Smuzhiyun
2126*4882a593Smuzhiyun /*
2127*4882a593Smuzhiyun  * Normally the src_table will have the "required-opps" property set to
2128*4882a593Smuzhiyun  * point to one of the OPPs in the dst_table, but in some cases the
2129*4882a593Smuzhiyun  * genpd and its master have a one-to-one mapping of performance states
2130*4882a593Smuzhiyun  * and so none of them have the "required-opps" property set. Return the
2131*4882a593Smuzhiyun  * pstate of the src_table as it is in such cases.
2132*4882a593Smuzhiyun */
2133*4882a593Smuzhiyun if (!src_table->required_opp_count)
2134*4882a593Smuzhiyun return pstate;
2135*4882a593Smuzhiyun
2136*4882a593Smuzhiyun for (i = 0; i < src_table->required_opp_count; i++) {
2137*4882a593Smuzhiyun if (src_table->required_opp_tables[i]->np == dst_table->np)
2138*4882a593Smuzhiyun break;
2139*4882a593Smuzhiyun }
2140*4882a593Smuzhiyun
2141*4882a593Smuzhiyun if (unlikely(i == src_table->required_opp_count)) {
2142*4882a593Smuzhiyun pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
2143*4882a593Smuzhiyun __func__, src_table, dst_table);
2144*4882a593Smuzhiyun return -EINVAL;
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun
2147*4882a593Smuzhiyun mutex_lock(&src_table->lock);
2148*4882a593Smuzhiyun
2149*4882a593Smuzhiyun list_for_each_entry(opp, &src_table->opp_list, node) {
2150*4882a593Smuzhiyun if (opp->pstate == pstate) {
2151*4882a593Smuzhiyun dest_pstate = opp->required_opps[i]->pstate;
2152*4882a593Smuzhiyun goto unlock;
2153*4882a593Smuzhiyun }
2154*4882a593Smuzhiyun }
2155*4882a593Smuzhiyun
2156*4882a593Smuzhiyun pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
2157*4882a593Smuzhiyun dst_table);
2158*4882a593Smuzhiyun
2159*4882a593Smuzhiyun unlock:
2160*4882a593Smuzhiyun mutex_unlock(&src_table->lock);
2161*4882a593Smuzhiyun
2162*4882a593Smuzhiyun return dest_pstate;
2163*4882a593Smuzhiyun }
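
/*
 * Illustrative usage sketch (hypothetical genpd provider code): a child
 * domain's performance state is translated into the parent domain's scale
 * before being requested from the parent:
 *
 *	parent_pstate = dev_pm_opp_xlate_performance_state(child_opp_table,
 *							    parent_opp_table,
 *							    child_pstate);
 *	if (parent_pstate < 0)
 *		return parent_pstate;
 */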
2164*4882a593Smuzhiyun
2165*4882a593Smuzhiyun /**
2166*4882a593Smuzhiyun  * dev_pm_opp_add() - Add an OPP to the device's OPP table
2167*4882a593Smuzhiyun * @dev: device for which we do this operation
2168*4882a593Smuzhiyun * @freq: Frequency in Hz for this OPP
2169*4882a593Smuzhiyun * @u_volt: Voltage in uVolts for this OPP
2170*4882a593Smuzhiyun *
2171*4882a593Smuzhiyun * This function adds an opp definition to the opp table and returns status.
2172*4882a593Smuzhiyun * The opp is made available by default and it can be controlled using
2173*4882a593Smuzhiyun * dev_pm_opp_enable/disable functions.
2174*4882a593Smuzhiyun *
2175*4882a593Smuzhiyun * Return:
2176*4882a593Smuzhiyun * 0 On success OR
2177*4882a593Smuzhiyun * Duplicate OPPs (both freq and volt are same) and opp->available
2178*4882a593Smuzhiyun  * -EEXIST	Freq is the same and volt is different OR
2179*4882a593Smuzhiyun * Duplicate OPPs (both freq and volt are same) and !opp->available
2180*4882a593Smuzhiyun * -ENOMEM Memory allocation failure
2181*4882a593Smuzhiyun */
2182*4882a593Smuzhiyun int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
2183*4882a593Smuzhiyun {
2184*4882a593Smuzhiyun struct opp_table *opp_table;
2185*4882a593Smuzhiyun int ret;
2186*4882a593Smuzhiyun
2187*4882a593Smuzhiyun opp_table = dev_pm_opp_get_opp_table(dev);
2188*4882a593Smuzhiyun if (IS_ERR(opp_table))
2189*4882a593Smuzhiyun return PTR_ERR(opp_table);
2190*4882a593Smuzhiyun
2191*4882a593Smuzhiyun /* Fix regulator count for dynamic OPPs */
2192*4882a593Smuzhiyun opp_table->regulator_count = 1;
2193*4882a593Smuzhiyun
2194*4882a593Smuzhiyun ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
2195*4882a593Smuzhiyun if (ret)
2196*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
2197*4882a593Smuzhiyun
2198*4882a593Smuzhiyun return ret;
2199*4882a593Smuzhiyun }
2200*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_add);
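
/*
 * Illustrative usage sketch (made-up frequencies and voltages, error handling
 * abbreviated): a driver without DT based OPPs can populate its table at probe
 * time:
 *
 *	ret = dev_pm_opp_add(dev, 533000000, 950000);
 *	if (!ret)
 *		ret = dev_pm_opp_add(dev, 800000000, 1050000);
 *	if (ret)
 *		return ret;
 */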
2201*4882a593Smuzhiyun
2202*4882a593Smuzhiyun /**
2203*4882a593Smuzhiyun * _opp_set_availability() - helper to set the availability of an opp
2204*4882a593Smuzhiyun * @dev: device for which we do this operation
2205*4882a593Smuzhiyun * @freq: OPP frequency to modify availability
2206*4882a593Smuzhiyun * @availability_req: availability status requested for this opp
2207*4882a593Smuzhiyun *
2208*4882a593Smuzhiyun  * Set the availability of an OPP; opp_{enable,disable} share this common
2209*4882a593Smuzhiyun  * logic, which is isolated here.
2210*4882a593Smuzhiyun *
2211*4882a593Smuzhiyun * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
2212*4882a593Smuzhiyun  * copy operation, and 0 if no modification was needed or the modification was
2213*4882a593Smuzhiyun  * successful.
2214*4882a593Smuzhiyun */
2215*4882a593Smuzhiyun static int _opp_set_availability(struct device *dev, unsigned long freq,
2216*4882a593Smuzhiyun bool availability_req)
2217*4882a593Smuzhiyun {
2218*4882a593Smuzhiyun struct opp_table *opp_table;
2219*4882a593Smuzhiyun struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2220*4882a593Smuzhiyun int r = 0;
2221*4882a593Smuzhiyun
2222*4882a593Smuzhiyun /* Find the opp_table */
2223*4882a593Smuzhiyun opp_table = _find_opp_table(dev);
2224*4882a593Smuzhiyun if (IS_ERR(opp_table)) {
2225*4882a593Smuzhiyun r = PTR_ERR(opp_table);
2226*4882a593Smuzhiyun dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2227*4882a593Smuzhiyun return r;
2228*4882a593Smuzhiyun }
2229*4882a593Smuzhiyun
2230*4882a593Smuzhiyun mutex_lock(&opp_table->lock);
2231*4882a593Smuzhiyun
2232*4882a593Smuzhiyun /* Do we have the frequency? */
2233*4882a593Smuzhiyun list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2234*4882a593Smuzhiyun if (tmp_opp->rate == freq) {
2235*4882a593Smuzhiyun opp = tmp_opp;
2236*4882a593Smuzhiyun break;
2237*4882a593Smuzhiyun }
2238*4882a593Smuzhiyun }
2239*4882a593Smuzhiyun
2240*4882a593Smuzhiyun if (IS_ERR(opp)) {
2241*4882a593Smuzhiyun r = PTR_ERR(opp);
2242*4882a593Smuzhiyun goto unlock;
2243*4882a593Smuzhiyun }
2244*4882a593Smuzhiyun
2245*4882a593Smuzhiyun /* Is update really needed? */
2246*4882a593Smuzhiyun if (opp->available == availability_req)
2247*4882a593Smuzhiyun goto unlock;
2248*4882a593Smuzhiyun
2249*4882a593Smuzhiyun opp->available = availability_req;
2250*4882a593Smuzhiyun
2251*4882a593Smuzhiyun dev_pm_opp_get(opp);
2252*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
2253*4882a593Smuzhiyun
2254*4882a593Smuzhiyun /* Notify the change of the OPP availability */
2255*4882a593Smuzhiyun if (availability_req)
2256*4882a593Smuzhiyun blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
2257*4882a593Smuzhiyun opp);
2258*4882a593Smuzhiyun else
2259*4882a593Smuzhiyun blocking_notifier_call_chain(&opp_table->head,
2260*4882a593Smuzhiyun OPP_EVENT_DISABLE, opp);
2261*4882a593Smuzhiyun
2262*4882a593Smuzhiyun dev_pm_opp_put(opp);
2263*4882a593Smuzhiyun goto put_table;
2264*4882a593Smuzhiyun
2265*4882a593Smuzhiyun unlock:
2266*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
2267*4882a593Smuzhiyun put_table:
2268*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
2269*4882a593Smuzhiyun return r;
2270*4882a593Smuzhiyun }
2271*4882a593Smuzhiyun
2272*4882a593Smuzhiyun /**
2273*4882a593Smuzhiyun * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
2274*4882a593Smuzhiyun * @dev: device for which we do this operation
2275*4882a593Smuzhiyun * @freq: OPP frequency to adjust voltage of
2276*4882a593Smuzhiyun * @u_volt: new OPP target voltage
2277*4882a593Smuzhiyun * @u_volt_min: new OPP min voltage
2278*4882a593Smuzhiyun * @u_volt_max: new OPP max voltage
2279*4882a593Smuzhiyun *
2280*4882a593Smuzhiyun * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
2281*4882a593Smuzhiyun  * copy operation, and 0 if no modification was needed or the modification was
2282*4882a593Smuzhiyun  * successful.
2283*4882a593Smuzhiyun */
2284*4882a593Smuzhiyun int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
2285*4882a593Smuzhiyun unsigned long u_volt, unsigned long u_volt_min,
2286*4882a593Smuzhiyun unsigned long u_volt_max)
2287*4882a593Smuzhiyun
2288*4882a593Smuzhiyun {
2289*4882a593Smuzhiyun struct opp_table *opp_table;
2290*4882a593Smuzhiyun struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2291*4882a593Smuzhiyun int r = 0;
2292*4882a593Smuzhiyun
2293*4882a593Smuzhiyun /* Find the opp_table */
2294*4882a593Smuzhiyun opp_table = _find_opp_table(dev);
2295*4882a593Smuzhiyun if (IS_ERR(opp_table)) {
2296*4882a593Smuzhiyun r = PTR_ERR(opp_table);
2297*4882a593Smuzhiyun dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2298*4882a593Smuzhiyun return r;
2299*4882a593Smuzhiyun }
2300*4882a593Smuzhiyun
2301*4882a593Smuzhiyun mutex_lock(&opp_table->lock);
2302*4882a593Smuzhiyun
2303*4882a593Smuzhiyun /* Do we have the frequency? */
2304*4882a593Smuzhiyun list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2305*4882a593Smuzhiyun if (tmp_opp->rate == freq) {
2306*4882a593Smuzhiyun opp = tmp_opp;
2307*4882a593Smuzhiyun break;
2308*4882a593Smuzhiyun }
2309*4882a593Smuzhiyun }
2310*4882a593Smuzhiyun
2311*4882a593Smuzhiyun if (IS_ERR(opp)) {
2312*4882a593Smuzhiyun r = PTR_ERR(opp);
2313*4882a593Smuzhiyun goto adjust_unlock;
2314*4882a593Smuzhiyun }
2315*4882a593Smuzhiyun
2316*4882a593Smuzhiyun /* Is update really needed? */
2317*4882a593Smuzhiyun if (opp->supplies->u_volt == u_volt)
2318*4882a593Smuzhiyun goto adjust_unlock;
2319*4882a593Smuzhiyun
2320*4882a593Smuzhiyun opp->supplies->u_volt = u_volt;
2321*4882a593Smuzhiyun opp->supplies->u_volt_min = u_volt_min;
2322*4882a593Smuzhiyun opp->supplies->u_volt_max = u_volt_max;
2323*4882a593Smuzhiyun
2324*4882a593Smuzhiyun dev_pm_opp_get(opp);
2325*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun /* Notify the voltage change of the OPP */
2328*4882a593Smuzhiyun blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
2329*4882a593Smuzhiyun opp);
2330*4882a593Smuzhiyun
2331*4882a593Smuzhiyun dev_pm_opp_put(opp);
2332*4882a593Smuzhiyun goto adjust_put_table;
2333*4882a593Smuzhiyun
2334*4882a593Smuzhiyun adjust_unlock:
2335*4882a593Smuzhiyun mutex_unlock(&opp_table->lock);
2336*4882a593Smuzhiyun adjust_put_table:
2337*4882a593Smuzhiyun dev_pm_opp_put_opp_table(opp_table);
2338*4882a593Smuzhiyun return r;
2339*4882a593Smuzhiyun }
2340*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
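
/*
 * Example (illustrative sketch, not part of the OPP core): a hypothetical
 * platform driver that has calibrated a better voltage for an existing OPP
 * could apply it as below. 'dev' and 'ret' come from the caller's context,
 * and the 800 MHz / 0.9 V numbers are made up for the example.
 *
 *	ret = dev_pm_opp_adjust_voltage(dev, 800000000, 900000, 880000, 920000);
 *	if (ret)
 *		dev_warn(dev, "failed to adjust 800 MHz OPP voltage: %d\n", ret);
 */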

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable().
 *
 * Return: 0 if the OPP was enabled or was already available, or a negative
 * error value: -EINVAL for a bad device pointer, -ENODEV if the device's OPP
 * table or the requested frequency is not found.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
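
/*
 * Example (illustrative sketch): re-enabling an OPP that was previously
 * disabled. The device pointer and the 1.2 GHz frequency are hypothetical.
 *
 *	ret = dev_pm_opp_enable(dev, 1200000000);
 *	if (ret)
 *		dev_err(dev, "failed to enable 1.2 GHz OPP: %d\n", ret);
 */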

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Return: 0 if the OPP was disabled or was already unavailable, or a negative
 * error value: -EINVAL for a bad device pointer, -ENODEV if the device's OPP
 * table or the requested frequency is not found.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
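
/*
 * Example (illustrative sketch): temporarily masking the highest OPP while,
 * say, a hypothetical thermal constraint is active, to be restored later with
 * dev_pm_opp_enable(). 'dev' and the 1.2 GHz frequency are assumptions made
 * for the example.
 *
 *	ret = dev_pm_opp_disable(dev, 1200000000);
 *	if (ret)
 *		dev_warn(dev, "failed to disable 1.2 GHz OPP: %d\n", ret);
 */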

/**
 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
 * @dev: Device for which notifier needs to be registered
 * @nb: Notifier block to be registered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_register(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);
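
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * consumer that wants to log availability and voltage changes could register
 * a notifier as below. The names my_opp_notifier and my_nb are made up.
 *
 *	static int my_opp_notifier(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		struct dev_pm_opp *opp = data;
 *
 *		switch (event) {
 *		case OPP_EVENT_ENABLE:
 *		case OPP_EVENT_DISABLE:
 *		case OPP_EVENT_ADJUST_VOLTAGE:
 *			pr_debug("OPP %lu Hz changed (event %lu)\n",
 *				 dev_pm_opp_get_freq(opp), event);
 *			break;
 *		}
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_opp_notifier,
 *	};
 *
 *	ret = dev_pm_opp_register_notifier(dev, &my_nb);
 */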

/**
 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
 * @dev: Device for which notifier needs to be unregistered
 * @nb: Notifier block to be unregistered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_unregister_notifier(struct device *dev,
				   struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_unregister(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
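
/*
 * Example (illustrative sketch): undoing the registration from the sketch
 * above, typically in the consumer's teardown path. 'dev' and my_nb are the
 * same hypothetical names used there.
 *
 *	dev_pm_opp_unregister_notifier(dev, &my_nb);
 */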

/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	struct opp_table *opp_table;

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		return;
	}

	/*
	 * Drop the extra reference only if the OPP table was successfully added
	 * with dev_pm_opp_of_add_table() earlier.
	 */
	if (_opp_remove_all_static(opp_table))
		dev_pm_opp_put_opp_table(opp_table);

	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
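
/*
 * Example (illustrative sketch): a hypothetical platform driver that added
 * its OPPs with dev_pm_opp_of_add_table() and/or dev_pm_opp_add() in probe
 * could free everything again on removal. The driver name is made up.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_opp_remove_table(&pdev->dev);
 *
 *		return 0;
 *	}
 */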