xref: /OK3568_Linux_fs/kernel/drivers/media/v4l2-core/v4l2-clk.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * V4L2 clock service
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/atomic.h>
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/device.h>
11*4882a593Smuzhiyun #include <linux/errno.h>
12*4882a593Smuzhiyun #include <linux/list.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/mutex.h>
15*4882a593Smuzhiyun #include <linux/of.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/string.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <media/v4l2-clk.h>
20*4882a593Smuzhiyun #include <media/v4l2-subdev.h>
21*4882a593Smuzhiyun 
/* Protects clk_list and each registered clock's use_count. */
static DEFINE_MUTEX(clk_lock);
/* All clocks registered through v4l2_clk_register(). */
static LIST_HEAD(clk_list);
24*4882a593Smuzhiyun 
v4l2_clk_find(const char * dev_id)25*4882a593Smuzhiyun static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun 	struct v4l2_clk *clk;
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun 	list_for_each_entry(clk, &clk_list, list)
30*4882a593Smuzhiyun 		if (!strcmp(dev_id, clk->dev_id))
31*4882a593Smuzhiyun 			return clk;
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	return ERR_PTR(-ENODEV);
34*4882a593Smuzhiyun }
35*4882a593Smuzhiyun 
v4l2_clk_get(struct device * dev,const char * id)36*4882a593Smuzhiyun struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun 	struct v4l2_clk *clk;
39*4882a593Smuzhiyun 	struct clk *ccf_clk = clk_get(dev, id);
40*4882a593Smuzhiyun 	char clk_name[V4L2_CLK_NAME_SIZE];
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
43*4882a593Smuzhiyun 		return ERR_PTR(-EPROBE_DEFER);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	if (!IS_ERR_OR_NULL(ccf_clk)) {
46*4882a593Smuzhiyun 		clk = kzalloc(sizeof(*clk), GFP_KERNEL);
47*4882a593Smuzhiyun 		if (!clk) {
48*4882a593Smuzhiyun 			clk_put(ccf_clk);
49*4882a593Smuzhiyun 			return ERR_PTR(-ENOMEM);
50*4882a593Smuzhiyun 		}
51*4882a593Smuzhiyun 		clk->clk = ccf_clk;
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 		return clk;
54*4882a593Smuzhiyun 	}
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	mutex_lock(&clk_lock);
57*4882a593Smuzhiyun 	clk = v4l2_clk_find(dev_name(dev));
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	/* if dev_name is not found, try use the OF name to find again  */
60*4882a593Smuzhiyun 	if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
61*4882a593Smuzhiyun 		v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node);
62*4882a593Smuzhiyun 		clk = v4l2_clk_find(clk_name);
63*4882a593Smuzhiyun 	}
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 	if (!IS_ERR(clk))
66*4882a593Smuzhiyun 		atomic_inc(&clk->use_count);
67*4882a593Smuzhiyun 	mutex_unlock(&clk_lock);
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	return clk;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_get);
72*4882a593Smuzhiyun 
v4l2_clk_put(struct v4l2_clk * clk)73*4882a593Smuzhiyun void v4l2_clk_put(struct v4l2_clk *clk)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun 	struct v4l2_clk *tmp;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	if (IS_ERR(clk))
78*4882a593Smuzhiyun 		return;
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	if (clk->clk) {
81*4882a593Smuzhiyun 		clk_put(clk->clk);
82*4882a593Smuzhiyun 		kfree(clk);
83*4882a593Smuzhiyun 		return;
84*4882a593Smuzhiyun 	}
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 	mutex_lock(&clk_lock);
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	list_for_each_entry(tmp, &clk_list, list)
89*4882a593Smuzhiyun 		if (tmp == clk)
90*4882a593Smuzhiyun 			atomic_dec(&clk->use_count);
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	mutex_unlock(&clk_lock);
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_put);
95*4882a593Smuzhiyun 
v4l2_clk_lock_driver(struct v4l2_clk * clk)96*4882a593Smuzhiyun static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun 	struct v4l2_clk *tmp;
99*4882a593Smuzhiyun 	int ret = -ENODEV;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	mutex_lock(&clk_lock);
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 	list_for_each_entry(tmp, &clk_list, list)
104*4882a593Smuzhiyun 		if (tmp == clk) {
105*4882a593Smuzhiyun 			ret = !try_module_get(clk->ops->owner);
106*4882a593Smuzhiyun 			if (ret)
107*4882a593Smuzhiyun 				ret = -EFAULT;
108*4882a593Smuzhiyun 			break;
109*4882a593Smuzhiyun 		}
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	mutex_unlock(&clk_lock);
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	return ret;
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun 
v4l2_clk_unlock_driver(struct v4l2_clk * clk)116*4882a593Smuzhiyun static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
117*4882a593Smuzhiyun {
118*4882a593Smuzhiyun 	module_put(clk->ops->owner);
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun 
v4l2_clk_enable(struct v4l2_clk * clk)121*4882a593Smuzhiyun int v4l2_clk_enable(struct v4l2_clk *clk)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun 	int ret;
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	if (clk->clk)
126*4882a593Smuzhiyun 		return clk_prepare_enable(clk->clk);
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	ret = v4l2_clk_lock_driver(clk);
129*4882a593Smuzhiyun 	if (ret < 0)
130*4882a593Smuzhiyun 		return ret;
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	mutex_lock(&clk->lock);
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	if (++clk->enable == 1 && clk->ops->enable) {
135*4882a593Smuzhiyun 		ret = clk->ops->enable(clk);
136*4882a593Smuzhiyun 		if (ret < 0)
137*4882a593Smuzhiyun 			clk->enable--;
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	mutex_unlock(&clk->lock);
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	return ret;
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_enable);
145*4882a593Smuzhiyun 
/*
 * You might Oops if you try to disable an already-disabled clock, because
 * then the driver isn't locked and could have been unloaded by now, so,
 * don't do that
 */
v4l2_clk_disable(struct v4l2_clk * clk)150*4882a593Smuzhiyun void v4l2_clk_disable(struct v4l2_clk *clk)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	int enable;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	if (clk->clk)
155*4882a593Smuzhiyun 		return clk_disable_unprepare(clk->clk);
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 	mutex_lock(&clk->lock);
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 	enable = --clk->enable;
160*4882a593Smuzhiyun 	if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
161*4882a593Smuzhiyun 		 clk->dev_id))
162*4882a593Smuzhiyun 		clk->enable++;
163*4882a593Smuzhiyun 	else if (!enable && clk->ops->disable)
164*4882a593Smuzhiyun 		clk->ops->disable(clk);
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	mutex_unlock(&clk->lock);
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	v4l2_clk_unlock_driver(clk);
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_disable);
171*4882a593Smuzhiyun 
v4l2_clk_get_rate(struct v4l2_clk * clk)172*4882a593Smuzhiyun unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	int ret;
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun 	if (clk->clk)
177*4882a593Smuzhiyun 		return clk_get_rate(clk->clk);
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	ret = v4l2_clk_lock_driver(clk);
180*4882a593Smuzhiyun 	if (ret < 0)
181*4882a593Smuzhiyun 		return ret;
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun 	mutex_lock(&clk->lock);
184*4882a593Smuzhiyun 	if (!clk->ops->get_rate)
185*4882a593Smuzhiyun 		ret = -ENOSYS;
186*4882a593Smuzhiyun 	else
187*4882a593Smuzhiyun 		ret = clk->ops->get_rate(clk);
188*4882a593Smuzhiyun 	mutex_unlock(&clk->lock);
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	v4l2_clk_unlock_driver(clk);
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	return ret;
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_get_rate);
195*4882a593Smuzhiyun 
v4l2_clk_set_rate(struct v4l2_clk * clk,unsigned long rate)196*4882a593Smuzhiyun int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
197*4882a593Smuzhiyun {
198*4882a593Smuzhiyun 	int ret;
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	if (clk->clk) {
201*4882a593Smuzhiyun 		long r = clk_round_rate(clk->clk, rate);
202*4882a593Smuzhiyun 		if (r < 0)
203*4882a593Smuzhiyun 			return r;
204*4882a593Smuzhiyun 		return clk_set_rate(clk->clk, r);
205*4882a593Smuzhiyun 	}
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	ret = v4l2_clk_lock_driver(clk);
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	if (ret < 0)
210*4882a593Smuzhiyun 		return ret;
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	mutex_lock(&clk->lock);
213*4882a593Smuzhiyun 	if (!clk->ops->set_rate)
214*4882a593Smuzhiyun 		ret = -ENOSYS;
215*4882a593Smuzhiyun 	else
216*4882a593Smuzhiyun 		ret = clk->ops->set_rate(clk, rate);
217*4882a593Smuzhiyun 	mutex_unlock(&clk->lock);
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	v4l2_clk_unlock_driver(clk);
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	return ret;
222*4882a593Smuzhiyun }
223*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_set_rate);
224*4882a593Smuzhiyun 
v4l2_clk_register(const struct v4l2_clk_ops * ops,const char * dev_id,void * priv)225*4882a593Smuzhiyun struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
226*4882a593Smuzhiyun 				   const char *dev_id,
227*4882a593Smuzhiyun 				   void *priv)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun 	struct v4l2_clk *clk;
230*4882a593Smuzhiyun 	int ret;
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	if (!ops || !dev_id)
233*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
236*4882a593Smuzhiyun 	if (!clk)
237*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun 	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
240*4882a593Smuzhiyun 	if (!clk->dev_id) {
241*4882a593Smuzhiyun 		ret = -ENOMEM;
242*4882a593Smuzhiyun 		goto ealloc;
243*4882a593Smuzhiyun 	}
244*4882a593Smuzhiyun 	clk->ops = ops;
245*4882a593Smuzhiyun 	clk->priv = priv;
246*4882a593Smuzhiyun 	atomic_set(&clk->use_count, 0);
247*4882a593Smuzhiyun 	mutex_init(&clk->lock);
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	mutex_lock(&clk_lock);
250*4882a593Smuzhiyun 	if (!IS_ERR(v4l2_clk_find(dev_id))) {
251*4882a593Smuzhiyun 		mutex_unlock(&clk_lock);
252*4882a593Smuzhiyun 		ret = -EEXIST;
253*4882a593Smuzhiyun 		goto eexist;
254*4882a593Smuzhiyun 	}
255*4882a593Smuzhiyun 	list_add_tail(&clk->list, &clk_list);
256*4882a593Smuzhiyun 	mutex_unlock(&clk_lock);
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	return clk;
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun eexist:
261*4882a593Smuzhiyun ealloc:
262*4882a593Smuzhiyun 	kfree(clk->dev_id);
263*4882a593Smuzhiyun 	kfree(clk);
264*4882a593Smuzhiyun 	return ERR_PTR(ret);
265*4882a593Smuzhiyun }
266*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_register);
267*4882a593Smuzhiyun 
v4l2_clk_unregister(struct v4l2_clk * clk)268*4882a593Smuzhiyun void v4l2_clk_unregister(struct v4l2_clk *clk)
269*4882a593Smuzhiyun {
270*4882a593Smuzhiyun 	if (WARN(atomic_read(&clk->use_count),
271*4882a593Smuzhiyun 		 "%s(): Refusing to unregister ref-counted %s clock!\n",
272*4882a593Smuzhiyun 		 __func__, clk->dev_id))
273*4882a593Smuzhiyun 		return;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	mutex_lock(&clk_lock);
276*4882a593Smuzhiyun 	list_del(&clk->list);
277*4882a593Smuzhiyun 	mutex_unlock(&clk_lock);
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	kfree(clk->dev_id);
280*4882a593Smuzhiyun 	kfree(clk);
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_unregister);
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun struct v4l2_clk_fixed {
285*4882a593Smuzhiyun 	unsigned long rate;
286*4882a593Smuzhiyun 	struct v4l2_clk_ops ops;
287*4882a593Smuzhiyun };
288*4882a593Smuzhiyun 
fixed_get_rate(struct v4l2_clk * clk)289*4882a593Smuzhiyun static unsigned long fixed_get_rate(struct v4l2_clk *clk)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun 	struct v4l2_clk_fixed *priv = clk->priv;
292*4882a593Smuzhiyun 	return priv->rate;
293*4882a593Smuzhiyun }
294*4882a593Smuzhiyun 
__v4l2_clk_register_fixed(const char * dev_id,unsigned long rate,struct module * owner)295*4882a593Smuzhiyun struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
296*4882a593Smuzhiyun 				unsigned long rate, struct module *owner)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun 	struct v4l2_clk *clk;
299*4882a593Smuzhiyun 	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	if (!priv)
302*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	priv->rate = rate;
305*4882a593Smuzhiyun 	priv->ops.get_rate = fixed_get_rate;
306*4882a593Smuzhiyun 	priv->ops.owner = owner;
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	clk = v4l2_clk_register(&priv->ops, dev_id, priv);
309*4882a593Smuzhiyun 	if (IS_ERR(clk))
310*4882a593Smuzhiyun 		kfree(priv);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	return clk;
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun EXPORT_SYMBOL(__v4l2_clk_register_fixed);
315*4882a593Smuzhiyun 
v4l2_clk_unregister_fixed(struct v4l2_clk * clk)316*4882a593Smuzhiyun void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun 	kfree(clk->priv);
319*4882a593Smuzhiyun 	v4l2_clk_unregister(clk);
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun EXPORT_SYMBOL(v4l2_clk_unregister_fixed);
322