xref: /OK3568_Linux_fs/kernel/drivers/clk/clk.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/***    private data structures    ***/

struct clk_parent_map {
	const struct clk_hw	*hw;
	struct clk_core		*core;
	const char		*fw_name;
	const char		*name;
	int			index;
};

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct device_node	*of_node;
	struct clk_core		*parent;
	struct clk_parent_map	*parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	bool			rpm_enabled;
	bool			need_sync;
	bool			boot_enabled;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(core->dev);
		return ret;
	}
	return 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}
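
/*
 * Example (illustrative sketch, not part of the original file): callers
 * inside the framework bracket provider callbacks with the two helpers
 * above so a runtime-PM-enabled clock controller is powered before its
 * registers are touched. The callback shown is hypothetical.
 *
 *	static int some_clk_op(struct clk_core *core)
 *	{
 *		int ret = clk_pm_runtime_get(core);	// resume, or bail out
 *
 *		if (ret)
 *			return ret;
 *
 *		ret = core->ops->prepare ? core->ops->prepare(core->hw) : 0;
 *		clk_pm_runtime_put(core);		// balance the get
 *		return ret;
 *	}
 */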

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
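
/*
 * Example (illustrative, not part of the original file): the owner and
 * refcount bookkeeping above makes both locks recursive for the task
 * that holds them, so re-entrant framework calls are safe:
 *
 *	clk_prepare_lock();
 *	clk_prepare_lock();	// same task: prepare_refcnt becomes 2
 *	...
 *	clk_prepare_unlock();	// refcnt drops to 1, mutex still held
 *	clk_prepare_unlock();	// refcnt drops to 0, mutex released
 */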

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/***    helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *      parent: clock-controller@f00abcd {
 *              reg = <0xf00abcd 0xabcd>;
 *              #clock-cells = <0>;
 *      };
 *
 *      clock-controller@c001 {
 *              reg = <0xc001 0xf00d>;
 *              clocks = <&parent>;
 *              clock-names = "xtal";
 *              #clock-cells = <1>;
 *      };
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider, fall
		 * back to looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}
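
/*
 * Example (illustrative sketch, not part of the original file): the
 * provider side of the DT snippet in the kerneldoc above would describe
 * its parent via clk_init_data::parent_data so that clk_core_get() can
 * resolve "xtal" through 'clock-names'. All identifiers below are
 * hypothetical.
 *
 *	static const struct clk_parent_data foo_parent_data[] = {
 *		{ .fw_name = "xtal", .index = 0 },
 *	};
 *
 *	static const struct clk_init_data foo_init = {
 *		.name		= "foo_div",
 *		.ops		= &foo_div_ops,
 *		.parent_data	= foo_parent_data,
 *		.num_parents	= ARRAY_SIZE(foo_parent_data),
 *	};
 */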

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent = ERR_PTR(-ENOENT);

	if (entry->hw) {
		parent = entry->hw->core;
		/*
		 * We have a direct reference but it isn't registered yet?
		 * Orphan it and let clk_reparent() update the orphan status
		 * when the parent is registered.
		 */
		if (!parent)
			parent = ERR_PTR(-EPROBE_DEFER);
	} else {
		parent = clk_core_get(core, index);
		if (PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents || !core->parents)
		return NULL;

	if (!core->parents[index].core)
		clk_core_fill_parent_index(core, index);

	return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
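
/*
 * Example (illustrative, not part of the original file): a provider can
 * enumerate the possible parents of one of its clocks with the accessors
 * above; entries that cannot be resolved yet read back as NULL:
 *
 *	unsigned int i, n = clk_hw_get_num_parents(hw);
 *
 *	for (i = 0; i < n; i++) {
 *		struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
 *
 *		if (p)
 *			pr_debug("parent %u: %s\n", i, clk_hw_get_name(p));
 *	}
 */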

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	if (!core)
		return 0;

	if (!core->num_parents || core->parent)
		return core->rate;

	/*
	 * Clk must have a parent because num_parents > 0 but the parent isn't
	 * known yet. Best to return 0 as the rate of this clk until we can
	 * properly recalc the rate based on the parent's rate.
	 */
	return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			   unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}
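
/*
 * Worked example (illustrative, not part of the original file): for a
 * 100 MHz request with candidate parent rates of 90 MHz and 105 MHz, the
 * default policy only accepts rates <= the request, so 90 MHz wins;
 * with CLK_MUX_ROUND_CLOSEST, |105 - 100| < |90 - 100| picks 105 MHz.
 */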

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	lockdep_assert_held(&prepare_lock);

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

static bool clk_core_check_boundaries(struct clk_core *core,
				      unsigned long min_rate,
				      unsigned long max_rate)
{
	struct clk *user;

	lockdep_assert_held(&prepare_lock);

	if (min_rate > core->max_rate || max_rate < core->min_rate)
		return false;

	hlist_for_each_entry(user, &core->clks, clks_node)
		if (min_rate > user->max_rate || max_rate < user->min_rate)
			return false;

	return true;
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
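
/*
 * Example (illustrative, not part of the original file): a provider
 * typically pins its hardware limits right after registration; the
 * identifiers and limits below are assumptions.
 *
 *	ret = devm_clk_hw_register(dev, &foo->hw);
 *	if (ret)
 *		return ret;
 *
 *	// PLL assumed stable only between 600 MHz and 1.2 GHz
 *	clk_hw_set_rate_range(&foo->hw, 600 * 1000 * 1000,
 *			      1200 * 1000 * 1000);
 */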

/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/***        clk api        ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}
/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
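
/*
 * Example (illustrative, not part of the original file): a consumer that
 * cannot tolerate rate glitches brackets its critical section with the
 * pair above (clk_set_rate_exclusive() combines the first two steps):
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_set_rate(clk, 148500000);	// rate now locked for others
 *	...
 *	clk_rate_exclusive_put(clk);		// rate changes allowed again
 */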

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
	clk_pm_runtime_put(core);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}
/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
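
/*
 * Example (illustrative, not part of the original file): the canonical
 * consumer bring-up pairs the sleepable and the atomic halves; most
 * drivers use the clk_prepare_enable() wrapper instead of open-coding:
 *
 *	ret = clk_prepare(clk);		// may sleep, process context only
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);		// never sleeps, atomic-safe
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 */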

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);
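
/*
 * Example (illustrative, not part of the original file): teardown mirrors
 * bring-up in reverse, dropping the atomic half before the sleepable
 * half; clk_disable_unprepare() wraps exactly this sequence:
 *
 *	clk_disable(clk);	// atomic: gate the clock
 *	clk_unprepare(clk);	// may sleep: undo the slow part
 */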

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clock based on its enable_count. This is used in cases
 * where the clock context is lost: based on the enable_count the
 * clock is either re-enabled or kept disabled, restoring the state
 * of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);
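
/*
 * Example (illustrative, not part of the original file): a gate provider
 * whose register state is lost across power-off can plug the helper above
 * straight into its ops; the other callbacks are hypothetical.
 *
 *	static const struct clk_ops foo_gate_ops = {
 *		.enable		= foo_gate_enable,
 *		.disable	= foo_gate_disable,
 *		.is_enabled	= foo_gate_is_enabled,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */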
1089*4882a593Smuzhiyun 
clk_core_save_context(struct clk_core * core)1090*4882a593Smuzhiyun static int clk_core_save_context(struct clk_core *core)
1091*4882a593Smuzhiyun {
1092*4882a593Smuzhiyun 	struct clk_core *child;
1093*4882a593Smuzhiyun 	int ret = 0;
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node) {
1096*4882a593Smuzhiyun 		ret = clk_core_save_context(child);
1097*4882a593Smuzhiyun 		if (ret < 0)
1098*4882a593Smuzhiyun 			return ret;
1099*4882a593Smuzhiyun 	}
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	if (core->ops && core->ops->save_context)
1102*4882a593Smuzhiyun 		ret = core->ops->save_context(core->hw);
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	return ret;
1105*4882a593Smuzhiyun }
1106*4882a593Smuzhiyun 
clk_core_restore_context(struct clk_core * core)1107*4882a593Smuzhiyun static void clk_core_restore_context(struct clk_core *core)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun 	struct clk_core *child;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	if (core->ops && core->ops->restore_context)
1112*4882a593Smuzhiyun 		core->ops->restore_context(core->hw);
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node)
1115*4882a593Smuzhiyun 		clk_core_restore_context(child);
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun /**
1119*4882a593Smuzhiyun  * clk_save_context - save clock context for poweroff
1120*4882a593Smuzhiyun  *
1121*4882a593Smuzhiyun  * Saves the context of the clock register for powerstates in which the
1122*4882a593Smuzhiyun  * contents of the registers will be lost. Occurs deep within the suspend
1123*4882a593Smuzhiyun  * code.  Returns 0 on success.
1124*4882a593Smuzhiyun  */
clk_save_context(void)1125*4882a593Smuzhiyun int clk_save_context(void)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	struct clk_core *clk;
1128*4882a593Smuzhiyun 	int ret;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	hlist_for_each_entry(clk, &clk_root_list, child_node) {
1131*4882a593Smuzhiyun 		ret = clk_core_save_context(clk);
1132*4882a593Smuzhiyun 		if (ret < 0)
1133*4882a593Smuzhiyun 			return ret;
1134*4882a593Smuzhiyun 	}
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1137*4882a593Smuzhiyun 		ret = clk_core_save_context(clk);
1138*4882a593Smuzhiyun 		if (ret < 0)
1139*4882a593Smuzhiyun 			return ret;
1140*4882a593Smuzhiyun 	}
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	return 0;
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_save_context);
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun /**
1147*4882a593Smuzhiyun  * clk_restore_context - restore clock context after poweroff
1148*4882a593Smuzhiyun  *
1149*4882a593Smuzhiyun  * Restore the saved clock context upon resume.
1150*4882a593Smuzhiyun  *
1151*4882a593Smuzhiyun  */
clk_restore_context(void)1152*4882a593Smuzhiyun void clk_restore_context(void)
1153*4882a593Smuzhiyun {
1154*4882a593Smuzhiyun 	struct clk_core *core;
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_root_list, child_node)
1157*4882a593Smuzhiyun 		clk_core_restore_context(core);
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1160*4882a593Smuzhiyun 		clk_core_restore_context(core);
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_restore_context);
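
/*
 * Editor's illustration (not part of the original file): a minimal sketch
 * of how a platform's noirq suspend/resume hooks might drive the two
 * helpers above.  The foo_* names are hypothetical.
 */
static int foo_suspend_noirq(struct device *dev)
{
	/* registers are about to lose power: snapshot the clk state */
	return clk_save_context();
}

static int foo_resume_noirq(struct device *dev)
{
	/* power is back: replay the saved clk state */
	clk_restore_context();
	return 0;
}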
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun /**
1165*4882a593Smuzhiyun  * clk_enable - ungate a clock
1166*4882a593Smuzhiyun  * @clk: the clk being ungated
1167*4882a593Smuzhiyun  *
1168*4882a593Smuzhiyun  * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
1169*4882a593Smuzhiyun  * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1170*4882a593Smuzhiyun  * if the operation will never sleep.  One example is a SoC-internal clk which
1171*4882a593Smuzhiyun  * is controlled via simple register writes.  In the complex case a clk ungate
1172*4882a593Smuzhiyun  * operation may require a fast and a slow part.  For this reason,
1173*4882a593Smuzhiyun  * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
1174*4882a593Smuzhiyun  * must be called before clk_enable.  Returns 0 on success, a negative errno
1175*4882a593Smuzhiyun  * otherwise.
1176*4882a593Smuzhiyun  */
1177*4882a593Smuzhiyun int clk_enable(struct clk *clk)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun 	if (!clk)
1180*4882a593Smuzhiyun 		return 0;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	return clk_core_enable_lock(clk->core);
1183*4882a593Smuzhiyun }
1184*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_enable);
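
/*
 * Editor's illustration: the consumer-side pairing implied by the comment
 * above -- clk_prepare() (may sleep) before clk_enable() (atomic-safe).
 * example_clk_on() is hypothetical consumer code.
 */
static int example_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* slow part: process context only */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* fast part: safe with IRQs off */
	if (ret)
		clk_unprepare(clk);

	return ret;
}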
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun static int clk_core_prepare_enable(struct clk_core *core)
1187*4882a593Smuzhiyun {
1188*4882a593Smuzhiyun 	int ret;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	ret = clk_core_prepare_lock(core);
1191*4882a593Smuzhiyun 	if (ret)
1192*4882a593Smuzhiyun 		return ret;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	ret = clk_core_enable_lock(core);
1195*4882a593Smuzhiyun 	if (ret)
1196*4882a593Smuzhiyun 		clk_core_unprepare_lock(core);
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	return ret;
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun static void clk_core_disable_unprepare(struct clk_core *core)
1202*4882a593Smuzhiyun {
1203*4882a593Smuzhiyun 	clk_core_disable_lock(core);
1204*4882a593Smuzhiyun 	clk_core_unprepare_lock(core);
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun static void __init clk_unprepare_unused_subtree(struct clk_core *core)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun 	struct clk_core *child;
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node)
1214*4882a593Smuzhiyun 		clk_unprepare_unused_subtree(child);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	if (dev_has_sync_state(core->dev) &&
1217*4882a593Smuzhiyun 	    !(core->flags & CLK_DONT_HOLD_STATE))
1218*4882a593Smuzhiyun 		return;
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	if (core->prepare_count)
1221*4882a593Smuzhiyun 		return;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	if (core->flags & CLK_IGNORE_UNUSED)
1224*4882a593Smuzhiyun 		return;
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	if (clk_pm_runtime_get(core))
1227*4882a593Smuzhiyun 		return;
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	if (clk_core_is_prepared(core)) {
1230*4882a593Smuzhiyun 		trace_clk_unprepare(core);
1231*4882a593Smuzhiyun 		if (core->ops->unprepare_unused)
1232*4882a593Smuzhiyun 			core->ops->unprepare_unused(core->hw);
1233*4882a593Smuzhiyun 		else if (core->ops->unprepare)
1234*4882a593Smuzhiyun 			core->ops->unprepare(core->hw);
1235*4882a593Smuzhiyun 		trace_clk_unprepare_complete(core);
1236*4882a593Smuzhiyun 	}
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 	clk_pm_runtime_put(core);
1239*4882a593Smuzhiyun }
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun static void __init clk_disable_unused_subtree(struct clk_core *core)
1242*4882a593Smuzhiyun {
1243*4882a593Smuzhiyun 	struct clk_core *child;
1244*4882a593Smuzhiyun 	unsigned long flags;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node)
1249*4882a593Smuzhiyun 		clk_disable_unused_subtree(child);
1250*4882a593Smuzhiyun 
1251*4882a593Smuzhiyun 	if (dev_has_sync_state(core->dev) &&
1252*4882a593Smuzhiyun 	    !(core->flags & CLK_DONT_HOLD_STATE))
1253*4882a593Smuzhiyun 		return;
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	if (core->flags & CLK_OPS_PARENT_ENABLE)
1256*4882a593Smuzhiyun 		clk_core_prepare_enable(core->parent);
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	if (clk_pm_runtime_get(core))
1259*4882a593Smuzhiyun 		goto unprepare_out;
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	flags = clk_enable_lock();
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	if (core->enable_count)
1264*4882a593Smuzhiyun 		goto unlock_out;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	if (core->flags & CLK_IGNORE_UNUSED)
1267*4882a593Smuzhiyun 		goto unlock_out;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	/*
1270*4882a593Smuzhiyun 	 * some gate clocks have special needs during the disable-unused
1271*4882a593Smuzhiyun 	 * sequence.  call .disable_unused if available, otherwise fall
1272*4882a593Smuzhiyun 	 * back to .disable
1273*4882a593Smuzhiyun 	 */
1274*4882a593Smuzhiyun 	if (clk_core_is_enabled(core)) {
1275*4882a593Smuzhiyun 		trace_clk_disable(core);
1276*4882a593Smuzhiyun 		if (core->ops->disable_unused)
1277*4882a593Smuzhiyun 			core->ops->disable_unused(core->hw);
1278*4882a593Smuzhiyun 		else if (core->ops->disable)
1279*4882a593Smuzhiyun 			core->ops->disable(core->hw);
1280*4882a593Smuzhiyun 		trace_clk_disable_complete(core);
1281*4882a593Smuzhiyun 	}
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun unlock_out:
1284*4882a593Smuzhiyun 	clk_enable_unlock(flags);
1285*4882a593Smuzhiyun 	clk_pm_runtime_put(core);
1286*4882a593Smuzhiyun unprepare_out:
1287*4882a593Smuzhiyun 	if (core->flags & CLK_OPS_PARENT_ENABLE)
1288*4882a593Smuzhiyun 		clk_core_disable_unprepare(core->parent);
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun static bool clk_ignore_unused __initdata;
1292*4882a593Smuzhiyun static int __init clk_ignore_unused_setup(char *__unused)
1293*4882a593Smuzhiyun {
1294*4882a593Smuzhiyun 	clk_ignore_unused = true;
1295*4882a593Smuzhiyun 	return 1;
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun __setup("clk_ignore_unused", clk_ignore_unused_setup);
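
/*
 * Editor's note: booting with "clk_ignore_unused" on the kernel command
 * line sets the flag above, so clk_disable_unused() below leaves unclaimed
 * clocks running -- handy during early board bring-up, e.g.:
 *
 *	console=ttyS0 clk_ignore_unused
 */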
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun static int __init clk_disable_unused(void)
1300*4882a593Smuzhiyun {
1301*4882a593Smuzhiyun 	struct clk_core *core;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	if (clk_ignore_unused) {
1304*4882a593Smuzhiyun 		pr_warn("clk: Not disabling unused clocks\n");
1305*4882a593Smuzhiyun 		return 0;
1306*4882a593Smuzhiyun 	}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	clk_prepare_lock();
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_root_list, child_node)
1311*4882a593Smuzhiyun 		clk_disable_unused_subtree(core);
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1314*4882a593Smuzhiyun 		clk_disable_unused_subtree(core);
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_root_list, child_node)
1317*4882a593Smuzhiyun 		clk_unprepare_unused_subtree(core);
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1320*4882a593Smuzhiyun 		clk_unprepare_unused_subtree(core);
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	clk_prepare_unlock();
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	return 0;
1325*4882a593Smuzhiyun }
1326*4882a593Smuzhiyun late_initcall_sync(clk_disable_unused);
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun static void clk_unprepare_disable_dev_subtree(struct clk_core *core,
1329*4882a593Smuzhiyun 					      struct device *dev)
1330*4882a593Smuzhiyun {
1331*4882a593Smuzhiyun 	struct clk_core *child;
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node)
1336*4882a593Smuzhiyun 		clk_unprepare_disable_dev_subtree(child, dev);
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	if (core->dev != dev || !core->need_sync)
1339*4882a593Smuzhiyun 		return;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	clk_core_disable_unprepare(core);
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun void clk_sync_state(struct device *dev)
1345*4882a593Smuzhiyun {
1346*4882a593Smuzhiyun 	struct clk_core *core;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	clk_prepare_lock();
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_root_list, child_node)
1351*4882a593Smuzhiyun 		clk_unprepare_disable_dev_subtree(core, dev);
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1354*4882a593Smuzhiyun 		clk_unprepare_disable_dev_subtree(core, dev);
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	clk_prepare_unlock();
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_sync_state);
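
/*
 * Editor's illustration: a clock provider typically wires clk_sync_state()
 * into its driver's .sync_state callback so that clocks held on at boot are
 * released once all consumers have probed.  The driver below is a
 * hypothetical sketch (assumes <linux/platform_device.h>), not a real
 * provider.
 */
static struct platform_driver example_clk_driver = {
	.driver = {
		.name		= "example-clk",
		.sync_state	= clk_sync_state,
	},
};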
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun static int clk_core_determine_round_nolock(struct clk_core *core,
1361*4882a593Smuzhiyun 					   struct clk_rate_request *req)
1362*4882a593Smuzhiyun {
1363*4882a593Smuzhiyun 	long rate;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	if (!core)
1368*4882a593Smuzhiyun 		return 0;
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 	/*
1371*4882a593Smuzhiyun 	 * At this point, core protection will be disabled if:
1372*4882a593Smuzhiyun 	 * - the provider is not protected at all, or
1373*4882a593Smuzhiyun 	 * - the calling consumer is the only one which has exclusivity
1374*4882a593Smuzhiyun 	 *   over the provider
1375*4882a593Smuzhiyun 	 */
1376*4882a593Smuzhiyun 	if (clk_core_rate_is_protected(core)) {
1377*4882a593Smuzhiyun 		req->rate = core->rate;
1378*4882a593Smuzhiyun 	} else if (core->ops->determine_rate) {
1379*4882a593Smuzhiyun 		return core->ops->determine_rate(core->hw, req);
1380*4882a593Smuzhiyun 	} else if (core->ops->round_rate) {
1381*4882a593Smuzhiyun 		rate = core->ops->round_rate(core->hw, req->rate,
1382*4882a593Smuzhiyun 					     &req->best_parent_rate);
1383*4882a593Smuzhiyun 		if (rate < 0)
1384*4882a593Smuzhiyun 			return rate;
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 		req->rate = rate;
1387*4882a593Smuzhiyun 	} else {
1388*4882a593Smuzhiyun 		return -EINVAL;
1389*4882a593Smuzhiyun 	}
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	return 0;
1392*4882a593Smuzhiyun }
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun static void clk_core_init_rate_req(struct clk_core * const core,
1395*4882a593Smuzhiyun 				   struct clk_rate_request *req)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun 	struct clk_core *parent;
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	if (WARN_ON(!core || !req))
1400*4882a593Smuzhiyun 		return;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	parent = core->parent;
1403*4882a593Smuzhiyun 	if (parent) {
1404*4882a593Smuzhiyun 		req->best_parent_hw = parent->hw;
1405*4882a593Smuzhiyun 		req->best_parent_rate = parent->rate;
1406*4882a593Smuzhiyun 	} else {
1407*4882a593Smuzhiyun 		req->best_parent_hw = NULL;
1408*4882a593Smuzhiyun 		req->best_parent_rate = 0;
1409*4882a593Smuzhiyun 	}
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun static bool clk_core_can_round(struct clk_core * const core)
1413*4882a593Smuzhiyun {
1414*4882a593Smuzhiyun 	return core->ops->determine_rate || core->ops->round_rate;
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun static int clk_core_round_rate_nolock(struct clk_core *core,
1418*4882a593Smuzhiyun 				      struct clk_rate_request *req)
1419*4882a593Smuzhiyun {
1420*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	if (!core) {
1423*4882a593Smuzhiyun 		req->rate = 0;
1424*4882a593Smuzhiyun 		return 0;
1425*4882a593Smuzhiyun 	}
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	clk_core_init_rate_req(core, req);
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	if (clk_core_can_round(core))
1430*4882a593Smuzhiyun 		return clk_core_determine_round_nolock(core, req);
1431*4882a593Smuzhiyun 	else if (core->flags & CLK_SET_RATE_PARENT)
1432*4882a593Smuzhiyun 		return clk_core_round_rate_nolock(core->parent, req);
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	req->rate = core->rate;
1435*4882a593Smuzhiyun 	return 0;
1436*4882a593Smuzhiyun }
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun /**
1439*4882a593Smuzhiyun  * __clk_determine_rate - get the closest rate actually supported by a clock
1440*4882a593Smuzhiyun  * @hw: determine the rate of this clock
1441*4882a593Smuzhiyun  * @req: target rate request
1442*4882a593Smuzhiyun  *
1443*4882a593Smuzhiyun  * Useful for clk_ops such as .set_rate and .determine_rate.
1444*4882a593Smuzhiyun  */
1445*4882a593Smuzhiyun int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1446*4882a593Smuzhiyun {
1447*4882a593Smuzhiyun 	if (!hw) {
1448*4882a593Smuzhiyun 		req->rate = 0;
1449*4882a593Smuzhiyun 		return 0;
1450*4882a593Smuzhiyun 	}
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	return clk_core_round_rate_nolock(hw->core, req);
1453*4882a593Smuzhiyun }
1454*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(__clk_determine_rate);
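
/*
 * Editor's illustration: a provider's .determine_rate callback for a
 * hypothetical pass-through clock, forwarding the request to its parent
 * via __clk_determine_rate() as suggested above.
 */
static int example_passthrough_determine_rate(struct clk_hw *hw,
					      struct clk_rate_request *req)
{
	/* let the parent pick the closest rate it can actually produce */
	return __clk_determine_rate(clk_hw_get_parent(hw), req);
}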
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun /**
1457*4882a593Smuzhiyun  * clk_hw_round_rate() - round the given rate for a hw clk
1458*4882a593Smuzhiyun  * @hw: the hw clk for which we are rounding a rate
1459*4882a593Smuzhiyun  * @rate: the rate which is to be rounded
1460*4882a593Smuzhiyun  *
1461*4882a593Smuzhiyun  * Takes in a rate as input and rounds it to a rate that the clk can actually
1462*4882a593Smuzhiyun  * use.
1463*4882a593Smuzhiyun  *
1464*4882a593Smuzhiyun  * Context: prepare_lock must be held.
1465*4882a593Smuzhiyun  *          For clk providers to call from within clk_ops such as .round_rate,
1466*4882a593Smuzhiyun  *          .determine_rate.
1467*4882a593Smuzhiyun  *
1468*4882a593Smuzhiyun  * Return: the rounded rate of the hw clk if it supports the round_rate
1469*4882a593Smuzhiyun  *         operation, otherwise the parent rate.
1470*4882a593Smuzhiyun  */
1471*4882a593Smuzhiyun unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
1472*4882a593Smuzhiyun {
1473*4882a593Smuzhiyun 	int ret;
1474*4882a593Smuzhiyun 	struct clk_rate_request req;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
1477*4882a593Smuzhiyun 	req.rate = rate;
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	ret = clk_core_round_rate_nolock(hw->core, &req);
1480*4882a593Smuzhiyun 	if (ret)
1481*4882a593Smuzhiyun 		return 0;
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 	return req.rate;
1484*4882a593Smuzhiyun }
1485*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_hw_round_rate);
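
/*
 * Editor's illustration: inside a provider op, clk_hw_round_rate() can be
 * used to ask what the parent could really deliver before programming a
 * divider.  The fixed /2 post-divider here is purely hypothetical.
 */
static unsigned long example_pick_rate(struct clk_hw *hw, unsigned long rate)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	/* request twice the target from the parent, then divide back down */
	return clk_hw_round_rate(parent, rate * 2) / 2;
}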
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun /**
1488*4882a593Smuzhiyun  * clk_round_rate - round the given rate for a clk
1489*4882a593Smuzhiyun  * @clk: the clk for which we are rounding a rate
1490*4882a593Smuzhiyun  * @rate: the rate which is to be rounded
1491*4882a593Smuzhiyun  *
1492*4882a593Smuzhiyun  * Takes in a rate as input and rounds it to a rate that the clk can actually
1493*4882a593Smuzhiyun  * use, which is then returned.  If the clk doesn't support the round_rate
1494*4882a593Smuzhiyun  * operation then the parent rate is returned.
1495*4882a593Smuzhiyun  */
1496*4882a593Smuzhiyun long clk_round_rate(struct clk *clk, unsigned long rate)
1497*4882a593Smuzhiyun {
1498*4882a593Smuzhiyun 	struct clk_rate_request req;
1499*4882a593Smuzhiyun 	int ret;
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 	if (!clk)
1502*4882a593Smuzhiyun 		return 0;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	clk_prepare_lock();
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 	if (clk->exclusive_count)
1507*4882a593Smuzhiyun 		clk_core_rate_unprotect(clk->core);
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
1510*4882a593Smuzhiyun 	req.rate = rate;
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	ret = clk_core_round_rate_nolock(clk->core, &req);
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	if (clk->exclusive_count)
1515*4882a593Smuzhiyun 		clk_core_rate_protect(clk->core);
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	clk_prepare_unlock();
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	if (ret)
1520*4882a593Smuzhiyun 		return ret;
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	return req.rate;
1523*4882a593Smuzhiyun }
1524*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_round_rate);
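
/*
 * Editor's illustration: the usual consumer pattern -- probe what the
 * framework would round the rate to before committing with clk_set_rate().
 * example_set_close_rate() is hypothetical.
 */
static int example_set_close_rate(struct clk *clk, unsigned long target)
{
	long rounded = clk_round_rate(clk, target);

	if (rounded < 0)
		return rounded;	/* rounding itself failed */

	return clk_set_rate(clk, rounded);
}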
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun /**
1527*4882a593Smuzhiyun  * __clk_notify - call clk notifier chain
1528*4882a593Smuzhiyun  * @core: clk that is changing rate
1529*4882a593Smuzhiyun  * @msg: clk notifier type (see include/linux/clk.h)
1530*4882a593Smuzhiyun  * @old_rate: old clk rate
1531*4882a593Smuzhiyun  * @new_rate: new clk rate
1532*4882a593Smuzhiyun  *
1533*4882a593Smuzhiyun  * Triggers a notifier call chain on the clk rate-change notification
1534*4882a593Smuzhiyun  * for 'clk'.  Passes a pointer to the struct clk and the previous
1535*4882a593Smuzhiyun  * and current rates to the notifier callback.  Intended to be called by
1536*4882a593Smuzhiyun  * internal clock code only.  Returns NOTIFY_DONE from the last driver
1537*4882a593Smuzhiyun  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1538*4882a593Smuzhiyun  * a driver returns that.
1539*4882a593Smuzhiyun  */
1540*4882a593Smuzhiyun static int __clk_notify(struct clk_core *core, unsigned long msg,
1541*4882a593Smuzhiyun 		unsigned long old_rate, unsigned long new_rate)
1542*4882a593Smuzhiyun {
1543*4882a593Smuzhiyun 	struct clk_notifier *cn;
1544*4882a593Smuzhiyun 	struct clk_notifier_data cnd;
1545*4882a593Smuzhiyun 	int ret = NOTIFY_DONE;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	cnd.old_rate = old_rate;
1548*4882a593Smuzhiyun 	cnd.new_rate = new_rate;
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	list_for_each_entry(cn, &clk_notifier_list, node) {
1551*4882a593Smuzhiyun 		if (cn->clk->core == core) {
1552*4882a593Smuzhiyun 			cnd.clk = cn->clk;
1553*4882a593Smuzhiyun 			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1554*4882a593Smuzhiyun 					&cnd);
1555*4882a593Smuzhiyun 			if (ret & NOTIFY_STOP_MASK)
1556*4882a593Smuzhiyun 				return ret;
1557*4882a593Smuzhiyun 		}
1558*4882a593Smuzhiyun 	}
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	return ret;
1561*4882a593Smuzhiyun }
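
/*
 * Editor's illustration: the consumer-facing side of the chain invoked by
 * __clk_notify() above.  A notifier block registered with
 * clk_notifier_register(clk, &nb) receives PRE/POST/ABORT_RATE_CHANGE
 * messages; the 100 MHz veto below is an arbitrary example policy.
 */
static int example_rate_notifier_cb(struct notifier_block *nb,
				    unsigned long msg, void *data)
{
	struct clk_notifier_data *cnd = data;

	if (msg == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
		return NOTIFY_BAD;	/* abort the rate change */

	return NOTIFY_OK;
}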
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun /**
1564*4882a593Smuzhiyun  * __clk_recalc_accuracies
1565*4882a593Smuzhiyun  * @core: first clk in the subtree
1566*4882a593Smuzhiyun  *
1567*4882a593Smuzhiyun  * Walks the subtree of clks starting with clk and recalculates accuracies as
1568*4882a593Smuzhiyun  * it goes.  Note that if a clk does not implement the .recalc_accuracy
1569*4882a593Smuzhiyun  * callback then it is assumed that the clock will take on the accuracy of its
1570*4882a593Smuzhiyun  * parent.
1571*4882a593Smuzhiyun  */
1572*4882a593Smuzhiyun static void __clk_recalc_accuracies(struct clk_core *core)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun 	unsigned long parent_accuracy = 0;
1575*4882a593Smuzhiyun 	struct clk_core *child;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	if (core->parent)
1580*4882a593Smuzhiyun 		parent_accuracy = core->parent->accuracy;
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	if (core->ops->recalc_accuracy)
1583*4882a593Smuzhiyun 		core->accuracy = core->ops->recalc_accuracy(core->hw,
1584*4882a593Smuzhiyun 							  parent_accuracy);
1585*4882a593Smuzhiyun 	else
1586*4882a593Smuzhiyun 		core->accuracy = parent_accuracy;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node)
1589*4882a593Smuzhiyun 		__clk_recalc_accuracies(child);
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun static long clk_core_get_accuracy_recalc(struct clk_core *core)
1593*4882a593Smuzhiyun {
1594*4882a593Smuzhiyun 	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1595*4882a593Smuzhiyun 		__clk_recalc_accuracies(core);
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	return clk_core_get_accuracy_no_lock(core);
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun /**
1601*4882a593Smuzhiyun  * clk_get_accuracy - return the accuracy of clk
1602*4882a593Smuzhiyun  * @clk: the clk whose accuracy is being returned
1603*4882a593Smuzhiyun  *
1604*4882a593Smuzhiyun  * Simply returns the cached accuracy of the clk, unless
1605*4882a593Smuzhiyun  * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
1606*4882a593Smuzhiyun  * issued.
1607*4882a593Smuzhiyun  * If clk is NULL then returns 0.
1608*4882a593Smuzhiyun  */
1609*4882a593Smuzhiyun long clk_get_accuracy(struct clk *clk)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun 	long accuracy;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	if (!clk)
1614*4882a593Smuzhiyun 		return 0;
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	clk_prepare_lock();
1617*4882a593Smuzhiyun 	accuracy = clk_core_get_accuracy_recalc(clk->core);
1618*4882a593Smuzhiyun 	clk_prepare_unlock();
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	return accuracy;
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_get_accuracy);
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun static unsigned long clk_recalc(struct clk_core *core,
1625*4882a593Smuzhiyun 				unsigned long parent_rate)
1626*4882a593Smuzhiyun {
1627*4882a593Smuzhiyun 	unsigned long rate = parent_rate;
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1630*4882a593Smuzhiyun 		rate = core->ops->recalc_rate(core->hw, parent_rate);
1631*4882a593Smuzhiyun 		clk_pm_runtime_put(core);
1632*4882a593Smuzhiyun 	}
1633*4882a593Smuzhiyun 	return rate;
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun /**
1637*4882a593Smuzhiyun  * __clk_recalc_rates
1638*4882a593Smuzhiyun  * @core: first clk in the subtree
1639*4882a593Smuzhiyun  * @msg: notification type (see include/linux/clk.h)
1640*4882a593Smuzhiyun  *
1641*4882a593Smuzhiyun  * Walks the subtree of clks starting with clk and recalculates rates as it
1642*4882a593Smuzhiyun  * goes.  Note that if a clk does not implement the .recalc_rate callback then
1643*4882a593Smuzhiyun  * it is assumed that the clock will take on the rate of its parent.
1644*4882a593Smuzhiyun  *
1645*4882a593Smuzhiyun  * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1646*4882a593Smuzhiyun  * if necessary.
1647*4882a593Smuzhiyun  */
1648*4882a593Smuzhiyun static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1649*4882a593Smuzhiyun {
1650*4882a593Smuzhiyun 	unsigned long old_rate;
1651*4882a593Smuzhiyun 	unsigned long parent_rate = 0;
1652*4882a593Smuzhiyun 	struct clk_core *child;
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	old_rate = core->rate;
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	if (core->parent)
1659*4882a593Smuzhiyun 		parent_rate = core->parent->rate;
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	core->rate = clk_recalc(core, parent_rate);
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	/*
1664*4882a593Smuzhiyun 	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1665*4882a593Smuzhiyun 	 * & ABORT_RATE_CHANGE notifiers
1666*4882a593Smuzhiyun 	 */
1667*4882a593Smuzhiyun 	if (core->notifier_count && msg)
1668*4882a593Smuzhiyun 		__clk_notify(core, msg, old_rate, core->rate);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node)
1671*4882a593Smuzhiyun 		__clk_recalc_rates(child, msg);
1672*4882a593Smuzhiyun }
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
1675*4882a593Smuzhiyun {
1676*4882a593Smuzhiyun 	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1677*4882a593Smuzhiyun 		__clk_recalc_rates(core, 0);
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	return clk_core_get_rate_nolock(core);
1680*4882a593Smuzhiyun }
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun /**
1683*4882a593Smuzhiyun  * clk_get_rate - return the rate of clk
1684*4882a593Smuzhiyun  * @clk: the clk whose rate is being returned
1685*4882a593Smuzhiyun  *
1686*4882a593Smuzhiyun  * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1687*4882a593Smuzhiyun  * is set, which means a recalc_rate will be issued.
1688*4882a593Smuzhiyun  * If clk is NULL then returns 0.
1689*4882a593Smuzhiyun  */
1690*4882a593Smuzhiyun unsigned long clk_get_rate(struct clk *clk)
1691*4882a593Smuzhiyun {
1692*4882a593Smuzhiyun 	unsigned long rate;
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun 	if (!clk)
1695*4882a593Smuzhiyun 		return 0;
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	clk_prepare_lock();
1698*4882a593Smuzhiyun 	rate = clk_core_get_rate_recalc(clk->core);
1699*4882a593Smuzhiyun 	clk_prepare_unlock();
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 	return rate;
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_get_rate);
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun static int clk_fetch_parent_index(struct clk_core *core,
1706*4882a593Smuzhiyun 				  struct clk_core *parent)
1707*4882a593Smuzhiyun {
1708*4882a593Smuzhiyun 	int i;
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 	if (!parent)
1711*4882a593Smuzhiyun 		return -EINVAL;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	for (i = 0; i < core->num_parents; i++) {
1714*4882a593Smuzhiyun 		/* Found it first try! */
1715*4882a593Smuzhiyun 		if (core->parents[i].core == parent)
1716*4882a593Smuzhiyun 			return i;
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 		/* Something else is here, so keep looking */
1719*4882a593Smuzhiyun 		if (core->parents[i].core)
1720*4882a593Smuzhiyun 			continue;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 		/* Maybe core hasn't been cached but the hw is all we know? */
1723*4882a593Smuzhiyun 		if (core->parents[i].hw) {
1724*4882a593Smuzhiyun 			if (core->parents[i].hw == parent->hw)
1725*4882a593Smuzhiyun 				break;
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun 			/* Didn't match, but we're expecting a clk_hw */
1728*4882a593Smuzhiyun 			continue;
1729*4882a593Smuzhiyun 		}
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 		/* Maybe it hasn't been cached (clk_set_parent() path) */
1732*4882a593Smuzhiyun 		if (parent == clk_core_get(core, i))
1733*4882a593Smuzhiyun 			break;
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 		/* Fallback to comparing globally unique names */
1736*4882a593Smuzhiyun 		if (core->parents[i].name &&
1737*4882a593Smuzhiyun 		    !strcmp(parent->name, core->parents[i].name))
1738*4882a593Smuzhiyun 			break;
1739*4882a593Smuzhiyun 	}
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	if (i == core->num_parents)
1742*4882a593Smuzhiyun 		return -EINVAL;
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	core->parents[i].core = parent;
1745*4882a593Smuzhiyun 	return i;
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun /**
1749*4882a593Smuzhiyun  * clk_hw_get_parent_index - return the index of the parent clock
1750*4882a593Smuzhiyun  * @hw: clk_hw associated with the clk being consumed
1751*4882a593Smuzhiyun  *
1752*4882a593Smuzhiyun  * Fetches and returns the index of the parent clock. Returns -EINVAL if the given
1753*4882a593Smuzhiyun  * clock does not have a current parent.
1754*4882a593Smuzhiyun  */
1755*4882a593Smuzhiyun int clk_hw_get_parent_index(struct clk_hw *hw)
1756*4882a593Smuzhiyun {
1757*4882a593Smuzhiyun 	struct clk_hw *parent = clk_hw_get_parent(hw);
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	if (WARN_ON(parent == NULL))
1760*4882a593Smuzhiyun 		return -EINVAL;
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	return clk_fetch_parent_index(hw->core, parent->core);
1763*4882a593Smuzhiyun }
1764*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
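
/*
 * Editor's illustration: a mux provider could use the helper above in its
 * .save_context op to remember the current parent selection.  The static
 * variable stands in for real per-clock driver state.
 */
static int example_saved_parent;

static int example_mux_save_context(struct clk_hw *hw)
{
	int idx = clk_hw_get_parent_index(hw);

	if (idx < 0)
		return idx;

	example_saved_parent = idx;
	return 0;
}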
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun static void clk_core_hold_state(struct clk_core *core)
1767*4882a593Smuzhiyun {
1768*4882a593Smuzhiyun 	if (core->need_sync || !core->boot_enabled)
1769*4882a593Smuzhiyun 		return;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	if (core->orphan || !dev_has_sync_state(core->dev))
1772*4882a593Smuzhiyun 		return;
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 	if (core->flags & CLK_DONT_HOLD_STATE)
1775*4882a593Smuzhiyun 		return;
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	core->need_sync = !clk_core_prepare_enable(core);
1778*4882a593Smuzhiyun }
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun static void __clk_core_update_orphan_hold_state(struct clk_core *core)
1781*4882a593Smuzhiyun {
1782*4882a593Smuzhiyun 	struct clk_core *child;
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 	if (core->orphan)
1785*4882a593Smuzhiyun 		return;
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	clk_core_hold_state(core);
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node)
1790*4882a593Smuzhiyun 		__clk_core_update_orphan_hold_state(child);
1791*4882a593Smuzhiyun }
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun /*
1794*4882a593Smuzhiyun  * Update the orphan status of @core and all its children.
1795*4882a593Smuzhiyun  */
1796*4882a593Smuzhiyun static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1797*4882a593Smuzhiyun {
1798*4882a593Smuzhiyun 	struct clk_core *child;
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 	core->orphan = is_orphan;
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node)
1803*4882a593Smuzhiyun 		clk_core_update_orphan_status(child, is_orphan);
1804*4882a593Smuzhiyun }
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1807*4882a593Smuzhiyun {
1808*4882a593Smuzhiyun 	bool was_orphan = core->orphan;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	hlist_del(&core->child_node);
1811*4882a593Smuzhiyun 
1812*4882a593Smuzhiyun 	if (new_parent) {
1813*4882a593Smuzhiyun 		bool becomes_orphan = new_parent->orphan;
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 		/* avoid duplicate POST_RATE_CHANGE notifications */
1816*4882a593Smuzhiyun 		if (new_parent->new_child == core)
1817*4882a593Smuzhiyun 			new_parent->new_child = NULL;
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 		hlist_add_head(&core->child_node, &new_parent->children);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 		if (was_orphan != becomes_orphan)
1822*4882a593Smuzhiyun 			clk_core_update_orphan_status(core, becomes_orphan);
1823*4882a593Smuzhiyun 	} else {
1824*4882a593Smuzhiyun 		hlist_add_head(&core->child_node, &clk_orphan_list);
1825*4882a593Smuzhiyun 		if (!was_orphan)
1826*4882a593Smuzhiyun 			clk_core_update_orphan_status(core, true);
1827*4882a593Smuzhiyun 	}
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	core->parent = new_parent;
1830*4882a593Smuzhiyun }
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1833*4882a593Smuzhiyun 					   struct clk_core *parent)
1834*4882a593Smuzhiyun {
1835*4882a593Smuzhiyun 	unsigned long flags;
1836*4882a593Smuzhiyun 	struct clk_core *old_parent = core->parent;
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 	/*
1839*4882a593Smuzhiyun 	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
1840*4882a593Smuzhiyun 	 *
1841*4882a593Smuzhiyun 	 * 2. Migrate prepare state between parents and prevent race with
1842*4882a593Smuzhiyun 	 * clk_enable().
1843*4882a593Smuzhiyun 	 *
1844*4882a593Smuzhiyun 	 * If the clock is not prepared, then a race with
1845*4882a593Smuzhiyun 	 * clk_enable/disable() is impossible since we already have the
1846*4882a593Smuzhiyun 	 * prepare lock (future calls to clk_enable() need to be preceded by
1847*4882a593Smuzhiyun 	 * a clk_prepare()).
1848*4882a593Smuzhiyun 	 *
1849*4882a593Smuzhiyun 	 * If the clock is prepared, migrate the prepared state to the new
1850*4882a593Smuzhiyun 	 * parent and also protect against a race with clk_enable() by
1851*4882a593Smuzhiyun 	 * forcing the clock and the new parent on.  This ensures that all
1852*4882a593Smuzhiyun 	 * future calls to clk_enable() are practically NOPs with respect to
1853*4882a593Smuzhiyun 	 * hardware and software states.
1854*4882a593Smuzhiyun 	 *
1855*4882a593Smuzhiyun 	 * See also: Comment for clk_set_parent() below.
1856*4882a593Smuzhiyun 	 */
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1859*4882a593Smuzhiyun 	if (core->flags & CLK_OPS_PARENT_ENABLE) {
1860*4882a593Smuzhiyun 		clk_core_prepare_enable(old_parent);
1861*4882a593Smuzhiyun 		clk_core_prepare_enable(parent);
1862*4882a593Smuzhiyun 	}
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	/* migrate prepare count if > 0 */
1865*4882a593Smuzhiyun 	if (core->prepare_count) {
1866*4882a593Smuzhiyun 		clk_core_prepare_enable(parent);
1867*4882a593Smuzhiyun 		clk_core_enable_lock(core);
1868*4882a593Smuzhiyun 	}
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 	/* update the clk tree topology */
1871*4882a593Smuzhiyun 	flags = clk_enable_lock();
1872*4882a593Smuzhiyun 	clk_reparent(core, parent);
1873*4882a593Smuzhiyun 	clk_enable_unlock(flags);
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 	return old_parent;
1876*4882a593Smuzhiyun }
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun static void __clk_set_parent_after(struct clk_core *core,
1879*4882a593Smuzhiyun 				   struct clk_core *parent,
1880*4882a593Smuzhiyun 				   struct clk_core *old_parent)
1881*4882a593Smuzhiyun {
1882*4882a593Smuzhiyun 	/*
1883*4882a593Smuzhiyun 	 * Finish the migration of prepare state and undo the changes done
1884*4882a593Smuzhiyun 	 * for preventing a race with clk_enable().
1885*4882a593Smuzhiyun 	 */
1886*4882a593Smuzhiyun 	if (core->prepare_count) {
1887*4882a593Smuzhiyun 		clk_core_disable_lock(core);
1888*4882a593Smuzhiyun 		clk_core_disable_unprepare(old_parent);
1889*4882a593Smuzhiyun 	}
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1892*4882a593Smuzhiyun 	if (core->flags & CLK_OPS_PARENT_ENABLE) {
1893*4882a593Smuzhiyun 		clk_core_disable_unprepare(parent);
1894*4882a593Smuzhiyun 		clk_core_disable_unprepare(old_parent);
1895*4882a593Smuzhiyun 	}
1896*4882a593Smuzhiyun }
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1899*4882a593Smuzhiyun 			    u8 p_index)
1900*4882a593Smuzhiyun {
1901*4882a593Smuzhiyun 	unsigned long flags;
1902*4882a593Smuzhiyun 	int ret = 0;
1903*4882a593Smuzhiyun 	struct clk_core *old_parent;
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 	old_parent = __clk_set_parent_before(core, parent);
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 	trace_clk_set_parent(core, parent);
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	/* change clock input source */
1910*4882a593Smuzhiyun 	if (parent && core->ops->set_parent)
1911*4882a593Smuzhiyun 		ret = core->ops->set_parent(core->hw, p_index);
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	trace_clk_set_parent_complete(core, parent);
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	if (ret) {
1916*4882a593Smuzhiyun 		flags = clk_enable_lock();
1917*4882a593Smuzhiyun 		clk_reparent(core, old_parent);
1918*4882a593Smuzhiyun 		clk_enable_unlock(flags);
1919*4882a593Smuzhiyun 		__clk_set_parent_after(core, old_parent, parent);
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 		return ret;
1922*4882a593Smuzhiyun 	}
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	__clk_set_parent_after(core, parent, old_parent);
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	return 0;
1927*4882a593Smuzhiyun }
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun /**
1930*4882a593Smuzhiyun  * __clk_speculate_rates
1931*4882a593Smuzhiyun  * @core: first clk in the subtree
1932*4882a593Smuzhiyun  * @parent_rate: the "future" rate of clk's parent
1933*4882a593Smuzhiyun  *
1934*4882a593Smuzhiyun  * Walks the subtree of clks starting with clk, speculating rates as it
1935*4882a593Smuzhiyun  * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1936*4882a593Smuzhiyun  *
1937*4882a593Smuzhiyun  * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1938*4882a593Smuzhiyun  * pre-rate change notifications and returns early if no clks in the
1939*4882a593Smuzhiyun  * subtree have subscribed to the notifications.  Note that if a clk does not
1940*4882a593Smuzhiyun  * implement the .recalc_rate callback then it is assumed that the clock will
1941*4882a593Smuzhiyun  * take on the rate of its parent.
1942*4882a593Smuzhiyun  */
1943*4882a593Smuzhiyun static int __clk_speculate_rates(struct clk_core *core,
1944*4882a593Smuzhiyun 				 unsigned long parent_rate)
1945*4882a593Smuzhiyun {
1946*4882a593Smuzhiyun 	struct clk_core *child;
1947*4882a593Smuzhiyun 	unsigned long new_rate;
1948*4882a593Smuzhiyun 	int ret = NOTIFY_DONE;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	new_rate = clk_recalc(core, parent_rate);
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1955*4882a593Smuzhiyun 	if (core->notifier_count)
1956*4882a593Smuzhiyun 		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	if (ret & NOTIFY_STOP_MASK) {
1959*4882a593Smuzhiyun 		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1960*4882a593Smuzhiyun 				__func__, core->name, ret);
1961*4882a593Smuzhiyun 		goto out;
1962*4882a593Smuzhiyun 	}
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node) {
1965*4882a593Smuzhiyun 		ret = __clk_speculate_rates(child, new_rate);
1966*4882a593Smuzhiyun 		if (ret & NOTIFY_STOP_MASK)
1967*4882a593Smuzhiyun 			break;
1968*4882a593Smuzhiyun 	}
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun out:
1971*4882a593Smuzhiyun 	return ret;
1972*4882a593Smuzhiyun }
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1975*4882a593Smuzhiyun 			     struct clk_core *new_parent, u8 p_index)
1976*4882a593Smuzhiyun {
1977*4882a593Smuzhiyun 	struct clk_core *child;
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	core->new_rate = new_rate;
1980*4882a593Smuzhiyun 	core->new_parent = new_parent;
1981*4882a593Smuzhiyun 	core->new_parent_index = p_index;
1982*4882a593Smuzhiyun 	/* include clk in new parent's PRE_RATE_CHANGE notifications */
1983*4882a593Smuzhiyun 	core->new_child = NULL;
1984*4882a593Smuzhiyun 	if (new_parent && new_parent != core->parent)
1985*4882a593Smuzhiyun 		new_parent->new_child = core;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node) {
1988*4882a593Smuzhiyun 		child->new_rate = clk_recalc(child, new_rate);
1989*4882a593Smuzhiyun 		clk_calc_subtree(child, child->new_rate, NULL, 0);
1990*4882a593Smuzhiyun 	}
1991*4882a593Smuzhiyun }
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun /*
1994*4882a593Smuzhiyun  * calculate the new rates returning the topmost clock that has to be
1995*4882a593Smuzhiyun  * changed.
1996*4882a593Smuzhiyun  */
1997*4882a593Smuzhiyun static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1998*4882a593Smuzhiyun 					   unsigned long rate)
1999*4882a593Smuzhiyun {
2000*4882a593Smuzhiyun 	struct clk_core *top = core;
2001*4882a593Smuzhiyun 	struct clk_core *old_parent, *parent;
2002*4882a593Smuzhiyun 	unsigned long best_parent_rate = 0;
2003*4882a593Smuzhiyun 	unsigned long new_rate;
2004*4882a593Smuzhiyun 	unsigned long min_rate;
2005*4882a593Smuzhiyun 	unsigned long max_rate;
2006*4882a593Smuzhiyun 	int p_index = 0;
2007*4882a593Smuzhiyun 	long ret;
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	/* sanity */
2010*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(core))
2011*4882a593Smuzhiyun 		return NULL;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	/* save parent rate, if it exists */
2014*4882a593Smuzhiyun 	parent = old_parent = core->parent;
2015*4882a593Smuzhiyun 	if (parent)
2016*4882a593Smuzhiyun 		best_parent_rate = parent->rate;
2017*4882a593Smuzhiyun 
2018*4882a593Smuzhiyun 	clk_core_get_boundaries(core, &min_rate, &max_rate);
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun 	/* find the closest rate and parent clk/rate */
2021*4882a593Smuzhiyun 	if (clk_core_can_round(core)) {
2022*4882a593Smuzhiyun 		struct clk_rate_request req;
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 		req.rate = rate;
2025*4882a593Smuzhiyun 		req.min_rate = min_rate;
2026*4882a593Smuzhiyun 		req.max_rate = max_rate;
2027*4882a593Smuzhiyun 
2028*4882a593Smuzhiyun 		clk_core_init_rate_req(core, &req);
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 		ret = clk_core_determine_round_nolock(core, &req);
2031*4882a593Smuzhiyun 		if (ret < 0)
2032*4882a593Smuzhiyun 			return NULL;
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 		best_parent_rate = req.best_parent_rate;
2035*4882a593Smuzhiyun 		new_rate = req.rate;
2036*4882a593Smuzhiyun 		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 		if (new_rate < min_rate || new_rate > max_rate)
2039*4882a593Smuzhiyun 			return NULL;
2040*4882a593Smuzhiyun 	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
2041*4882a593Smuzhiyun 		/* pass-through clock without adjustable parent */
2042*4882a593Smuzhiyun 		core->new_rate = core->rate;
2043*4882a593Smuzhiyun 		return NULL;
2044*4882a593Smuzhiyun 	} else {
2045*4882a593Smuzhiyun 		/* pass-through clock with adjustable parent */
2046*4882a593Smuzhiyun 		top = clk_calc_new_rates(parent, rate);
2047*4882a593Smuzhiyun 		new_rate = parent->new_rate;
2048*4882a593Smuzhiyun 		goto out;
2049*4882a593Smuzhiyun 	}
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 	/* some clocks must be gated to change parent */
2052*4882a593Smuzhiyun 	if (parent != old_parent &&
2053*4882a593Smuzhiyun 	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
2054*4882a593Smuzhiyun 		pr_debug("%s: %s not gated but wants to reparent\n",
2055*4882a593Smuzhiyun 			 __func__, core->name);
2056*4882a593Smuzhiyun 		return NULL;
2057*4882a593Smuzhiyun 	}
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 	/* try finding the new parent index */
2060*4882a593Smuzhiyun 	if (parent && core->num_parents > 1) {
2061*4882a593Smuzhiyun 		p_index = clk_fetch_parent_index(core, parent);
2062*4882a593Smuzhiyun 		if (p_index < 0) {
2063*4882a593Smuzhiyun 			pr_debug("%s: clk %s can not be parent of clk %s\n",
2064*4882a593Smuzhiyun 				 __func__, parent->name, core->name);
2065*4882a593Smuzhiyun 			return NULL;
2066*4882a593Smuzhiyun 		}
2067*4882a593Smuzhiyun 	}
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun 	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
2070*4882a593Smuzhiyun 	    best_parent_rate != parent->rate)
2071*4882a593Smuzhiyun 		top = clk_calc_new_rates(parent, best_parent_rate);
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun out:
2074*4882a593Smuzhiyun 	clk_calc_subtree(core, new_rate, parent, p_index);
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun 	return top;
2077*4882a593Smuzhiyun }
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun /*
2080*4882a593Smuzhiyun  * Notify about rate changes in a subtree. Always walk down the whole tree
2081*4882a593Smuzhiyun  * so that in case of an error we can walk down the whole tree again and
2082*4882a593Smuzhiyun  * abort the change.
2083*4882a593Smuzhiyun  */
2084*4882a593Smuzhiyun static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
2085*4882a593Smuzhiyun 						  unsigned long event)
2086*4882a593Smuzhiyun {
2087*4882a593Smuzhiyun 	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
2088*4882a593Smuzhiyun 	int ret = NOTIFY_DONE;
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 	if (core->rate == core->new_rate)
2091*4882a593Smuzhiyun 		return NULL;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	if (core->notifier_count) {
2094*4882a593Smuzhiyun 		ret = __clk_notify(core, event, core->rate, core->new_rate);
2095*4882a593Smuzhiyun 		if (ret & NOTIFY_STOP_MASK)
2096*4882a593Smuzhiyun 			fail_clk = core;
2097*4882a593Smuzhiyun 	}
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun 	if (core->ops->pre_rate_change) {
2100*4882a593Smuzhiyun 		ret = core->ops->pre_rate_change(core->hw, core->rate,
2101*4882a593Smuzhiyun 						 core->new_rate);
2102*4882a593Smuzhiyun 		if (ret)
2103*4882a593Smuzhiyun 			fail_clk = core;
2104*4882a593Smuzhiyun 	}
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	hlist_for_each_entry(child, &core->children, child_node) {
2107*4882a593Smuzhiyun 		/* Skip children who will be reparented to another clock */
2108*4882a593Smuzhiyun 		if (child->new_parent && child->new_parent != core)
2109*4882a593Smuzhiyun 			continue;
2110*4882a593Smuzhiyun 		tmp_clk = clk_propagate_rate_change(child, event);
2111*4882a593Smuzhiyun 		if (tmp_clk)
2112*4882a593Smuzhiyun 			fail_clk = tmp_clk;
2113*4882a593Smuzhiyun 	}
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 	/* handle the new child who might not be in core->children yet */
2116*4882a593Smuzhiyun 	if (core->new_child) {
2117*4882a593Smuzhiyun 		tmp_clk = clk_propagate_rate_change(core->new_child, event);
2118*4882a593Smuzhiyun 		if (tmp_clk)
2119*4882a593Smuzhiyun 			fail_clk = tmp_clk;
2120*4882a593Smuzhiyun 	}
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	return fail_clk;
2123*4882a593Smuzhiyun }
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun /*
2126*4882a593Smuzhiyun  * walk down a subtree and set the new rates notifying the rate
2127*4882a593Smuzhiyun  * change on the way
2128*4882a593Smuzhiyun  */
2129*4882a593Smuzhiyun static void clk_change_rate(struct clk_core *core)
2130*4882a593Smuzhiyun {
2131*4882a593Smuzhiyun 	struct clk_core *child;
2132*4882a593Smuzhiyun 	struct hlist_node *tmp;
2133*4882a593Smuzhiyun 	unsigned long old_rate;
2134*4882a593Smuzhiyun 	unsigned long best_parent_rate = 0;
2135*4882a593Smuzhiyun 	bool skip_set_rate = false;
2136*4882a593Smuzhiyun 	struct clk_core *old_parent;
2137*4882a593Smuzhiyun 	struct clk_core *parent = NULL;
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	old_rate = core->rate;
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun 	if (core->new_parent) {
2142*4882a593Smuzhiyun 		parent = core->new_parent;
2143*4882a593Smuzhiyun 		best_parent_rate = core->new_parent->rate;
2144*4882a593Smuzhiyun 	} else if (core->parent) {
2145*4882a593Smuzhiyun 		parent = core->parent;
2146*4882a593Smuzhiyun 		best_parent_rate = core->parent->rate;
2147*4882a593Smuzhiyun 	}
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	if (clk_pm_runtime_get(core))
2150*4882a593Smuzhiyun 		return;
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	if (core->flags & CLK_SET_RATE_UNGATE) {
2153*4882a593Smuzhiyun 		unsigned long flags;
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 		clk_core_prepare(core);
2156*4882a593Smuzhiyun 		flags = clk_enable_lock();
2157*4882a593Smuzhiyun 		clk_core_enable(core);
2158*4882a593Smuzhiyun 		clk_enable_unlock(flags);
2159*4882a593Smuzhiyun 	}
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun 	if (core->new_parent && core->new_parent != core->parent) {
2162*4882a593Smuzhiyun 		old_parent = __clk_set_parent_before(core, core->new_parent);
2163*4882a593Smuzhiyun 		trace_clk_set_parent(core, core->new_parent);
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 		if (core->ops->set_rate_and_parent) {
2166*4882a593Smuzhiyun 			skip_set_rate = true;
2167*4882a593Smuzhiyun 			core->ops->set_rate_and_parent(core->hw, core->new_rate,
2168*4882a593Smuzhiyun 					best_parent_rate,
2169*4882a593Smuzhiyun 					core->new_parent_index);
2170*4882a593Smuzhiyun 		} else if (core->ops->set_parent) {
2171*4882a593Smuzhiyun 			core->ops->set_parent(core->hw, core->new_parent_index);
2172*4882a593Smuzhiyun 		}
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 		trace_clk_set_parent_complete(core, core->new_parent);
2175*4882a593Smuzhiyun 		__clk_set_parent_after(core, core->new_parent, old_parent);
2176*4882a593Smuzhiyun 	}
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 	if (core->flags & CLK_OPS_PARENT_ENABLE)
2179*4882a593Smuzhiyun 		clk_core_prepare_enable(parent);
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	trace_clk_set_rate(core, core->new_rate);
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	if (!skip_set_rate && core->ops->set_rate)
2184*4882a593Smuzhiyun 		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 	trace_clk_set_rate_complete(core, core->new_rate);
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun 	core->rate = clk_recalc(core, best_parent_rate);
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 	if (core->flags & CLK_SET_RATE_UNGATE) {
2191*4882a593Smuzhiyun 		unsigned long flags;
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 		flags = clk_enable_lock();
2194*4882a593Smuzhiyun 		clk_core_disable(core);
2195*4882a593Smuzhiyun 		clk_enable_unlock(flags);
2196*4882a593Smuzhiyun 		clk_core_unprepare(core);
2197*4882a593Smuzhiyun 	}
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 	if (core->flags & CLK_OPS_PARENT_ENABLE)
2200*4882a593Smuzhiyun 		clk_core_disable_unprepare(parent);
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun 	if (core->notifier_count && old_rate != core->rate)
2203*4882a593Smuzhiyun 		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 	if (core->flags & CLK_RECALC_NEW_RATES)
2206*4882a593Smuzhiyun 		(void)clk_calc_new_rates(core, core->new_rate);
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 	if (core->ops->post_rate_change)
2209*4882a593Smuzhiyun 		core->ops->post_rate_change(core->hw, old_rate, core->rate);
2210*4882a593Smuzhiyun 
2211*4882a593Smuzhiyun 	/*
2212*4882a593Smuzhiyun 	 * Use safe iteration, as change_rate can actually swap parents
2213*4882a593Smuzhiyun 	 * for certain clock types.
2214*4882a593Smuzhiyun 	 */
2215*4882a593Smuzhiyun 	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2216*4882a593Smuzhiyun 		/* Skip children who will be reparented to another clock */
2217*4882a593Smuzhiyun 		if (child->new_parent && child->new_parent != core)
2218*4882a593Smuzhiyun 			continue;
2219*4882a593Smuzhiyun 		clk_change_rate(child);
2220*4882a593Smuzhiyun 	}
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	/* handle the new child who might not be in core->children yet */
2223*4882a593Smuzhiyun 	if (core->new_child)
2224*4882a593Smuzhiyun 		clk_change_rate(core->new_child);
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	clk_pm_runtime_put(core);
2227*4882a593Smuzhiyun }
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2230*4882a593Smuzhiyun 						     unsigned long req_rate)
2231*4882a593Smuzhiyun {
2232*4882a593Smuzhiyun 	int ret, cnt;
2233*4882a593Smuzhiyun 	struct clk_rate_request req;
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun 	if (!core)
2238*4882a593Smuzhiyun 		return 0;
2239*4882a593Smuzhiyun 
2240*4882a593Smuzhiyun 	/* simulate what the rate would be if it could be freely set */
2241*4882a593Smuzhiyun 	cnt = clk_core_rate_nuke_protect(core);
2242*4882a593Smuzhiyun 	if (cnt < 0)
2243*4882a593Smuzhiyun 		return cnt;
2244*4882a593Smuzhiyun 
2245*4882a593Smuzhiyun 	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
2246*4882a593Smuzhiyun 	req.rate = req_rate;
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	ret = clk_core_round_rate_nolock(core, &req);
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	/* restore the protection */
2251*4882a593Smuzhiyun 	clk_core_rate_restore_protect(core, cnt);
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	return ret ? 0 : req.rate;
2254*4882a593Smuzhiyun }
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun static int clk_core_set_rate_nolock(struct clk_core *core,
2257*4882a593Smuzhiyun 				    unsigned long req_rate)
2258*4882a593Smuzhiyun {
2259*4882a593Smuzhiyun 	struct clk_core *top, *fail_clk;
2260*4882a593Smuzhiyun 	unsigned long rate;
2261*4882a593Smuzhiyun 	int ret = 0;
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun 	if (!core)
2264*4882a593Smuzhiyun 		return 0;
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 	rate = clk_core_req_round_rate_nolock(core, req_rate);
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 	/* bail early if nothing to do */
2269*4882a593Smuzhiyun 	if (rate == clk_core_get_rate_nolock(core))
2270*4882a593Smuzhiyun 		return 0;
2271*4882a593Smuzhiyun 
2272*4882a593Smuzhiyun 	/* fail on a direct rate set of a protected provider */
2273*4882a593Smuzhiyun 	if (clk_core_rate_is_protected(core))
2274*4882a593Smuzhiyun 		return -EBUSY;
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun 	/* calculate new rates and get the topmost changed clock */
2277*4882a593Smuzhiyun 	top = clk_calc_new_rates(core, req_rate);
2278*4882a593Smuzhiyun 	if (!top)
2279*4882a593Smuzhiyun 		return -EINVAL;
2280*4882a593Smuzhiyun 
2281*4882a593Smuzhiyun 	ret = clk_pm_runtime_get(core);
2282*4882a593Smuzhiyun 	if (ret)
2283*4882a593Smuzhiyun 		return ret;
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	/* notify that we are about to change rates */
2286*4882a593Smuzhiyun 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2287*4882a593Smuzhiyun 	if (fail_clk) {
2288*4882a593Smuzhiyun 		pr_debug("%s: failed to set %s rate\n", __func__,
2289*4882a593Smuzhiyun 				fail_clk->name);
2290*4882a593Smuzhiyun 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2291*4882a593Smuzhiyun 		ret = -EBUSY;
2292*4882a593Smuzhiyun 		goto err;
2293*4882a593Smuzhiyun 	}
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun 	/* change the rates */
2296*4882a593Smuzhiyun 	clk_change_rate(top);
2297*4882a593Smuzhiyun 
2298*4882a593Smuzhiyun 	core->req_rate = req_rate;
2299*4882a593Smuzhiyun err:
2300*4882a593Smuzhiyun 	clk_pm_runtime_put(core);
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun 	return ret;
2303*4882a593Smuzhiyun }
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun /**
2306*4882a593Smuzhiyun  * clk_set_rate - specify a new rate for clk
2307*4882a593Smuzhiyun  * @clk: the clk whose rate is being changed
2308*4882a593Smuzhiyun  * @rate: the new rate for clk
2309*4882a593Smuzhiyun  *
2310*4882a593Smuzhiyun  * In the simplest case clk_set_rate will only adjust the rate of clk.
2311*4882a593Smuzhiyun  *
2312*4882a593Smuzhiyun  * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2313*4882a593Smuzhiyun  * propagate up to clk's parent; whether or not this happens depends on the
2314*4882a593Smuzhiyun  * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
2315*4882a593Smuzhiyun  * after calling .round_rate then upstream parent propagation is ignored.  If
2316*4882a593Smuzhiyun  * *parent_rate comes back with a new rate for clk's parent then we propagate
2317*4882a593Smuzhiyun  * up to clk's parent and set its rate.  Upward propagation will continue
2318*4882a593Smuzhiyun  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2319*4882a593Smuzhiyun  * .round_rate stops requesting changes to clk's parent_rate.
2320*4882a593Smuzhiyun  *
2321*4882a593Smuzhiyun  * Rate changes are accomplished via tree traversal that also recalculates the
2322*4882a593Smuzhiyun  * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2323*4882a593Smuzhiyun  *
2324*4882a593Smuzhiyun  * Returns 0 on success, a negative errno otherwise.
2325*4882a593Smuzhiyun  */
2326*4882a593Smuzhiyun int clk_set_rate(struct clk *clk, unsigned long rate)
2327*4882a593Smuzhiyun {
2328*4882a593Smuzhiyun 	int ret;
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	if (!clk)
2331*4882a593Smuzhiyun 		return 0;
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	/* prevent racing with updates to the clock topology */
2334*4882a593Smuzhiyun 	clk_prepare_lock();
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	if (clk->exclusive_count)
2337*4882a593Smuzhiyun 		clk_core_rate_unprotect(clk->core);
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	ret = clk_core_set_rate_nolock(clk->core, rate);
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun 	if (clk->exclusive_count)
2342*4882a593Smuzhiyun 		clk_core_rate_protect(clk->core);
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	clk_prepare_unlock();
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 	return ret;
2347*4882a593Smuzhiyun }
2348*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_set_rate);
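/*
 * A minimal consumer-side sketch of clk_set_rate(), assuming a hypothetical
 * driver that looked up a "baud" clock via devm_clk_get(). clk_round_rate()
 * is called first so the caller can see what rate the hardware would
 * actually provide before committing to it.
 */
#if 0	/* illustrative sketch */
static int example_set_baud_clock(struct device *dev)
{
	struct clk *clk;
	long rounded;

	clk = devm_clk_get(dev, "baud");	/* hypothetical con_id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rounded = clk_round_rate(clk, 48000000);
	if (rounded <= 0)
		return -EINVAL;

	return clk_set_rate(clk, rounded);
}
#endif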
2349*4882a593Smuzhiyun 
2350*4882a593Smuzhiyun /**
2351*4882a593Smuzhiyun  * clk_set_rate_exclusive - specify a new rate and get exclusive control
2352*4882a593Smuzhiyun  * @clk: the clk whose rate is being changed
2353*4882a593Smuzhiyun  * @rate: the new rate for clk
2354*4882a593Smuzhiyun  *
2355*4882a593Smuzhiyun  * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2356*4882a593Smuzhiyun  * within a critical section.
2357*4882a593Smuzhiyun  *
2358*4882a593Smuzhiyun  * This can be used initially to ensure that at least 1 consumer is
2359*4882a593Smuzhiyun  * satisfied when several consumers are competing for exclusivity over the
2360*4882a593Smuzhiyun  * same clock provider.
2361*4882a593Smuzhiyun  *
2362*4882a593Smuzhiyun  * The exclusivity is not applied if setting the rate failed.
2363*4882a593Smuzhiyun  *
2364*4882a593Smuzhiyun  * Calls to clk_rate_exclusive_get() should be balanced with calls to
2365*4882a593Smuzhiyun  * clk_rate_exclusive_put().
2366*4882a593Smuzhiyun  *
2367*4882a593Smuzhiyun  * Returns 0 on success, a negative errno otherwise.
2368*4882a593Smuzhiyun  */
2369*4882a593Smuzhiyun int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2370*4882a593Smuzhiyun {
2371*4882a593Smuzhiyun 	int ret;
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun 	if (!clk)
2374*4882a593Smuzhiyun 		return 0;
2375*4882a593Smuzhiyun 
2376*4882a593Smuzhiyun 	/* prevent racing with updates to the clock topology */
2377*4882a593Smuzhiyun 	clk_prepare_lock();
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	/*
2380*4882a593Smuzhiyun 	 * The temporary protection removal is not here, on purpose.
2381*4882a593Smuzhiyun 	 * This function is meant to be used instead of clk_rate_protect,
2382*4882a593Smuzhiyun 	 * so it is called before the consumer code path protects the clock provider.
2383*4882a593Smuzhiyun 	 */
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 	ret = clk_core_set_rate_nolock(clk->core, rate);
2386*4882a593Smuzhiyun 	if (!ret) {
2387*4882a593Smuzhiyun 		clk_core_rate_protect(clk->core);
2388*4882a593Smuzhiyun 		clk->exclusive_count++;
2389*4882a593Smuzhiyun 	}
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 	clk_prepare_unlock();
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun 	return ret;
2394*4882a593Smuzhiyun }
2395*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
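/*
 * A sketch of the exclusive-rate pattern described above, assuming a
 * hypothetical consumer that must keep its clock pinned at a rate while a
 * transfer runs. The exclusivity taken here must be dropped again with
 * clk_rate_exclusive_put(); per the comment above, no exclusivity is taken
 * if setting the rate fails.
 */
#if 0	/* illustrative sketch */
static int example_pin_rate(struct clk *clk)
{
	int ret;

	ret = clk_set_rate_exclusive(clk, 100000000);
	if (ret)
		return ret;	/* exclusivity was not applied on failure */

	/* ... time-critical work that relies on the pinned rate ... */

	clk_rate_exclusive_put(clk);
	return 0;
}
#endif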
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun /**
2398*4882a593Smuzhiyun  * clk_set_rate_range - set a rate range for a clock source
2399*4882a593Smuzhiyun  * @clk: clock source
2400*4882a593Smuzhiyun  * @min: desired minimum clock rate in Hz, inclusive
2401*4882a593Smuzhiyun  * @max: desired maximum clock rate in Hz, inclusive
2402*4882a593Smuzhiyun  *
2403*4882a593Smuzhiyun  * Returns success (0) or negative errno.
2404*4882a593Smuzhiyun  */
2405*4882a593Smuzhiyun int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2406*4882a593Smuzhiyun {
2407*4882a593Smuzhiyun 	int ret = 0;
2408*4882a593Smuzhiyun 	unsigned long old_min, old_max, rate;
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun 	if (!clk)
2411*4882a593Smuzhiyun 		return 0;
2412*4882a593Smuzhiyun 
2413*4882a593Smuzhiyun 	if (min > max) {
2414*4882a593Smuzhiyun 		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2415*4882a593Smuzhiyun 		       __func__, clk->core->name, clk->dev_id, clk->con_id,
2416*4882a593Smuzhiyun 		       min, max);
2417*4882a593Smuzhiyun 		return -EINVAL;
2418*4882a593Smuzhiyun 	}
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun 	clk_prepare_lock();
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	if (clk->exclusive_count)
2423*4882a593Smuzhiyun 		clk_core_rate_unprotect(clk->core);
2424*4882a593Smuzhiyun 
2425*4882a593Smuzhiyun 	/* Save the current values in case we need to rollback the change */
2426*4882a593Smuzhiyun 	old_min = clk->min_rate;
2427*4882a593Smuzhiyun 	old_max = clk->max_rate;
2428*4882a593Smuzhiyun 	clk->min_rate = min;
2429*4882a593Smuzhiyun 	clk->max_rate = max;
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	if (!clk_core_check_boundaries(clk->core, min, max)) {
2432*4882a593Smuzhiyun 		ret = -EINVAL;
2433*4882a593Smuzhiyun 		goto out;
2434*4882a593Smuzhiyun 	}
2435*4882a593Smuzhiyun 
2436*4882a593Smuzhiyun 	rate = clk_core_get_rate_nolock(clk->core);
2437*4882a593Smuzhiyun 	if (rate < min || rate > max) {
2438*4882a593Smuzhiyun 		/*
2439*4882a593Smuzhiyun 		 * FIXME:
2440*4882a593Smuzhiyun 		 * We are in a bit of trouble here: the current rate is outside
2441*4882a593Smuzhiyun 		 * the requested range. We are going to try to request an
2442*4882a593Smuzhiyun 		 * appropriate range boundary, but there is a catch. It may fail
2443*4882a593Smuzhiyun 		 * for the usual reasons (clock broken, clock protected, etc) but also
2444*4882a593Smuzhiyun 		 * because:
2445*4882a593Smuzhiyun 		 * - round_rate() was not favorable and fell on the wrong
2446*4882a593Smuzhiyun 		 *   side of the boundary
2447*4882a593Smuzhiyun 		 * - the determine_rate() callback does not really check for
2448*4882a593Smuzhiyun 		 *   this corner case when determining the rate
2449*4882a593Smuzhiyun 		 */
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 		if (rate < min)
2452*4882a593Smuzhiyun 			rate = min;
2453*4882a593Smuzhiyun 		else
2454*4882a593Smuzhiyun 			rate = max;
2455*4882a593Smuzhiyun 
2456*4882a593Smuzhiyun 		ret = clk_core_set_rate_nolock(clk->core, rate);
2457*4882a593Smuzhiyun 		if (ret) {
2458*4882a593Smuzhiyun 			/* rollback the changes */
2459*4882a593Smuzhiyun 			clk->min_rate = old_min;
2460*4882a593Smuzhiyun 			clk->max_rate = old_max;
2461*4882a593Smuzhiyun 		}
2462*4882a593Smuzhiyun 	}
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun out:
2465*4882a593Smuzhiyun 	if (clk->exclusive_count)
2466*4882a593Smuzhiyun 		clk_core_rate_protect(clk->core);
2467*4882a593Smuzhiyun 
2468*4882a593Smuzhiyun 	clk_prepare_unlock();
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun 	return ret;
2471*4882a593Smuzhiyun }
2472*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_set_rate_range);
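/*
 * A sketch of clk_set_rate_range(), assuming a hypothetical thermal
 * governor that caps a clock between floor and ceiling values. As the code
 * above shows, the core re-evaluates the current rate and moves it to the
 * nearest boundary if it falls outside the new range.
 */
#if 0	/* illustrative sketch */
static int example_apply_thermal_cap(struct clk *clk)
{
	/* keep the clock between 200 MHz and 800 MHz, inclusive */
	return clk_set_rate_range(clk, 200000000, 800000000);
}
#endif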
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun /**
2475*4882a593Smuzhiyun  * clk_set_min_rate - set a minimum clock rate for a clock source
2476*4882a593Smuzhiyun  * @clk: clock source
2477*4882a593Smuzhiyun  * @rate: desired minimum clock rate in Hz, inclusive
2478*4882a593Smuzhiyun  *
2479*4882a593Smuzhiyun  * Returns success (0) or negative errno.
2480*4882a593Smuzhiyun  */
2481*4882a593Smuzhiyun int clk_set_min_rate(struct clk *clk, unsigned long rate)
2482*4882a593Smuzhiyun {
2483*4882a593Smuzhiyun 	if (!clk)
2484*4882a593Smuzhiyun 		return 0;
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun 	return clk_set_rate_range(clk, rate, clk->max_rate);
2487*4882a593Smuzhiyun }
2488*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_set_min_rate);
2489*4882a593Smuzhiyun 
2490*4882a593Smuzhiyun /**
2491*4882a593Smuzhiyun  * clk_set_max_rate - set a maximum clock rate for a clock source
2492*4882a593Smuzhiyun  * @clk: clock source
2493*4882a593Smuzhiyun  * @rate: desired maximum clock rate in Hz, inclusive
2494*4882a593Smuzhiyun  *
2495*4882a593Smuzhiyun  * Returns success (0) or negative errno.
2496*4882a593Smuzhiyun  */
2497*4882a593Smuzhiyun int clk_set_max_rate(struct clk *clk, unsigned long rate)
2498*4882a593Smuzhiyun {
2499*4882a593Smuzhiyun 	if (!clk)
2500*4882a593Smuzhiyun 		return 0;
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun 	return clk_set_rate_range(clk, clk->min_rate, rate);
2503*4882a593Smuzhiyun }
2504*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_set_max_rate);
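/*
 * clk_set_min_rate() and clk_set_max_rate() are thin wrappers around
 * clk_set_rate_range() that adjust one end of the per-user range while
 * keeping the other end untouched. A sketch, assuming a hypothetical
 * consumer that first sets a floor and later lowers the ceiling:
 */
#if 0	/* illustrative sketch */
static int example_adjust_bounds(struct clk *clk)
{
	int ret;

	ret = clk_set_min_rate(clk, 100000000);		/* floor: 100 MHz */
	if (ret)
		return ret;

	return clk_set_max_rate(clk, 400000000);	/* ceiling: 400 MHz */
}
#endif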
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun /**
2507*4882a593Smuzhiyun  * clk_get_parent - return the parent of a clk
2508*4882a593Smuzhiyun  * @clk: the clk whose parent gets returned
2509*4882a593Smuzhiyun  *
2510*4882a593Smuzhiyun  * Simply returns clk->parent.  Returns NULL if clk is NULL.
2511*4882a593Smuzhiyun  */
2512*4882a593Smuzhiyun struct clk *clk_get_parent(struct clk *clk)
2513*4882a593Smuzhiyun {
2514*4882a593Smuzhiyun 	struct clk *parent;
2515*4882a593Smuzhiyun 
2516*4882a593Smuzhiyun 	if (!clk)
2517*4882a593Smuzhiyun 		return NULL;
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	clk_prepare_lock();
2520*4882a593Smuzhiyun 	/* TODO: Create a per-user clk and change callers to call clk_put */
2521*4882a593Smuzhiyun 	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2522*4882a593Smuzhiyun 	clk_prepare_unlock();
2523*4882a593Smuzhiyun 
2524*4882a593Smuzhiyun 	return parent;
2525*4882a593Smuzhiyun }
2526*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_get_parent);
2527*4882a593Smuzhiyun 
2528*4882a593Smuzhiyun static struct clk_core *__clk_init_parent(struct clk_core *core)
2529*4882a593Smuzhiyun {
2530*4882a593Smuzhiyun 	u8 index = 0;
2531*4882a593Smuzhiyun 
2532*4882a593Smuzhiyun 	if (core->num_parents > 1 && core->ops->get_parent)
2533*4882a593Smuzhiyun 		index = core->ops->get_parent(core->hw);
2534*4882a593Smuzhiyun 
2535*4882a593Smuzhiyun 	return clk_core_get_parent_by_index(core, index);
2536*4882a593Smuzhiyun }
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun static void clk_core_reparent(struct clk_core *core,
2539*4882a593Smuzhiyun 				  struct clk_core *new_parent)
2540*4882a593Smuzhiyun {
2541*4882a593Smuzhiyun 	clk_reparent(core, new_parent);
2542*4882a593Smuzhiyun 	__clk_recalc_accuracies(core);
2543*4882a593Smuzhiyun 	__clk_recalc_rates(core, POST_RATE_CHANGE);
2544*4882a593Smuzhiyun }
2545*4882a593Smuzhiyun 
2546*4882a593Smuzhiyun void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2547*4882a593Smuzhiyun {
2548*4882a593Smuzhiyun 	if (!hw)
2549*4882a593Smuzhiyun 		return;
2550*4882a593Smuzhiyun 
2551*4882a593Smuzhiyun 	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2552*4882a593Smuzhiyun }
2553*4882a593Smuzhiyun 
2554*4882a593Smuzhiyun /**
2555*4882a593Smuzhiyun  * clk_has_parent - check if a clock is a possible parent for another
2556*4882a593Smuzhiyun  * @clk: clock source
2557*4882a593Smuzhiyun  * @parent: parent clock source
2558*4882a593Smuzhiyun  *
2559*4882a593Smuzhiyun  * This function can be used in drivers that need to check that a clock can be
2560*4882a593Smuzhiyun  * the parent of another without actually changing the parent.
2561*4882a593Smuzhiyun  *
2562*4882a593Smuzhiyun  * Returns true if @parent is a possible parent for @clk, false otherwise.
2563*4882a593Smuzhiyun  */
2564*4882a593Smuzhiyun bool clk_has_parent(struct clk *clk, struct clk *parent)
2565*4882a593Smuzhiyun {
2566*4882a593Smuzhiyun 	struct clk_core *core, *parent_core;
2567*4882a593Smuzhiyun 	int i;
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 	/* NULL clocks should be nops, so return success if either is NULL. */
2570*4882a593Smuzhiyun 	if (!clk || !parent)
2571*4882a593Smuzhiyun 		return true;
2572*4882a593Smuzhiyun 
2573*4882a593Smuzhiyun 	core = clk->core;
2574*4882a593Smuzhiyun 	parent_core = parent->core;
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun 	/* Optimize for the case where the parent is already the parent. */
2577*4882a593Smuzhiyun 	if (core->parent == parent_core)
2578*4882a593Smuzhiyun 		return true;
2579*4882a593Smuzhiyun 
2580*4882a593Smuzhiyun 	for (i = 0; i < core->num_parents; i++)
2581*4882a593Smuzhiyun 		if (!strcmp(core->parents[i].name, parent_core->name))
2582*4882a593Smuzhiyun 			return true;
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 	return false;
2585*4882a593Smuzhiyun }
2586*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_has_parent);
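/*
 * A sketch of using clk_has_parent() to validate a mux input before
 * attempting a switch, assuming hypothetical "mux" and "pll" clocks that
 * the caller already obtained.
 */
#if 0	/* illustrative sketch */
static int example_try_reparent(struct clk *mux, struct clk *pll)
{
	if (!clk_has_parent(mux, pll))
		return -EINVAL;	/* pll is not a possible input of mux */

	return clk_set_parent(mux, pll);
}
#endif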
2587*4882a593Smuzhiyun 
2588*4882a593Smuzhiyun static int clk_core_set_parent_nolock(struct clk_core *core,
2589*4882a593Smuzhiyun 				      struct clk_core *parent)
2590*4882a593Smuzhiyun {
2591*4882a593Smuzhiyun 	int ret = 0;
2592*4882a593Smuzhiyun 	int p_index = 0;
2593*4882a593Smuzhiyun 	unsigned long p_rate = 0;
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun 	if (!core)
2598*4882a593Smuzhiyun 		return 0;
2599*4882a593Smuzhiyun 
2600*4882a593Smuzhiyun 	if (core->parent == parent)
2601*4882a593Smuzhiyun 		return 0;
2602*4882a593Smuzhiyun 
2603*4882a593Smuzhiyun 	/* verify ops for multi-parent clks */
2604*4882a593Smuzhiyun 	if (core->num_parents > 1 && !core->ops->set_parent)
2605*4882a593Smuzhiyun 		return -EPERM;
2606*4882a593Smuzhiyun 
2607*4882a593Smuzhiyun 	/* check that we are allowed to re-parent if the clock is in use */
2608*4882a593Smuzhiyun 	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2609*4882a593Smuzhiyun 		return -EBUSY;
2610*4882a593Smuzhiyun 
2611*4882a593Smuzhiyun 	if (clk_core_rate_is_protected(core))
2612*4882a593Smuzhiyun 		return -EBUSY;
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	/* try finding the new parent index */
2615*4882a593Smuzhiyun 	if (parent) {
2616*4882a593Smuzhiyun 		p_index = clk_fetch_parent_index(core, parent);
2617*4882a593Smuzhiyun 		if (p_index < 0) {
2618*4882a593Smuzhiyun 			pr_debug("%s: clk %s can not be parent of clk %s\n",
2619*4882a593Smuzhiyun 					__func__, parent->name, core->name);
2620*4882a593Smuzhiyun 			return p_index;
2621*4882a593Smuzhiyun 		}
2622*4882a593Smuzhiyun 		p_rate = parent->rate;
2623*4882a593Smuzhiyun 	}
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 	ret = clk_pm_runtime_get(core);
2626*4882a593Smuzhiyun 	if (ret)
2627*4882a593Smuzhiyun 		return ret;
2628*4882a593Smuzhiyun 
2629*4882a593Smuzhiyun 	/* propagate PRE_RATE_CHANGE notifications */
2630*4882a593Smuzhiyun 	ret = __clk_speculate_rates(core, p_rate);
2631*4882a593Smuzhiyun 
2632*4882a593Smuzhiyun 	/* abort if a driver objects */
2633*4882a593Smuzhiyun 	if (ret & NOTIFY_STOP_MASK)
2634*4882a593Smuzhiyun 		goto runtime_put;
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	/* do the re-parent */
2637*4882a593Smuzhiyun 	ret = __clk_set_parent(core, parent, p_index);
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun 	/* propagate rate and accuracy recalculation accordingly */
2640*4882a593Smuzhiyun 	if (ret) {
2641*4882a593Smuzhiyun 		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
2642*4882a593Smuzhiyun 	} else {
2643*4882a593Smuzhiyun 		__clk_recalc_rates(core, POST_RATE_CHANGE);
2644*4882a593Smuzhiyun 		__clk_recalc_accuracies(core);
2645*4882a593Smuzhiyun 	}
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun runtime_put:
2648*4882a593Smuzhiyun 	clk_pm_runtime_put(core);
2649*4882a593Smuzhiyun 
2650*4882a593Smuzhiyun 	return ret;
2651*4882a593Smuzhiyun }
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2654*4882a593Smuzhiyun {
2655*4882a593Smuzhiyun 	return clk_core_set_parent_nolock(hw->core, parent->core);
2656*4882a593Smuzhiyun }
2657*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun /**
2660*4882a593Smuzhiyun  * clk_set_parent - switch the parent of a mux clk
2661*4882a593Smuzhiyun  * @clk: the mux clk whose input we are switching
2662*4882a593Smuzhiyun  * @parent: the new input to clk
2663*4882a593Smuzhiyun  *
2664*4882a593Smuzhiyun  * Re-parent clk to use parent as its new input source.  If clk is in
2665*4882a593Smuzhiyun  * prepared state, the clk will get enabled for the duration of this call. If
2666*4882a593Smuzhiyun  * that's not acceptable for a specific clk (e.g. the consumer can't handle
2667*4882a593Smuzhiyun  * that, the reparenting is glitchy in hardware, etc), use the
2668*4882a593Smuzhiyun  * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2669*4882a593Smuzhiyun  *
2670*4882a593Smuzhiyun  * After successfully changing clk's parent clk_set_parent will update the
2671*4882a593Smuzhiyun  * clk topology, sysfs topology and propagate rate recalculation via
2672*4882a593Smuzhiyun  * __clk_recalc_rates.
2673*4882a593Smuzhiyun  *
2674*4882a593Smuzhiyun  * Returns 0 on success, a negative errno otherwise.
2675*4882a593Smuzhiyun  */
2676*4882a593Smuzhiyun int clk_set_parent(struct clk *clk, struct clk *parent)
2677*4882a593Smuzhiyun {
2678*4882a593Smuzhiyun 	int ret;
2679*4882a593Smuzhiyun 
2680*4882a593Smuzhiyun 	if (!clk)
2681*4882a593Smuzhiyun 		return 0;
2682*4882a593Smuzhiyun 
2683*4882a593Smuzhiyun 	clk_prepare_lock();
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun 	if (clk->exclusive_count)
2686*4882a593Smuzhiyun 		clk_core_rate_unprotect(clk->core);
2687*4882a593Smuzhiyun 
2688*4882a593Smuzhiyun 	ret = clk_core_set_parent_nolock(clk->core,
2689*4882a593Smuzhiyun 					 parent ? parent->core : NULL);
2690*4882a593Smuzhiyun 
2691*4882a593Smuzhiyun 	if (clk->exclusive_count)
2692*4882a593Smuzhiyun 		clk_core_rate_protect(clk->core);
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	clk_prepare_unlock();
2695*4882a593Smuzhiyun 
2696*4882a593Smuzhiyun 	return ret;
2697*4882a593Smuzhiyun }
2698*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_set_parent);
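/*
 * A sketch of the reparent-then-retune pattern enabled by clk_set_parent(),
 * assuming a hypothetical mux that is moved to a faster input before the
 * leaf rate is raised; the rate recalculation described above happens
 * automatically as part of the parent switch.
 */
#if 0	/* illustrative sketch */
static int example_switch_to_fast_parent(struct clk *mux, struct clk *fast)
{
	int ret;

	ret = clk_set_parent(mux, fast);
	if (ret)
		return ret;

	return clk_set_rate(mux, 594000000);
}
#endif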
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2701*4882a593Smuzhiyun {
2702*4882a593Smuzhiyun 	int ret = -EINVAL;
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
2705*4882a593Smuzhiyun 
2706*4882a593Smuzhiyun 	if (!core)
2707*4882a593Smuzhiyun 		return 0;
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 	if (clk_core_rate_is_protected(core))
2710*4882a593Smuzhiyun 		return -EBUSY;
2711*4882a593Smuzhiyun 
2712*4882a593Smuzhiyun 	trace_clk_set_phase(core, degrees);
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun 	if (core->ops->set_phase) {
2715*4882a593Smuzhiyun 		ret = core->ops->set_phase(core->hw, degrees);
2716*4882a593Smuzhiyun 		if (!ret)
2717*4882a593Smuzhiyun 			core->phase = degrees;
2718*4882a593Smuzhiyun 	}
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun 	trace_clk_set_phase_complete(core, degrees);
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun 	return ret;
2723*4882a593Smuzhiyun }
2724*4882a593Smuzhiyun 
2725*4882a593Smuzhiyun /**
2726*4882a593Smuzhiyun  * clk_set_phase - adjust the phase shift of a clock signal
2727*4882a593Smuzhiyun  * @clk: clock signal source
2728*4882a593Smuzhiyun  * @degrees: number of degrees the signal is shifted
2729*4882a593Smuzhiyun  *
2730*4882a593Smuzhiyun  * Shifts the phase of a clock signal by the specified
2731*4882a593Smuzhiyun  * degrees. Returns 0 on success, a negative errno otherwise.
2732*4882a593Smuzhiyun  *
2733*4882a593Smuzhiyun  * This function makes no distinction about the input or reference
2734*4882a593Smuzhiyun  * signal that we adjust the clock signal phase against. For example,
2735*4882a593Smuzhiyun  * for phase-locked-loop clock signal generators we may shift phase with
2736*4882a593Smuzhiyun  * respect to the feedback clock signal input, but in other cases the
2737*4882a593Smuzhiyun  * clock phase may be shifted with respect to some other, unspecified
2738*4882a593Smuzhiyun  * signal.
2739*4882a593Smuzhiyun  *
2740*4882a593Smuzhiyun  * Additionally the concept of phase shift does not propagate through
2741*4882a593Smuzhiyun  * the clock tree hierarchy, which sets it apart from clock rates and
2742*4882a593Smuzhiyun  * clock accuracy. A parent clock phase attribute does not have an
2743*4882a593Smuzhiyun  * impact on the phase attribute of a child clock.
2744*4882a593Smuzhiyun  */
2745*4882a593Smuzhiyun int clk_set_phase(struct clk *clk, int degrees)
2746*4882a593Smuzhiyun {
2747*4882a593Smuzhiyun 	int ret;
2748*4882a593Smuzhiyun 
2749*4882a593Smuzhiyun 	if (!clk)
2750*4882a593Smuzhiyun 		return 0;
2751*4882a593Smuzhiyun 
2752*4882a593Smuzhiyun 	/* sanity check degrees */
2753*4882a593Smuzhiyun 	degrees %= 360;
2754*4882a593Smuzhiyun 	if (degrees < 0)
2755*4882a593Smuzhiyun 		degrees += 360;
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun 	clk_prepare_lock();
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	if (clk->exclusive_count)
2760*4882a593Smuzhiyun 		clk_core_rate_unprotect(clk->core);
2761*4882a593Smuzhiyun 
2762*4882a593Smuzhiyun 	ret = clk_core_set_phase_nolock(clk->core, degrees);
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun 	if (clk->exclusive_count)
2765*4882a593Smuzhiyun 		clk_core_rate_protect(clk->core);
2766*4882a593Smuzhiyun 
2767*4882a593Smuzhiyun 	clk_prepare_unlock();
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun 	return ret;
2770*4882a593Smuzhiyun }
2771*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_set_phase);
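/*
 * A sketch of clk_set_phase(), assuming a hypothetical sample clock whose
 * capture edge must lag the bit clock by 90 degrees. The wrapper above
 * normalizes the argument into [0, 360), so -270 would request the same
 * shift.
 */
#if 0	/* illustrative sketch */
static int example_quarter_phase(struct clk *sample_clk)
{
	int ret;

	ret = clk_set_phase(sample_clk, 90);
	if (ret)
		return ret;

	/* read back what the provider actually cached */
	return clk_get_phase(sample_clk) == 90 ? 0 : -EIO;
}
#endif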
2772*4882a593Smuzhiyun 
2773*4882a593Smuzhiyun static int clk_core_get_phase(struct clk_core *core)
2774*4882a593Smuzhiyun {
2775*4882a593Smuzhiyun 	int ret;
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
2778*4882a593Smuzhiyun 	if (!core->ops->get_phase)
2779*4882a593Smuzhiyun 		return 0;
2780*4882a593Smuzhiyun 
2781*4882a593Smuzhiyun 	/* Always try to update cached phase if possible */
2782*4882a593Smuzhiyun 	ret = core->ops->get_phase(core->hw);
2783*4882a593Smuzhiyun 	if (ret >= 0)
2784*4882a593Smuzhiyun 		core->phase = ret;
2785*4882a593Smuzhiyun 
2786*4882a593Smuzhiyun 	return ret;
2787*4882a593Smuzhiyun }
2788*4882a593Smuzhiyun 
2789*4882a593Smuzhiyun /**
2790*4882a593Smuzhiyun  * clk_get_phase - return the phase shift of a clock signal
2791*4882a593Smuzhiyun  * @clk: clock signal source
2792*4882a593Smuzhiyun  *
2793*4882a593Smuzhiyun  * Returns the phase shift of a clock node in degrees, otherwise returns
2794*4882a593Smuzhiyun  * a negative errno.
2795*4882a593Smuzhiyun  */
2796*4882a593Smuzhiyun int clk_get_phase(struct clk *clk)
2797*4882a593Smuzhiyun {
2798*4882a593Smuzhiyun 	int ret;
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun 	if (!clk)
2801*4882a593Smuzhiyun 		return 0;
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun 	clk_prepare_lock();
2804*4882a593Smuzhiyun 	ret = clk_core_get_phase(clk->core);
2805*4882a593Smuzhiyun 	clk_prepare_unlock();
2806*4882a593Smuzhiyun 
2807*4882a593Smuzhiyun 	return ret;
2808*4882a593Smuzhiyun }
2809*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_get_phase);
2810*4882a593Smuzhiyun 
2811*4882a593Smuzhiyun static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2812*4882a593Smuzhiyun {
2813*4882a593Smuzhiyun 	/* Assume a default value of 50% */
2814*4882a593Smuzhiyun 	core->duty.num = 1;
2815*4882a593Smuzhiyun 	core->duty.den = 2;
2816*4882a593Smuzhiyun }
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2821*4882a593Smuzhiyun {
2822*4882a593Smuzhiyun 	struct clk_duty *duty = &core->duty;
2823*4882a593Smuzhiyun 	int ret = 0;
2824*4882a593Smuzhiyun 
2825*4882a593Smuzhiyun 	if (!core->ops->get_duty_cycle)
2826*4882a593Smuzhiyun 		return clk_core_update_duty_cycle_parent_nolock(core);
2827*4882a593Smuzhiyun 
2828*4882a593Smuzhiyun 	ret = core->ops->get_duty_cycle(core->hw, duty);
2829*4882a593Smuzhiyun 	if (ret)
2830*4882a593Smuzhiyun 		goto reset;
2831*4882a593Smuzhiyun 
2832*4882a593Smuzhiyun 	/* Don't trust the clock provider too much */
2833*4882a593Smuzhiyun 	if (duty->den == 0 || duty->num > duty->den) {
2834*4882a593Smuzhiyun 		ret = -EINVAL;
2835*4882a593Smuzhiyun 		goto reset;
2836*4882a593Smuzhiyun 	}
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun 	return 0;
2839*4882a593Smuzhiyun 
2840*4882a593Smuzhiyun reset:
2841*4882a593Smuzhiyun 	clk_core_reset_duty_cycle_nolock(core);
2842*4882a593Smuzhiyun 	return ret;
2843*4882a593Smuzhiyun }
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
2846*4882a593Smuzhiyun {
2847*4882a593Smuzhiyun 	int ret = 0;
2848*4882a593Smuzhiyun 
2849*4882a593Smuzhiyun 	if (core->parent &&
2850*4882a593Smuzhiyun 	    core->flags & CLK_DUTY_CYCLE_PARENT) {
2851*4882a593Smuzhiyun 		ret = clk_core_update_duty_cycle_nolock(core->parent);
2852*4882a593Smuzhiyun 		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2853*4882a593Smuzhiyun 	} else {
2854*4882a593Smuzhiyun 		clk_core_reset_duty_cycle_nolock(core);
2855*4882a593Smuzhiyun 	}
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun 	return ret;
2858*4882a593Smuzhiyun }
2859*4882a593Smuzhiyun 
2860*4882a593Smuzhiyun static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2861*4882a593Smuzhiyun 						 struct clk_duty *duty);
2862*4882a593Smuzhiyun 
2863*4882a593Smuzhiyun static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
2864*4882a593Smuzhiyun 					  struct clk_duty *duty)
2865*4882a593Smuzhiyun {
2866*4882a593Smuzhiyun 	int ret;
2867*4882a593Smuzhiyun 
2868*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
2869*4882a593Smuzhiyun 
2870*4882a593Smuzhiyun 	if (clk_core_rate_is_protected(core))
2871*4882a593Smuzhiyun 		return -EBUSY;
2872*4882a593Smuzhiyun 
2873*4882a593Smuzhiyun 	trace_clk_set_duty_cycle(core, duty);
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun 	if (!core->ops->set_duty_cycle)
2876*4882a593Smuzhiyun 		return clk_core_set_duty_cycle_parent_nolock(core, duty);
2877*4882a593Smuzhiyun 
2878*4882a593Smuzhiyun 	ret = core->ops->set_duty_cycle(core->hw, duty);
2879*4882a593Smuzhiyun 	if (!ret)
2880*4882a593Smuzhiyun 		memcpy(&core->duty, duty, sizeof(*duty));
2881*4882a593Smuzhiyun 
2882*4882a593Smuzhiyun 	trace_clk_set_duty_cycle_complete(core, duty);
2883*4882a593Smuzhiyun 
2884*4882a593Smuzhiyun 	return ret;
2885*4882a593Smuzhiyun }
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2888*4882a593Smuzhiyun 						 struct clk_duty *duty)
2889*4882a593Smuzhiyun {
2890*4882a593Smuzhiyun 	int ret = 0;
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun 	if (core->parent &&
2893*4882a593Smuzhiyun 	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2894*4882a593Smuzhiyun 		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2895*4882a593Smuzhiyun 		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2896*4882a593Smuzhiyun 	}
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	return ret;
2899*4882a593Smuzhiyun }
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun /**
2902*4882a593Smuzhiyun  * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2903*4882a593Smuzhiyun  * @clk: clock signal source
2904*4882a593Smuzhiyun  * @num: numerator of the duty cycle ratio to be applied
2905*4882a593Smuzhiyun  * @den: denominator of the duty cycle ratio to be applied
2906*4882a593Smuzhiyun  *
2907*4882a593Smuzhiyun  * Apply the duty cycle ratio if the ratio is valid and the clock can
2908*4882a593Smuzhiyun  * perform this operation.
2909*4882a593Smuzhiyun  *
2910*4882a593Smuzhiyun  * Returns (0) on success, a negative errno otherwise.
2911*4882a593Smuzhiyun  */
2912*4882a593Smuzhiyun int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2913*4882a593Smuzhiyun {
2914*4882a593Smuzhiyun 	int ret;
2915*4882a593Smuzhiyun 	struct clk_duty duty;
2916*4882a593Smuzhiyun 
2917*4882a593Smuzhiyun 	if (!clk)
2918*4882a593Smuzhiyun 		return 0;
2919*4882a593Smuzhiyun 
2920*4882a593Smuzhiyun 	/* sanity check the ratio */
2921*4882a593Smuzhiyun 	if (den == 0 || num > den)
2922*4882a593Smuzhiyun 		return -EINVAL;
2923*4882a593Smuzhiyun 
2924*4882a593Smuzhiyun 	duty.num = num;
2925*4882a593Smuzhiyun 	duty.den = den;
2926*4882a593Smuzhiyun 
2927*4882a593Smuzhiyun 	clk_prepare_lock();
2928*4882a593Smuzhiyun 
2929*4882a593Smuzhiyun 	if (clk->exclusive_count)
2930*4882a593Smuzhiyun 		clk_core_rate_unprotect(clk->core);
2931*4882a593Smuzhiyun 
2932*4882a593Smuzhiyun 	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2933*4882a593Smuzhiyun 
2934*4882a593Smuzhiyun 	if (clk->exclusive_count)
2935*4882a593Smuzhiyun 		clk_core_rate_protect(clk->core);
2936*4882a593Smuzhiyun 
2937*4882a593Smuzhiyun 	clk_prepare_unlock();
2938*4882a593Smuzhiyun 
2939*4882a593Smuzhiyun 	return ret;
2940*4882a593Smuzhiyun }
2941*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
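/*
 * A sketch of clk_set_duty_cycle(), assuming a hypothetical PWM-like clock
 * output that should be high for one third of each period. The ratio is
 * expressed as num/den and rejected by the wrapper above when den is zero
 * or num exceeds den.
 */
#if 0	/* illustrative sketch */
static int example_one_third_duty(struct clk *clk)
{
	return clk_set_duty_cycle(clk, 1, 3);	/* 33.3% high time */
}
#endif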
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2944*4882a593Smuzhiyun 					  unsigned int scale)
2945*4882a593Smuzhiyun {
2946*4882a593Smuzhiyun 	struct clk_duty *duty = &core->duty;
2947*4882a593Smuzhiyun 	int ret;
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 	clk_prepare_lock();
2950*4882a593Smuzhiyun 
2951*4882a593Smuzhiyun 	ret = clk_core_update_duty_cycle_nolock(core);
2952*4882a593Smuzhiyun 	if (!ret)
2953*4882a593Smuzhiyun 		ret = mult_frac(scale, duty->num, duty->den);
2954*4882a593Smuzhiyun 
2955*4882a593Smuzhiyun 	clk_prepare_unlock();
2956*4882a593Smuzhiyun 
2957*4882a593Smuzhiyun 	return ret;
2958*4882a593Smuzhiyun }
2959*4882a593Smuzhiyun 
2960*4882a593Smuzhiyun /**
2961*4882a593Smuzhiyun  * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2962*4882a593Smuzhiyun  * @clk: clock signal source
2963*4882a593Smuzhiyun  * @scale: scaling factor to be applied to represent the ratio as an integer
2964*4882a593Smuzhiyun  *
2965*4882a593Smuzhiyun  * Returns the duty cycle ratio of a clock node multiplied by the provided
2966*4882a593Smuzhiyun  * scaling factor, or negative errno on error.
2967*4882a593Smuzhiyun  */
2968*4882a593Smuzhiyun int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2969*4882a593Smuzhiyun {
2970*4882a593Smuzhiyun 	if (!clk)
2971*4882a593Smuzhiyun 		return 0;
2972*4882a593Smuzhiyun 
2973*4882a593Smuzhiyun 	return clk_core_get_scaled_duty_cycle(clk->core, scale);
2974*4882a593Smuzhiyun }
2975*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
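/*
 * A sketch of reading the ratio back with clk_get_scaled_duty_cycle(). The
 * scale parameter only controls integer precision: with scale = 100 the
 * value is a percentage, so the 1/3 ratio set above would read back as 33.
 */
#if 0	/* illustrative sketch */
static int example_duty_percent(struct clk *clk)
{
	int pct = clk_get_scaled_duty_cycle(clk, 100);

	if (pct < 0)
		return pct;	/* propagate the errno */

	pr_info("duty cycle: %d%%\n", pct);
	return 0;
}
#endif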
2976*4882a593Smuzhiyun 
2977*4882a593Smuzhiyun /**
2978*4882a593Smuzhiyun  * clk_is_match - check if two clk's point to the same hardware clock
2979*4882a593Smuzhiyun  * @p: clk compared against q
2980*4882a593Smuzhiyun  * @q: clk compared against p
2981*4882a593Smuzhiyun  *
2982*4882a593Smuzhiyun  * Returns true if the two struct clk pointers both point to the same hardware
2983*4882a593Smuzhiyun  * clock node. Put differently, returns true if struct clk *p and struct clk *q
2984*4882a593Smuzhiyun  * share the same struct clk_core object.
2985*4882a593Smuzhiyun  *
2986*4882a593Smuzhiyun  * Returns false otherwise. Note that two NULL clks are treated as matching.
2987*4882a593Smuzhiyun  */
2988*4882a593Smuzhiyun bool clk_is_match(const struct clk *p, const struct clk *q)
2989*4882a593Smuzhiyun {
2990*4882a593Smuzhiyun 	/* trivial case: identical struct clk's or both NULL */
2991*4882a593Smuzhiyun 	if (p == q)
2992*4882a593Smuzhiyun 		return true;
2993*4882a593Smuzhiyun 
2994*4882a593Smuzhiyun 	/* true if clk->core pointers match. Avoid dereferencing garbage */
2995*4882a593Smuzhiyun 	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2996*4882a593Smuzhiyun 		if (p->core == q->core)
2997*4882a593Smuzhiyun 			return true;
2998*4882a593Smuzhiyun 
2999*4882a593Smuzhiyun 	return false;
3000*4882a593Smuzhiyun }
3001*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_is_match);
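/*
 * A sketch of clk_is_match(), assuming a hypothetical driver that obtains
 * two struct clk handles via two con_ids and wants to know whether they
 * resolve to the same hardware clock before managing it twice.
 */
#if 0	/* illustrative sketch */
static bool example_same_hw(struct device *dev)
{
	struct clk *a = devm_clk_get(dev, "core");	/* hypothetical ids */
	struct clk *b = devm_clk_get(dev, "bus");

	if (IS_ERR(a) || IS_ERR(b))
		return false;

	return clk_is_match(a, b);
}
#endif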
3002*4882a593Smuzhiyun 
3003*4882a593Smuzhiyun /***        debugfs support        ***/
3004*4882a593Smuzhiyun 
3005*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
3006*4882a593Smuzhiyun #include <linux/debugfs.h>
3007*4882a593Smuzhiyun 
3008*4882a593Smuzhiyun static struct dentry *rootdir;
3009*4882a593Smuzhiyun static int inited = 0;
3010*4882a593Smuzhiyun static DEFINE_MUTEX(clk_debug_lock);
3011*4882a593Smuzhiyun static HLIST_HEAD(clk_debug_list);
3012*4882a593Smuzhiyun 
3013*4882a593Smuzhiyun static struct hlist_head *orphan_list[] = {
3014*4882a593Smuzhiyun 	&clk_orphan_list,
3015*4882a593Smuzhiyun 	NULL,
3016*4882a593Smuzhiyun };
3017*4882a593Smuzhiyun 
3018*4882a593Smuzhiyun static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
3019*4882a593Smuzhiyun 				 int level)
3020*4882a593Smuzhiyun {
3021*4882a593Smuzhiyun 	int phase;
3022*4882a593Smuzhiyun 
3023*4882a593Smuzhiyun 	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
3024*4882a593Smuzhiyun 		   level * 3 + 1, "",
3025*4882a593Smuzhiyun 		   30 - level * 3, c->name,
3026*4882a593Smuzhiyun 		   c->enable_count, c->prepare_count, c->protect_count,
3027*4882a593Smuzhiyun 		   clk_core_get_rate_recalc(c),
3028*4882a593Smuzhiyun 		   clk_core_get_accuracy_recalc(c));
3029*4882a593Smuzhiyun 
3030*4882a593Smuzhiyun 	phase = clk_core_get_phase(c);
3031*4882a593Smuzhiyun 	if (phase >= 0)
3032*4882a593Smuzhiyun 		seq_printf(s, "%5d", phase);
3033*4882a593Smuzhiyun 	else
3034*4882a593Smuzhiyun 		seq_puts(s, "-----");
3035*4882a593Smuzhiyun 
3036*4882a593Smuzhiyun 	seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
3037*4882a593Smuzhiyun }
3038*4882a593Smuzhiyun 
3039*4882a593Smuzhiyun static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
3040*4882a593Smuzhiyun 				     int level)
3041*4882a593Smuzhiyun {
3042*4882a593Smuzhiyun 	struct clk_core *child;
3043*4882a593Smuzhiyun 
3044*4882a593Smuzhiyun 	clk_summary_show_one(s, c, level);
3045*4882a593Smuzhiyun 
3046*4882a593Smuzhiyun 	hlist_for_each_entry(child, &c->children, child_node)
3047*4882a593Smuzhiyun 		clk_summary_show_subtree(s, child, level + 1);
3048*4882a593Smuzhiyun }
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun static int clk_summary_show(struct seq_file *s, void *data)
3051*4882a593Smuzhiyun {
3052*4882a593Smuzhiyun 	struct clk_core *c;
3053*4882a593Smuzhiyun 	struct hlist_head **lists = (struct hlist_head **)s->private;
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 	seq_puts(s, "                                 enable  prepare  protect                                duty\n");
3056*4882a593Smuzhiyun 	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle\n");
3057*4882a593Smuzhiyun 	seq_puts(s, "---------------------------------------------------------------------------------------------\n");
3058*4882a593Smuzhiyun 
3059*4882a593Smuzhiyun 	clk_prepare_lock();
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 	for (; *lists; lists++)
3062*4882a593Smuzhiyun 		hlist_for_each_entry(c, *lists, child_node)
3063*4882a593Smuzhiyun 			clk_summary_show_subtree(s, c, 0);
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun 	clk_prepare_unlock();
3066*4882a593Smuzhiyun 
3067*4882a593Smuzhiyun 	return 0;
3068*4882a593Smuzhiyun }
3069*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(clk_summary);
3070*4882a593Smuzhiyun 
3071*4882a593Smuzhiyun static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
3072*4882a593Smuzhiyun {
3073*4882a593Smuzhiyun 	int phase;
3074*4882a593Smuzhiyun 	unsigned long min_rate, max_rate;
3075*4882a593Smuzhiyun 
3076*4882a593Smuzhiyun 	clk_core_get_boundaries(c, &min_rate, &max_rate);
3077*4882a593Smuzhiyun 
3078*4882a593Smuzhiyun 	/* This should be JSON format, i.e. elements separated with a comma */
3079*4882a593Smuzhiyun 	seq_printf(s, "\"%s\": { ", c->name);
3080*4882a593Smuzhiyun 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
3081*4882a593Smuzhiyun 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
3082*4882a593Smuzhiyun 	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3083*4882a593Smuzhiyun 	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
3084*4882a593Smuzhiyun 	seq_printf(s, "\"min_rate\": %lu,", min_rate);
3085*4882a593Smuzhiyun 	seq_printf(s, "\"max_rate\": %lu,", max_rate);
3086*4882a593Smuzhiyun 	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
3087*4882a593Smuzhiyun 	phase = clk_core_get_phase(c);
3088*4882a593Smuzhiyun 	if (phase >= 0)
3089*4882a593Smuzhiyun 		seq_printf(s, "\"phase\": %d,", phase);
3090*4882a593Smuzhiyun 	seq_printf(s, "\"duty_cycle\": %u",
3091*4882a593Smuzhiyun 		   clk_core_get_scaled_duty_cycle(c, 100000));
3092*4882a593Smuzhiyun }
3093*4882a593Smuzhiyun 
3094*4882a593Smuzhiyun static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
3095*4882a593Smuzhiyun {
3096*4882a593Smuzhiyun 	struct clk_core *child;
3097*4882a593Smuzhiyun 
3098*4882a593Smuzhiyun 	clk_dump_one(s, c, level);
3099*4882a593Smuzhiyun 
3100*4882a593Smuzhiyun 	hlist_for_each_entry(child, &c->children, child_node) {
3101*4882a593Smuzhiyun 		seq_putc(s, ',');
3102*4882a593Smuzhiyun 		clk_dump_subtree(s, child, level + 1);
3103*4882a593Smuzhiyun 	}
3104*4882a593Smuzhiyun 
3105*4882a593Smuzhiyun 	seq_putc(s, '}');
3106*4882a593Smuzhiyun }
3107*4882a593Smuzhiyun 
3108*4882a593Smuzhiyun static int clk_dump_show(struct seq_file *s, void *data)
3109*4882a593Smuzhiyun {
3110*4882a593Smuzhiyun 	struct clk_core *c;
3111*4882a593Smuzhiyun 	bool first_node = true;
3112*4882a593Smuzhiyun 	struct hlist_head **lists = (struct hlist_head **)s->private;
3113*4882a593Smuzhiyun 
3114*4882a593Smuzhiyun 	seq_putc(s, '{');
3115*4882a593Smuzhiyun 	clk_prepare_lock();
3116*4882a593Smuzhiyun 
3117*4882a593Smuzhiyun 	for (; *lists; lists++) {
3118*4882a593Smuzhiyun 		hlist_for_each_entry(c, *lists, child_node) {
3119*4882a593Smuzhiyun 			if (!first_node)
3120*4882a593Smuzhiyun 				seq_putc(s, ',');
3121*4882a593Smuzhiyun 			first_node = false;
3122*4882a593Smuzhiyun 			clk_dump_subtree(s, c, 0);
3123*4882a593Smuzhiyun 		}
3124*4882a593Smuzhiyun 	}
3125*4882a593Smuzhiyun 
3126*4882a593Smuzhiyun 	clk_prepare_unlock();
3127*4882a593Smuzhiyun 
3128*4882a593Smuzhiyun 	seq_puts(s, "}\n");
3129*4882a593Smuzhiyun 	return 0;
3130*4882a593Smuzhiyun }
3131*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(clk_dump);
3132*4882a593Smuzhiyun 
3133*4882a593Smuzhiyun #ifdef CONFIG_ANDROID_BINDER_IPC
3134*4882a593Smuzhiyun #define CLOCK_ALLOW_WRITE_DEBUGFS
3135*4882a593Smuzhiyun #else
3136*4882a593Smuzhiyun #undef CLOCK_ALLOW_WRITE_DEBUGFS
3137*4882a593Smuzhiyun #endif
3138*4882a593Smuzhiyun #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3139*4882a593Smuzhiyun /*
3140*4882a593Smuzhiyun  * This can be dangerous, therefore don't provide any real compile time
3141*4882a593Smuzhiyun  * configuration option for this feature.
3142*4882a593Smuzhiyun  * People who want to use this will need to modify the source code directly.
3143*4882a593Smuzhiyun  */
3144*4882a593Smuzhiyun static int clk_rate_set(void *data, u64 val)
3145*4882a593Smuzhiyun {
3146*4882a593Smuzhiyun 	struct clk_core *core = data;
3147*4882a593Smuzhiyun 	int ret;
3148*4882a593Smuzhiyun 
3149*4882a593Smuzhiyun 	clk_prepare_lock();
3150*4882a593Smuzhiyun 	ret = clk_core_set_rate_nolock(core, val);
3151*4882a593Smuzhiyun 	clk_prepare_unlock();
3152*4882a593Smuzhiyun 
3153*4882a593Smuzhiyun 	return ret;
3154*4882a593Smuzhiyun }
3155*4882a593Smuzhiyun 
3156*4882a593Smuzhiyun #define clk_rate_mode	0644
3157*4882a593Smuzhiyun 
3158*4882a593Smuzhiyun static int clk_prepare_enable_set(void *data, u64 val)
3159*4882a593Smuzhiyun {
3160*4882a593Smuzhiyun 	struct clk_core *core = data;
3161*4882a593Smuzhiyun 	int ret = 0;
3162*4882a593Smuzhiyun 
3163*4882a593Smuzhiyun 	if (val)
3164*4882a593Smuzhiyun 		ret = clk_prepare_enable(core->hw->clk);
3165*4882a593Smuzhiyun 	else
3166*4882a593Smuzhiyun 		clk_disable_unprepare(core->hw->clk);
3167*4882a593Smuzhiyun 
3168*4882a593Smuzhiyun 	return ret;
3169*4882a593Smuzhiyun }
3170*4882a593Smuzhiyun 
3171*4882a593Smuzhiyun static int clk_prepare_enable_get(void *data, u64 *val)
3172*4882a593Smuzhiyun {
3173*4882a593Smuzhiyun 	struct clk_core *core = data;
3174*4882a593Smuzhiyun 
3175*4882a593Smuzhiyun 	*val = core->enable_count && core->prepare_count;
3176*4882a593Smuzhiyun 	return 0;
3177*4882a593Smuzhiyun }
3178*4882a593Smuzhiyun 
3179*4882a593Smuzhiyun DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
3180*4882a593Smuzhiyun 			 clk_prepare_enable_set, "%llu\n");
3181*4882a593Smuzhiyun 
3182*4882a593Smuzhiyun #else
3183*4882a593Smuzhiyun #define clk_rate_set	NULL
3184*4882a593Smuzhiyun #define clk_rate_mode	0444
3185*4882a593Smuzhiyun #endif
3186*4882a593Smuzhiyun 
3187*4882a593Smuzhiyun static int clk_rate_get(void *data, u64 *val)
3188*4882a593Smuzhiyun {
3189*4882a593Smuzhiyun 	struct clk_core *core = data;
3190*4882a593Smuzhiyun 
3191*4882a593Smuzhiyun 	*val = core->rate;
3192*4882a593Smuzhiyun 	return 0;
3193*4882a593Smuzhiyun }
3194*4882a593Smuzhiyun 
3195*4882a593Smuzhiyun DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3196*4882a593Smuzhiyun 
3197*4882a593Smuzhiyun static const struct {
3198*4882a593Smuzhiyun 	unsigned long flag;
3199*4882a593Smuzhiyun 	const char *name;
3200*4882a593Smuzhiyun } clk_flags[] = {
3201*4882a593Smuzhiyun #define ENTRY(f) { f, #f }
3202*4882a593Smuzhiyun 	ENTRY(CLK_SET_RATE_GATE),
3203*4882a593Smuzhiyun 	ENTRY(CLK_SET_PARENT_GATE),
3204*4882a593Smuzhiyun 	ENTRY(CLK_SET_RATE_PARENT),
3205*4882a593Smuzhiyun 	ENTRY(CLK_IGNORE_UNUSED),
3206*4882a593Smuzhiyun 	ENTRY(CLK_GET_RATE_NOCACHE),
3207*4882a593Smuzhiyun 	ENTRY(CLK_SET_RATE_NO_REPARENT),
3208*4882a593Smuzhiyun 	ENTRY(CLK_GET_ACCURACY_NOCACHE),
3209*4882a593Smuzhiyun 	ENTRY(CLK_RECALC_NEW_RATES),
3210*4882a593Smuzhiyun 	ENTRY(CLK_SET_RATE_UNGATE),
3211*4882a593Smuzhiyun 	ENTRY(CLK_IS_CRITICAL),
3212*4882a593Smuzhiyun 	ENTRY(CLK_OPS_PARENT_ENABLE),
3213*4882a593Smuzhiyun 	ENTRY(CLK_DUTY_CYCLE_PARENT),
3214*4882a593Smuzhiyun #undef ENTRY
3215*4882a593Smuzhiyun };
3216*4882a593Smuzhiyun 
3217*4882a593Smuzhiyun static int clk_flags_show(struct seq_file *s, void *data)
3218*4882a593Smuzhiyun {
3219*4882a593Smuzhiyun 	struct clk_core *core = s->private;
3220*4882a593Smuzhiyun 	unsigned long flags = core->flags;
3221*4882a593Smuzhiyun 	unsigned int i;
3222*4882a593Smuzhiyun 
3223*4882a593Smuzhiyun 	for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3224*4882a593Smuzhiyun 		if (flags & clk_flags[i].flag) {
3225*4882a593Smuzhiyun 			seq_printf(s, "%s\n", clk_flags[i].name);
3226*4882a593Smuzhiyun 			flags &= ~clk_flags[i].flag;
3227*4882a593Smuzhiyun 		}
3228*4882a593Smuzhiyun 	}
3229*4882a593Smuzhiyun 	if (flags) {
3230*4882a593Smuzhiyun 		/* Unknown flags */
3231*4882a593Smuzhiyun 		seq_printf(s, "0x%lx\n", flags);
3232*4882a593Smuzhiyun 	}
3233*4882a593Smuzhiyun 
3234*4882a593Smuzhiyun 	return 0;
3235*4882a593Smuzhiyun }
3236*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(clk_flags);
3237*4882a593Smuzhiyun 
3238*4882a593Smuzhiyun static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3239*4882a593Smuzhiyun 				 unsigned int i, char terminator)
3240*4882a593Smuzhiyun {
3241*4882a593Smuzhiyun 	struct clk_core *parent;
3242*4882a593Smuzhiyun 
3243*4882a593Smuzhiyun 	/*
3244*4882a593Smuzhiyun 	 * Go through the following options to fetch a parent's name.
3245*4882a593Smuzhiyun 	 *
3246*4882a593Smuzhiyun 	 * 1. Fetch the registered parent clock and use its name
3247*4882a593Smuzhiyun 	 * 2. Use the global (fallback) name if specified
3248*4882a593Smuzhiyun 	 * 3. Use the local fw_name if provided
3249*4882a593Smuzhiyun 	 * 4. Fetch parent clock's clock-output-name if DT index was set
3250*4882a593Smuzhiyun 	 *
3251*4882a593Smuzhiyun 	 * This may still fail in some cases, such as when the parent is
3252*4882a593Smuzhiyun 	 * specified directly via a struct clk_hw pointer, but it isn't
3253*4882a593Smuzhiyun 	 * registered (yet).
3254*4882a593Smuzhiyun 	 */
3255*4882a593Smuzhiyun 	parent = clk_core_get_parent_by_index(core, i);
3256*4882a593Smuzhiyun 	if (parent)
3257*4882a593Smuzhiyun 		seq_puts(s, parent->name);
3258*4882a593Smuzhiyun 	else if (core->parents[i].name)
3259*4882a593Smuzhiyun 		seq_puts(s, core->parents[i].name);
3260*4882a593Smuzhiyun 	else if (core->parents[i].fw_name)
3261*4882a593Smuzhiyun 		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3262*4882a593Smuzhiyun 	else if (core->parents[i].index >= 0)
3263*4882a593Smuzhiyun 		seq_puts(s,
3264*4882a593Smuzhiyun 			 of_clk_get_parent_name(core->of_node,
3265*4882a593Smuzhiyun 						core->parents[i].index));
3266*4882a593Smuzhiyun 	else
3267*4882a593Smuzhiyun 		seq_puts(s, "(missing)");
3268*4882a593Smuzhiyun 
3269*4882a593Smuzhiyun 	seq_putc(s, terminator);
3270*4882a593Smuzhiyun }
3271*4882a593Smuzhiyun 
3272*4882a593Smuzhiyun static int possible_parents_show(struct seq_file *s, void *data)
3273*4882a593Smuzhiyun {
3274*4882a593Smuzhiyun 	struct clk_core *core = s->private;
3275*4882a593Smuzhiyun 	int i;
3276*4882a593Smuzhiyun 
3277*4882a593Smuzhiyun 	for (i = 0; i < core->num_parents - 1; i++)
3278*4882a593Smuzhiyun 		possible_parent_show(s, core, i, ' ');
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun 	possible_parent_show(s, core, i, '\n');
3281*4882a593Smuzhiyun 
3282*4882a593Smuzhiyun 	return 0;
3283*4882a593Smuzhiyun }
3284*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(possible_parents);
3285*4882a593Smuzhiyun 
3286*4882a593Smuzhiyun static int current_parent_show(struct seq_file *s, void *data)
3287*4882a593Smuzhiyun {
3288*4882a593Smuzhiyun 	struct clk_core *core = s->private;
3289*4882a593Smuzhiyun 
3290*4882a593Smuzhiyun 	if (core->parent)
3291*4882a593Smuzhiyun 		seq_printf(s, "%s\n", core->parent->name);
3292*4882a593Smuzhiyun 
3293*4882a593Smuzhiyun 	return 0;
3294*4882a593Smuzhiyun }
3295*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(current_parent);
3296*4882a593Smuzhiyun 
3297*4882a593Smuzhiyun static int clk_duty_cycle_show(struct seq_file *s, void *data)
3298*4882a593Smuzhiyun {
3299*4882a593Smuzhiyun 	struct clk_core *core = s->private;
3300*4882a593Smuzhiyun 	struct clk_duty *duty = &core->duty;
3301*4882a593Smuzhiyun 
3302*4882a593Smuzhiyun 	seq_printf(s, "%u/%u\n", duty->num, duty->den);
3303*4882a593Smuzhiyun 
3304*4882a593Smuzhiyun 	return 0;
3305*4882a593Smuzhiyun }
3306*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3307*4882a593Smuzhiyun 
3308*4882a593Smuzhiyun static int clk_min_rate_show(struct seq_file *s, void *data)
3309*4882a593Smuzhiyun {
3310*4882a593Smuzhiyun 	struct clk_core *core = s->private;
3311*4882a593Smuzhiyun 	unsigned long min_rate, max_rate;
3312*4882a593Smuzhiyun 
3313*4882a593Smuzhiyun 	clk_prepare_lock();
3314*4882a593Smuzhiyun 	clk_core_get_boundaries(core, &min_rate, &max_rate);
3315*4882a593Smuzhiyun 	clk_prepare_unlock();
3316*4882a593Smuzhiyun 	seq_printf(s, "%lu\n", min_rate);
3317*4882a593Smuzhiyun 
3318*4882a593Smuzhiyun 	return 0;
3319*4882a593Smuzhiyun }
3320*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun static int clk_max_rate_show(struct seq_file *s, void *data)
3323*4882a593Smuzhiyun {
3324*4882a593Smuzhiyun 	struct clk_core *core = s->private;
3325*4882a593Smuzhiyun 	unsigned long min_rate, max_rate;
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun 	clk_prepare_lock();
3328*4882a593Smuzhiyun 	clk_core_get_boundaries(core, &min_rate, &max_rate);
3329*4882a593Smuzhiyun 	clk_prepare_unlock();
3330*4882a593Smuzhiyun 	seq_printf(s, "%lu\n", max_rate);
3331*4882a593Smuzhiyun 
3332*4882a593Smuzhiyun 	return 0;
3333*4882a593Smuzhiyun }
3334*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3335*4882a593Smuzhiyun 
3336*4882a593Smuzhiyun static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3337*4882a593Smuzhiyun {
3338*4882a593Smuzhiyun 	struct dentry *root;
3339*4882a593Smuzhiyun 
3340*4882a593Smuzhiyun 	if (!core || !pdentry)
3341*4882a593Smuzhiyun 		return;
3342*4882a593Smuzhiyun 
3343*4882a593Smuzhiyun 	root = debugfs_create_dir(core->name, pdentry);
3344*4882a593Smuzhiyun 	core->dentry = root;
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun 	debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3347*4882a593Smuzhiyun 			    &clk_rate_fops);
3348*4882a593Smuzhiyun 	debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3349*4882a593Smuzhiyun 	debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3350*4882a593Smuzhiyun 	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3351*4882a593Smuzhiyun 	debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3352*4882a593Smuzhiyun 	debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3353*4882a593Smuzhiyun 	debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3354*4882a593Smuzhiyun 	debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3355*4882a593Smuzhiyun 	debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3356*4882a593Smuzhiyun 	debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3357*4882a593Smuzhiyun 	debugfs_create_file("clk_duty_cycle", 0444, root, core,
3358*4882a593Smuzhiyun 			    &clk_duty_cycle_fops);
3359*4882a593Smuzhiyun #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3360*4882a593Smuzhiyun 	debugfs_create_file("clk_prepare_enable", 0644, root, core,
3361*4882a593Smuzhiyun 			    &clk_prepare_enable_fops);
3362*4882a593Smuzhiyun #endif
3363*4882a593Smuzhiyun 
3364*4882a593Smuzhiyun 	if (core->num_parents > 0)
3365*4882a593Smuzhiyun 		debugfs_create_file("clk_parent", 0444, root, core,
3366*4882a593Smuzhiyun 				    &current_parent_fops);
3367*4882a593Smuzhiyun 
3368*4882a593Smuzhiyun 	if (core->num_parents > 1)
3369*4882a593Smuzhiyun 		debugfs_create_file("clk_possible_parents", 0444, root, core,
3370*4882a593Smuzhiyun 				    &possible_parents_fops);
3371*4882a593Smuzhiyun 
3372*4882a593Smuzhiyun 	if (core->ops->debug_init)
3373*4882a593Smuzhiyun 		core->ops->debug_init(core->hw, core->dentry);
3374*4882a593Smuzhiyun }
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun /**
3377*4882a593Smuzhiyun  * clk_debug_register - add a clk node to the debugfs clk directory
3378*4882a593Smuzhiyun  * @core: the clk being added to the debugfs clk directory
3379*4882a593Smuzhiyun  *
3380*4882a593Smuzhiyun  * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3381*4882a593Smuzhiyun  * initialized.  Otherwise it bails out early since the debugfs clk directory
3382*4882a593Smuzhiyun  * will be created lazily by clk_debug_init as part of a late_initcall.
3383*4882a593Smuzhiyun  */
3384*4882a593Smuzhiyun static void clk_debug_register(struct clk_core *core)
3385*4882a593Smuzhiyun {
3386*4882a593Smuzhiyun 	mutex_lock(&clk_debug_lock);
3387*4882a593Smuzhiyun 	hlist_add_head(&core->debug_node, &clk_debug_list);
3388*4882a593Smuzhiyun 	if (inited)
3389*4882a593Smuzhiyun 		clk_debug_create_one(core, rootdir);
3390*4882a593Smuzhiyun 	mutex_unlock(&clk_debug_lock);
3391*4882a593Smuzhiyun }
3392*4882a593Smuzhiyun 
3393*4882a593Smuzhiyun  /**
3394*4882a593Smuzhiyun  * clk_debug_unregister - remove a clk node from the debugfs clk directory
3395*4882a593Smuzhiyun  * @core: the clk being removed from the debugfs clk directory
3396*4882a593Smuzhiyun  *
3397*4882a593Smuzhiyun  * Dynamically removes a clk and all its child nodes from the
3398*4882a593Smuzhiyun  * debugfs clk directory if clk->dentry points to debugfs created by
3399*4882a593Smuzhiyun  * clk_debug_register in __clk_core_init.
3400*4882a593Smuzhiyun  */
3401*4882a593Smuzhiyun static void clk_debug_unregister(struct clk_core *core)
3402*4882a593Smuzhiyun {
3403*4882a593Smuzhiyun 	mutex_lock(&clk_debug_lock);
3404*4882a593Smuzhiyun 	hlist_del_init(&core->debug_node);
3405*4882a593Smuzhiyun 	debugfs_remove_recursive(core->dentry);
3406*4882a593Smuzhiyun 	core->dentry = NULL;
3407*4882a593Smuzhiyun 	mutex_unlock(&clk_debug_lock);
3408*4882a593Smuzhiyun }
3409*4882a593Smuzhiyun 
3410*4882a593Smuzhiyun /**
3411*4882a593Smuzhiyun  * clk_debug_init - lazily populate the debugfs clk directory
3412*4882a593Smuzhiyun  *
3413*4882a593Smuzhiyun  * clks are often initialized very early during boot before memory can be
3414*4882a593Smuzhiyun  * dynamically allocated and well before debugfs is setup. This function
3415*4882a593Smuzhiyun  * populates the debugfs clk directory once at boot-time when we know that
3416*4882a593Smuzhiyun  * debugfs is setup. It should only be called once at boot-time, all other clks
3417*4882a593Smuzhiyun  * added dynamically will be done so with clk_debug_register.
3418*4882a593Smuzhiyun  */
3419*4882a593Smuzhiyun static int __init clk_debug_init(void)
3420*4882a593Smuzhiyun {
3421*4882a593Smuzhiyun 	struct clk_core *core;
3422*4882a593Smuzhiyun 
3423*4882a593Smuzhiyun #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3424*4882a593Smuzhiyun 	pr_warn("\n");
3425*4882a593Smuzhiyun 	pr_warn("********************************************************************\n");
3426*4882a593Smuzhiyun 	pr_warn("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE           **\n");
3427*4882a593Smuzhiyun 	pr_warn("**                                                                **\n");
3428*4882a593Smuzhiyun 	pr_warn("**  WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
3429*4882a593Smuzhiyun 	pr_warn("**                                                                **\n");
3430*4882a593Smuzhiyun 	pr_warn("** This means that this kernel is built to expose clk operations  **\n");
3431*4882a593Smuzhiyun 	pr_warn("** such as parent or rate setting, enabling, disabling, etc.      **\n");
3432*4882a593Smuzhiyun 	pr_warn("** to userspace, which may compromise security on your system.    **\n");
3433*4882a593Smuzhiyun 	pr_warn("**                                                                **\n");
3434*4882a593Smuzhiyun 	pr_warn("** If you see this message and you are not debugging the          **\n");
3435*4882a593Smuzhiyun 	pr_warn("** kernel, report this immediately to your vendor!                **\n");
3436*4882a593Smuzhiyun 	pr_warn("**                                                                **\n");
3437*4882a593Smuzhiyun 	pr_warn("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE           **\n");
3438*4882a593Smuzhiyun 	pr_warn("********************************************************************\n");
3439*4882a593Smuzhiyun #endif
3440*4882a593Smuzhiyun 
3441*4882a593Smuzhiyun 	rootdir = debugfs_create_dir("clk", NULL);
3442*4882a593Smuzhiyun 
3443*4882a593Smuzhiyun 	debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3444*4882a593Smuzhiyun 			    &clk_summary_fops);
3445*4882a593Smuzhiyun 	debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3446*4882a593Smuzhiyun 			    &clk_dump_fops);
3447*4882a593Smuzhiyun 	debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3448*4882a593Smuzhiyun 			    &clk_summary_fops);
3449*4882a593Smuzhiyun 	debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3450*4882a593Smuzhiyun 			    &clk_dump_fops);
3451*4882a593Smuzhiyun 
3452*4882a593Smuzhiyun 	mutex_lock(&clk_debug_lock);
3453*4882a593Smuzhiyun 	hlist_for_each_entry(core, &clk_debug_list, debug_node)
3454*4882a593Smuzhiyun 		clk_debug_create_one(core, rootdir);
3455*4882a593Smuzhiyun 
3456*4882a593Smuzhiyun 	inited = 1;
3457*4882a593Smuzhiyun 	mutex_unlock(&clk_debug_lock);
3458*4882a593Smuzhiyun 
3459*4882a593Smuzhiyun 	return 0;
3460*4882a593Smuzhiyun }
3461*4882a593Smuzhiyun late_initcall(clk_debug_init);
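
/*
 * For illustration only: the debugfs tree produced by clk_debug_init() and
 * clk_debug_create_one() looks roughly like the sketch below. The per-clk
 * entries are abbreviated and the exact set of files depends on the clk:
 *
 *	/sys/kernel/debug/clk/
 *		clk_summary
 *		clk_dump
 *		clk_orphan_summary
 *		clk_orphan_dump
 *		<clk name>/
 *			clk_rate
 *			clk_parent		(if the clk has a parent)
 *			clk_possible_parents	(if the clk has > 1 parent)
 */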
3462*4882a593Smuzhiyun #else
3463*4882a593Smuzhiyun static inline void clk_debug_register(struct clk_core *core) { }
3464*4882a593Smuzhiyun static inline void clk_debug_unregister(struct clk_core *core)
3465*4882a593Smuzhiyun {
3466*4882a593Smuzhiyun }
3467*4882a593Smuzhiyun #endif
3468*4882a593Smuzhiyun 
3469*4882a593Smuzhiyun static void clk_core_reparent_orphans_nolock(void)
3470*4882a593Smuzhiyun {
3471*4882a593Smuzhiyun 	struct clk_core *orphan;
3472*4882a593Smuzhiyun 	struct hlist_node *tmp2;
3473*4882a593Smuzhiyun 
3474*4882a593Smuzhiyun 	/*
3475*4882a593Smuzhiyun 	 * walk the list of orphan clocks and reparent any that now have a
3476*4882a593Smuzhiyun 	 * parent.
3477*4882a593Smuzhiyun 	 */
3478*4882a593Smuzhiyun 	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3479*4882a593Smuzhiyun 		struct clk_core *parent = __clk_init_parent(orphan);
3480*4882a593Smuzhiyun 
3481*4882a593Smuzhiyun 		/*
3482*4882a593Smuzhiyun 		 * We need to use __clk_set_parent_before() and _after() to
3483*4882a593Smuzhiyun 		 * properly migrate any prepare/enable count of the orphan
3484*4882a593Smuzhiyun 		 * clock. This is important for CLK_IS_CRITICAL clocks, which
3485*4882a593Smuzhiyun 		 * are enabled during init but might not have a parent yet.
3486*4882a593Smuzhiyun 		 */
3487*4882a593Smuzhiyun 		if (parent) {
3488*4882a593Smuzhiyun 			/* update the clk tree topology */
3489*4882a593Smuzhiyun 			__clk_set_parent_before(orphan, parent);
3490*4882a593Smuzhiyun 			__clk_set_parent_after(orphan, parent, NULL);
3491*4882a593Smuzhiyun 			__clk_recalc_accuracies(orphan);
3492*4882a593Smuzhiyun 			__clk_recalc_rates(orphan, 0);
3493*4882a593Smuzhiyun 			__clk_core_update_orphan_hold_state(orphan);
3494*4882a593Smuzhiyun 
3495*4882a593Smuzhiyun 			/*
3496*4882a593Smuzhiyun 			 * __clk_init_parent() will set the initial req_rate to
3497*4882a593Smuzhiyun 			 * 0 if the clock doesn't have clk_ops::recalc_rate and
3498*4882a593Smuzhiyun 			 * is an orphan when it's registered.
3499*4882a593Smuzhiyun 			 *
3500*4882a593Smuzhiyun 			 * 'req_rate' is used by clk_set_rate_range() and
3501*4882a593Smuzhiyun 			 * clk_put() to trigger a clk_set_rate() call whenever
3502*4882a593Smuzhiyun 			 * the boundaries are modified. Let's make sure
3503*4882a593Smuzhiyun 			 * 'req_rate' is set to something non-zero so that
3504*4882a593Smuzhiyun 			 * clk_set_rate_range() doesn't drop the frequency.
3505*4882a593Smuzhiyun 			 */
3506*4882a593Smuzhiyun 			orphan->req_rate = orphan->rate;
3507*4882a593Smuzhiyun 		}
3508*4882a593Smuzhiyun 	}
3509*4882a593Smuzhiyun }
3510*4882a593Smuzhiyun 
3511*4882a593Smuzhiyun /**
3512*4882a593Smuzhiyun  * __clk_core_init - initialize the data structures in a struct clk_core
3513*4882a593Smuzhiyun  * @core:	clk_core being initialized
3514*4882a593Smuzhiyun  *
3515*4882a593Smuzhiyun  * Initializes the lists in struct clk_core, queries the hardware for the
3516*4882a593Smuzhiyun  * parent and rate and sets them both.
3517*4882a593Smuzhiyun  */
3518*4882a593Smuzhiyun static int __clk_core_init(struct clk_core *core)
3519*4882a593Smuzhiyun {
3520*4882a593Smuzhiyun 	int ret;
3521*4882a593Smuzhiyun 	struct clk_core *parent;
3522*4882a593Smuzhiyun 	unsigned long rate;
3523*4882a593Smuzhiyun 	int phase;
3524*4882a593Smuzhiyun 
3525*4882a593Smuzhiyun 	if (!core)
3526*4882a593Smuzhiyun 		return -EINVAL;
3527*4882a593Smuzhiyun 
3528*4882a593Smuzhiyun 	clk_prepare_lock();
3529*4882a593Smuzhiyun 
3530*4882a593Smuzhiyun 	/*
3531*4882a593Smuzhiyun 	 * Set hw->core after grabbing the prepare_lock to synchronize with
3532*4882a593Smuzhiyun 	 * callers of clk_core_fill_parent_index() where we treat hw->core
3533*4882a593Smuzhiyun 	 * being NULL as the clk not being registered yet. This is crucial so
3534*4882a593Smuzhiyun 	 * that clks aren't parented until their parent is fully registered.
3535*4882a593Smuzhiyun 	 */
3536*4882a593Smuzhiyun 	core->hw->core = core;
3537*4882a593Smuzhiyun 
3538*4882a593Smuzhiyun 	ret = clk_pm_runtime_get(core);
3539*4882a593Smuzhiyun 	if (ret)
3540*4882a593Smuzhiyun 		goto unlock;
3541*4882a593Smuzhiyun 
3542*4882a593Smuzhiyun 	/* check to see if a clock with this name is already registered */
3543*4882a593Smuzhiyun 	if (clk_core_lookup(core->name)) {
3544*4882a593Smuzhiyun 		pr_debug("%s: clk %s already initialized\n",
3545*4882a593Smuzhiyun 				__func__, core->name);
3546*4882a593Smuzhiyun 		ret = -EEXIST;
3547*4882a593Smuzhiyun 		goto out;
3548*4882a593Smuzhiyun 	}
3549*4882a593Smuzhiyun 
3550*4882a593Smuzhiyun 	/* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3551*4882a593Smuzhiyun 	if (core->ops->set_rate &&
3552*4882a593Smuzhiyun 	    !((core->ops->round_rate || core->ops->determine_rate) &&
3553*4882a593Smuzhiyun 	      core->ops->recalc_rate)) {
3554*4882a593Smuzhiyun 		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3555*4882a593Smuzhiyun 		       __func__, core->name);
3556*4882a593Smuzhiyun 		ret = -EINVAL;
3557*4882a593Smuzhiyun 		goto out;
3558*4882a593Smuzhiyun 	}
3559*4882a593Smuzhiyun 
3560*4882a593Smuzhiyun 	if (core->ops->set_parent && !core->ops->get_parent) {
3561*4882a593Smuzhiyun 		pr_err("%s: %s must implement .get_parent & .set_parent\n",
3562*4882a593Smuzhiyun 		       __func__, core->name);
3563*4882a593Smuzhiyun 		ret = -EINVAL;
3564*4882a593Smuzhiyun 		goto out;
3565*4882a593Smuzhiyun 	}
3566*4882a593Smuzhiyun 
3567*4882a593Smuzhiyun 	if (core->num_parents > 1 && !core->ops->get_parent) {
3568*4882a593Smuzhiyun 		pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
3569*4882a593Smuzhiyun 		       __func__, core->name);
3570*4882a593Smuzhiyun 		ret = -EINVAL;
3571*4882a593Smuzhiyun 		goto out;
3572*4882a593Smuzhiyun 	}
3573*4882a593Smuzhiyun 
3574*4882a593Smuzhiyun 	if (core->ops->set_rate_and_parent &&
3575*4882a593Smuzhiyun 			!(core->ops->set_parent && core->ops->set_rate)) {
3576*4882a593Smuzhiyun 		pr_err("%s: %s must implement .set_parent & .set_rate\n",
3577*4882a593Smuzhiyun 				__func__, core->name);
3578*4882a593Smuzhiyun 		ret = -EINVAL;
3579*4882a593Smuzhiyun 		goto out;
3580*4882a593Smuzhiyun 	}
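
	/*
	 * Taken together, the checks above mean that a clk which can change
	 * both its rate and its parent needs at least the ops sketched below
	 * (illustrative only; the foo_* callbacks are hypothetical, and
	 * .determine_rate may stand in for .round_rate):
	 *
	 *	static const struct clk_ops foo_ops = {
	 *		.recalc_rate	= foo_recalc_rate,
	 *		.round_rate	= foo_round_rate,
	 *		.set_rate	= foo_set_rate,
	 *		.get_parent	= foo_get_parent,
	 *		.set_parent	= foo_set_parent,
	 *	};
	 */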
3581*4882a593Smuzhiyun 
3582*4882a593Smuzhiyun 	/*
3583*4882a593Smuzhiyun 	 * optional platform-specific magic
3584*4882a593Smuzhiyun 	 *
3585*4882a593Smuzhiyun 	 * The .init callback is not used by any of the basic clock types, but
3586*4882a593Smuzhiyun 	 * exists for weird hardware that must perform initialization magic for
3587*4882a593Smuzhiyun 	 * CCF to get an accurate view of the clock for any other callbacks. It
3588*4882a593Smuzhiyun 	 * may also be used when the provider needs to perform dynamic allocations.
3589*4882a593Smuzhiyun 	 * Any such allocation must be freed in the terminate() callback.
3590*4882a593Smuzhiyun 	 * This callback shall not be used to initialize parameter state, such
3591*4882a593Smuzhiyun 	 * as the rate or the parent.
3592*4882a593Smuzhiyun 	 *
3593*4882a593Smuzhiyun 	 * If it exists, this callback is called before any other callback of
3594*4882a593Smuzhiyun 	 * the clock.
3595*4882a593Smuzhiyun 	 */
3596*4882a593Smuzhiyun 	if (core->ops->init) {
3597*4882a593Smuzhiyun 		ret = core->ops->init(core->hw);
3598*4882a593Smuzhiyun 		if (ret)
3599*4882a593Smuzhiyun 			goto out;
3600*4882a593Smuzhiyun 	}
3601*4882a593Smuzhiyun 
3602*4882a593Smuzhiyun 	parent = core->parent = __clk_init_parent(core);
3603*4882a593Smuzhiyun 
3604*4882a593Smuzhiyun 	/*
3605*4882a593Smuzhiyun 	 * Populate core->parent if parent has already been clk_core_init'd. If
3606*4882a593Smuzhiyun 	 * parent has not yet been clk_core_init'd then place clk in the orphan
3607*4882a593Smuzhiyun 	 * list.  If clk doesn't have any parents then place it in the root
3608*4882a593Smuzhiyun 	 * clk list.
3609*4882a593Smuzhiyun 	 *
3610*4882a593Smuzhiyun 	 * Every time a new clk is clk_init'd then we walk the list of orphan
3611*4882a593Smuzhiyun 	 * clocks and re-parent any that are children of the clock currently
3612*4882a593Smuzhiyun 	 * being clk_init'd.
3613*4882a593Smuzhiyun 	 */
3614*4882a593Smuzhiyun 	if (parent) {
3615*4882a593Smuzhiyun 		hlist_add_head(&core->child_node, &parent->children);
3616*4882a593Smuzhiyun 		core->orphan = parent->orphan;
3617*4882a593Smuzhiyun 	} else if (!core->num_parents) {
3618*4882a593Smuzhiyun 		hlist_add_head(&core->child_node, &clk_root_list);
3619*4882a593Smuzhiyun 		core->orphan = false;
3620*4882a593Smuzhiyun 	} else {
3621*4882a593Smuzhiyun 		hlist_add_head(&core->child_node, &clk_orphan_list);
3622*4882a593Smuzhiyun 		core->orphan = true;
3623*4882a593Smuzhiyun 	}
3624*4882a593Smuzhiyun 
3625*4882a593Smuzhiyun 	/*
3626*4882a593Smuzhiyun 	 * Set clk's accuracy.  The preferred method is to use
3627*4882a593Smuzhiyun 	 * .recalc_accuracy. For simple clocks and lazy developers the default
3628*4882a593Smuzhiyun 	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
3629*4882a593Smuzhiyun 	 * parent (or is orphaned) then accuracy is set to zero (perfect
3630*4882a593Smuzhiyun 	 * clock).
3631*4882a593Smuzhiyun 	 */
3632*4882a593Smuzhiyun 	if (core->ops->recalc_accuracy)
3633*4882a593Smuzhiyun 		core->accuracy = core->ops->recalc_accuracy(core->hw,
3634*4882a593Smuzhiyun 					clk_core_get_accuracy_no_lock(parent));
3635*4882a593Smuzhiyun 	else if (parent)
3636*4882a593Smuzhiyun 		core->accuracy = parent->accuracy;
3637*4882a593Smuzhiyun 	else
3638*4882a593Smuzhiyun 		core->accuracy = 0;
3639*4882a593Smuzhiyun 
3640*4882a593Smuzhiyun 	/*
3641*4882a593Smuzhiyun 	 * Set clk's phase; clk_core_get_phase() caches the result.
3642*4882a593Smuzhiyun 	 * Since a phase is by definition relative to its parent, just
3643*4882a593Smuzhiyun 	 * query the current clock phase, or just assume it's in phase.
3644*4882a593Smuzhiyun 	 */
3645*4882a593Smuzhiyun 	phase = clk_core_get_phase(core);
3646*4882a593Smuzhiyun 	if (phase < 0) {
3647*4882a593Smuzhiyun 		ret = phase;
3648*4882a593Smuzhiyun 		pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
3649*4882a593Smuzhiyun 			core->name);
3650*4882a593Smuzhiyun 		goto out;
3651*4882a593Smuzhiyun 	}
3652*4882a593Smuzhiyun 
3653*4882a593Smuzhiyun 	/*
3654*4882a593Smuzhiyun 	 * Set clk's duty cycle.
3655*4882a593Smuzhiyun 	 */
3656*4882a593Smuzhiyun 	clk_core_update_duty_cycle_nolock(core);
3657*4882a593Smuzhiyun 
3658*4882a593Smuzhiyun 	/*
3659*4882a593Smuzhiyun 	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
3660*4882a593Smuzhiyun 	 * simple clocks and lazy developers the default fallback is to use the
3661*4882a593Smuzhiyun 	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
3662*4882a593Smuzhiyun 	 * then rate is set to zero.
3663*4882a593Smuzhiyun 	 */
3664*4882a593Smuzhiyun 	if (core->ops->recalc_rate)
3665*4882a593Smuzhiyun 		rate = core->ops->recalc_rate(core->hw,
3666*4882a593Smuzhiyun 				clk_core_get_rate_nolock(parent));
3667*4882a593Smuzhiyun 	else if (parent)
3668*4882a593Smuzhiyun 		rate = parent->rate;
3669*4882a593Smuzhiyun 	else
3670*4882a593Smuzhiyun 		rate = 0;
3671*4882a593Smuzhiyun 	core->rate = core->req_rate = rate;
3672*4882a593Smuzhiyun 
3673*4882a593Smuzhiyun 	core->boot_enabled = clk_core_is_enabled(core);
3674*4882a593Smuzhiyun 
3675*4882a593Smuzhiyun 	/*
3676*4882a593Smuzhiyun 	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3677*4882a593Smuzhiyun 	 * don't get accidentally disabled when walking the orphan tree and
3678*4882a593Smuzhiyun 	 * reparenting clocks
3679*4882a593Smuzhiyun 	 */
3680*4882a593Smuzhiyun 	if (core->flags & CLK_IS_CRITICAL) {
3681*4882a593Smuzhiyun 		unsigned long flags;
3682*4882a593Smuzhiyun 
3683*4882a593Smuzhiyun 		ret = clk_core_prepare(core);
3684*4882a593Smuzhiyun 		if (ret) {
3685*4882a593Smuzhiyun 			pr_warn("%s: critical clk '%s' failed to prepare\n",
3686*4882a593Smuzhiyun 			       __func__, core->name);
3687*4882a593Smuzhiyun 			goto out;
3688*4882a593Smuzhiyun 		}
3689*4882a593Smuzhiyun 
3690*4882a593Smuzhiyun 		flags = clk_enable_lock();
3691*4882a593Smuzhiyun 		ret = clk_core_enable(core);
3692*4882a593Smuzhiyun 		clk_enable_unlock(flags);
3693*4882a593Smuzhiyun 		if (ret) {
3694*4882a593Smuzhiyun 			pr_warn("%s: critical clk '%s' failed to enable\n",
3695*4882a593Smuzhiyun 			       __func__, core->name);
3696*4882a593Smuzhiyun 			clk_core_unprepare(core);
3697*4882a593Smuzhiyun 			goto out;
3698*4882a593Smuzhiyun 		}
3699*4882a593Smuzhiyun 	}
3700*4882a593Smuzhiyun 
3701*4882a593Smuzhiyun 	clk_core_hold_state(core);
3702*4882a593Smuzhiyun 	clk_core_reparent_orphans_nolock();
3703*4882a593Smuzhiyun 
3704*4882a593Smuzhiyun 
3705*4882a593Smuzhiyun 	kref_init(&core->ref);
3706*4882a593Smuzhiyun out:
3707*4882a593Smuzhiyun 	clk_pm_runtime_put(core);
3708*4882a593Smuzhiyun unlock:
3709*4882a593Smuzhiyun 	if (ret) {
3710*4882a593Smuzhiyun 		hlist_del_init(&core->child_node);
3711*4882a593Smuzhiyun 		core->hw->core = NULL;
3712*4882a593Smuzhiyun 	}
3713*4882a593Smuzhiyun 
3714*4882a593Smuzhiyun 	clk_prepare_unlock();
3715*4882a593Smuzhiyun 
3716*4882a593Smuzhiyun 	if (!ret)
3717*4882a593Smuzhiyun 		clk_debug_register(core);
3718*4882a593Smuzhiyun 
3719*4882a593Smuzhiyun 	return ret;
3720*4882a593Smuzhiyun }
3721*4882a593Smuzhiyun 
3722*4882a593Smuzhiyun /**
3723*4882a593Smuzhiyun  * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3724*4882a593Smuzhiyun  * @core: clk to add consumer to
3725*4882a593Smuzhiyun  * @clk: consumer to link to a clk
3726*4882a593Smuzhiyun  */
3727*4882a593Smuzhiyun static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3728*4882a593Smuzhiyun {
3729*4882a593Smuzhiyun 	clk_prepare_lock();
3730*4882a593Smuzhiyun 	hlist_add_head(&clk->clks_node, &core->clks);
3731*4882a593Smuzhiyun 	clk_prepare_unlock();
3732*4882a593Smuzhiyun }
3733*4882a593Smuzhiyun 
3734*4882a593Smuzhiyun /**
3735*4882a593Smuzhiyun  * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3736*4882a593Smuzhiyun  * @clk: consumer to unlink
3737*4882a593Smuzhiyun  */
3738*4882a593Smuzhiyun static void clk_core_unlink_consumer(struct clk *clk)
3739*4882a593Smuzhiyun {
3740*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
3741*4882a593Smuzhiyun 	hlist_del(&clk->clks_node);
3742*4882a593Smuzhiyun }
3743*4882a593Smuzhiyun 
3744*4882a593Smuzhiyun /**
3745*4882a593Smuzhiyun  * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3746*4882a593Smuzhiyun  * @core: clk to allocate a consumer for
3747*4882a593Smuzhiyun  * @dev_id: string describing device name
3748*4882a593Smuzhiyun  * @con_id: connection ID string on device
3749*4882a593Smuzhiyun  *
3750*4882a593Smuzhiyun  * Returns: clk consumer left unlinked from the consumer list
3751*4882a593Smuzhiyun  */
3752*4882a593Smuzhiyun static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3753*4882a593Smuzhiyun 			     const char *con_id)
3754*4882a593Smuzhiyun {
3755*4882a593Smuzhiyun 	struct clk *clk;
3756*4882a593Smuzhiyun 
3757*4882a593Smuzhiyun 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3758*4882a593Smuzhiyun 	if (!clk)
3759*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
3760*4882a593Smuzhiyun 
3761*4882a593Smuzhiyun 	clk->core = core;
3762*4882a593Smuzhiyun 	clk->dev_id = dev_id;
3763*4882a593Smuzhiyun 	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3764*4882a593Smuzhiyun 	clk->max_rate = ULONG_MAX;
3765*4882a593Smuzhiyun 
3766*4882a593Smuzhiyun 	return clk;
3767*4882a593Smuzhiyun }
3768*4882a593Smuzhiyun 
3769*4882a593Smuzhiyun /**
3770*4882a593Smuzhiyun  * free_clk - Free a clk consumer
3771*4882a593Smuzhiyun  * @clk: clk consumer to free
3772*4882a593Smuzhiyun  *
3773*4882a593Smuzhiyun  * Note, this assumes the clk has been unlinked from the clk_core consumer
3774*4882a593Smuzhiyun  * list.
3775*4882a593Smuzhiyun  */
3776*4882a593Smuzhiyun static void free_clk(struct clk *clk)
3777*4882a593Smuzhiyun {
3778*4882a593Smuzhiyun 	kfree_const(clk->con_id);
3779*4882a593Smuzhiyun 	kfree(clk);
3780*4882a593Smuzhiyun }
3781*4882a593Smuzhiyun 
3782*4882a593Smuzhiyun /**
3783*4882a593Smuzhiyun  * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
3784*4882a593Smuzhiyun  * a clk_hw
3785*4882a593Smuzhiyun  * @dev: clk consumer device
3786*4882a593Smuzhiyun  * @hw: clk_hw associated with the clk being consumed
3787*4882a593Smuzhiyun  * @dev_id: string describing device name
3788*4882a593Smuzhiyun  * @con_id: connection ID string on device
3789*4882a593Smuzhiyun  *
3790*4882a593Smuzhiyun  * This is the main function used to create a clk pointer for use by clk
3791*4882a593Smuzhiyun  * consumers. It connects a consumer to the clk_core and clk_hw structures
3792*4882a593Smuzhiyun  * used by the framework and clk provider respectively.
3793*4882a593Smuzhiyun  */
3794*4882a593Smuzhiyun struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3795*4882a593Smuzhiyun 			      const char *dev_id, const char *con_id)
3796*4882a593Smuzhiyun {
3797*4882a593Smuzhiyun 	struct clk *clk;
3798*4882a593Smuzhiyun 	struct clk_core *core;
3799*4882a593Smuzhiyun 
3800*4882a593Smuzhiyun 	/* This is to allow this function to be chained to others */
3801*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(hw))
3802*4882a593Smuzhiyun 		return ERR_CAST(hw);
3803*4882a593Smuzhiyun 
3804*4882a593Smuzhiyun 	core = hw->core;
3805*4882a593Smuzhiyun 	clk = alloc_clk(core, dev_id, con_id);
3806*4882a593Smuzhiyun 	if (IS_ERR(clk))
3807*4882a593Smuzhiyun 		return clk;
3808*4882a593Smuzhiyun 	clk->dev = dev;
3809*4882a593Smuzhiyun 
3810*4882a593Smuzhiyun 	if (!try_module_get(core->owner)) {
3811*4882a593Smuzhiyun 		free_clk(clk);
3812*4882a593Smuzhiyun 		return ERR_PTR(-ENOENT);
3813*4882a593Smuzhiyun 	}
3814*4882a593Smuzhiyun 
3815*4882a593Smuzhiyun 	kref_get(&core->ref);
3816*4882a593Smuzhiyun 	clk_core_link_consumer(core, clk);
3817*4882a593Smuzhiyun 
3818*4882a593Smuzhiyun 	return clk;
3819*4882a593Smuzhiyun }
3820*4882a593Smuzhiyun 
3821*4882a593Smuzhiyun /**
3822*4882a593Smuzhiyun  * clk_hw_get_clk - get a clk consumer given a clk_hw
3823*4882a593Smuzhiyun  * @hw: clk_hw associated with the clk being consumed
3824*4882a593Smuzhiyun  * @con_id: connection ID string on device
3825*4882a593Smuzhiyun  *
3826*4882a593Smuzhiyun  * Returns: new clk consumer
3827*4882a593Smuzhiyun  * This is the function to be used by providers which need
3828*4882a593Smuzhiyun  * to get a consumer clk and act on the clock element
3829*4882a593Smuzhiyun  * to get a consumer clk and act on the clock element.
3830*4882a593Smuzhiyun  * Calls to this function must be balanced with calls to clk_put().
3831*4882a593Smuzhiyun struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
3832*4882a593Smuzhiyun {
3833*4882a593Smuzhiyun 	struct device *dev = hw->core->dev;
3834*4882a593Smuzhiyun 	const char *name = dev ? dev_name(dev) : NULL;
3835*4882a593Smuzhiyun 
3836*4882a593Smuzhiyun 	return clk_hw_create_clk(dev, hw, name, con_id);
3837*4882a593Smuzhiyun }
3838*4882a593Smuzhiyun EXPORT_SYMBOL(clk_hw_get_clk);
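
/*
 * Example (hypothetical provider code, not part of this file): a provider
 * that registered &foo->hw can act on its own clock through the consumer
 * API, balancing the get with clk_put():
 *
 *	struct clk *clk = clk_hw_get_clk(&foo->hw, NULL);
 *
 *	if (!IS_ERR(clk)) {
 *		clk_set_rate(clk, 100000000);
 *		clk_put(clk);
 *	}
 */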
3839*4882a593Smuzhiyun 
3840*4882a593Smuzhiyun static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3841*4882a593Smuzhiyun {
3842*4882a593Smuzhiyun 	const char *dst;
3843*4882a593Smuzhiyun 
3844*4882a593Smuzhiyun 	if (!src) {
3845*4882a593Smuzhiyun 		if (must_exist)
3846*4882a593Smuzhiyun 			return -EINVAL;
3847*4882a593Smuzhiyun 		return 0;
3848*4882a593Smuzhiyun 	}
3849*4882a593Smuzhiyun 
3850*4882a593Smuzhiyun 	*dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3851*4882a593Smuzhiyun 	if (!dst)
3852*4882a593Smuzhiyun 		return -ENOMEM;
3853*4882a593Smuzhiyun 
3854*4882a593Smuzhiyun 	return 0;
3855*4882a593Smuzhiyun }
3856*4882a593Smuzhiyun 
3857*4882a593Smuzhiyun static int clk_core_populate_parent_map(struct clk_core *core,
3858*4882a593Smuzhiyun 					const struct clk_init_data *init)
3859*4882a593Smuzhiyun {
3860*4882a593Smuzhiyun 	u8 num_parents = init->num_parents;
3861*4882a593Smuzhiyun 	const char * const *parent_names = init->parent_names;
3862*4882a593Smuzhiyun 	const struct clk_hw **parent_hws = init->parent_hws;
3863*4882a593Smuzhiyun 	const struct clk_parent_data *parent_data = init->parent_data;
3864*4882a593Smuzhiyun 	int i, ret = 0;
3865*4882a593Smuzhiyun 	struct clk_parent_map *parents, *parent;
3866*4882a593Smuzhiyun 
3867*4882a593Smuzhiyun 	if (!num_parents)
3868*4882a593Smuzhiyun 		return 0;
3869*4882a593Smuzhiyun 
3870*4882a593Smuzhiyun 	/*
3871*4882a593Smuzhiyun 	 * Avoid unnecessary string look-ups of clk_core's possible parents by
3872*4882a593Smuzhiyun 	 * having a cache of names/clk_hw pointers to clk_core pointers.
3873*4882a593Smuzhiyun 	 */
3874*4882a593Smuzhiyun 	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3875*4882a593Smuzhiyun 	core->parents = parents;
3876*4882a593Smuzhiyun 	if (!parents)
3877*4882a593Smuzhiyun 		return -ENOMEM;
3878*4882a593Smuzhiyun 
3879*4882a593Smuzhiyun 	/* Copy everything over because it might be __initdata */
3880*4882a593Smuzhiyun 	for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3881*4882a593Smuzhiyun 		parent->index = -1;
3882*4882a593Smuzhiyun 		if (parent_names) {
3883*4882a593Smuzhiyun 			/* throw a WARN if any entries are NULL */
3884*4882a593Smuzhiyun 			WARN(!parent_names[i],
3885*4882a593Smuzhiyun 				"%s: invalid NULL in %s's .parent_names\n",
3886*4882a593Smuzhiyun 				__func__, core->name);
3887*4882a593Smuzhiyun 			ret = clk_cpy_name(&parent->name, parent_names[i],
3888*4882a593Smuzhiyun 					   true);
3889*4882a593Smuzhiyun 		} else if (parent_data) {
3890*4882a593Smuzhiyun 			parent->hw = parent_data[i].hw;
3891*4882a593Smuzhiyun 			parent->index = parent_data[i].index;
3892*4882a593Smuzhiyun 			ret = clk_cpy_name(&parent->fw_name,
3893*4882a593Smuzhiyun 					   parent_data[i].fw_name, false);
3894*4882a593Smuzhiyun 			if (!ret)
3895*4882a593Smuzhiyun 				ret = clk_cpy_name(&parent->name,
3896*4882a593Smuzhiyun 						   parent_data[i].name,
3897*4882a593Smuzhiyun 						   false);
3898*4882a593Smuzhiyun 		} else if (parent_hws) {
3899*4882a593Smuzhiyun 			parent->hw = parent_hws[i];
3900*4882a593Smuzhiyun 		} else {
3901*4882a593Smuzhiyun 			ret = -EINVAL;
3902*4882a593Smuzhiyun 			WARN(1, "Must specify parents if num_parents > 0\n");
3903*4882a593Smuzhiyun 		}
3904*4882a593Smuzhiyun 
3905*4882a593Smuzhiyun 		if (ret) {
3906*4882a593Smuzhiyun 			do {
3907*4882a593Smuzhiyun 				kfree_const(parents[i].name);
3908*4882a593Smuzhiyun 				kfree_const(parents[i].fw_name);
3909*4882a593Smuzhiyun 			} while (--i >= 0);
3910*4882a593Smuzhiyun 			kfree(parents);
3911*4882a593Smuzhiyun 
3912*4882a593Smuzhiyun 			return ret;
3913*4882a593Smuzhiyun 		}
3914*4882a593Smuzhiyun 	}
3915*4882a593Smuzhiyun 
3916*4882a593Smuzhiyun 	return 0;
3917*4882a593Smuzhiyun }
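
/*
 * For reference, the three ways a provider may describe parents in
 * clk_init_data, each of which lands in the clk_parent_map cache built
 * above (the names shown are hypothetical):
 *
 *	.parent_names = (const char *[]){ "xin24m" },
 *
 *	.parent_data = (const struct clk_parent_data[]){
 *		{ .fw_name = "ref", .name = "xin24m" },
 *	},
 *
 *	.parent_hws = (const struct clk_hw *[]){ &pll.hw },
 *
 * with .num_parents set to the array length in every case.
 */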
3918*4882a593Smuzhiyun 
3919*4882a593Smuzhiyun static void clk_core_free_parent_map(struct clk_core *core)
3920*4882a593Smuzhiyun {
3921*4882a593Smuzhiyun 	int i = core->num_parents;
3922*4882a593Smuzhiyun 
3923*4882a593Smuzhiyun 	if (!core->num_parents)
3924*4882a593Smuzhiyun 		return;
3925*4882a593Smuzhiyun 
3926*4882a593Smuzhiyun 	while (--i >= 0) {
3927*4882a593Smuzhiyun 		kfree_const(core->parents[i].name);
3928*4882a593Smuzhiyun 		kfree_const(core->parents[i].fw_name);
3929*4882a593Smuzhiyun 	}
3930*4882a593Smuzhiyun 
3931*4882a593Smuzhiyun 	kfree(core->parents);
3932*4882a593Smuzhiyun }
3933*4882a593Smuzhiyun 
3934*4882a593Smuzhiyun static struct clk *
3935*4882a593Smuzhiyun __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3936*4882a593Smuzhiyun {
3937*4882a593Smuzhiyun 	int ret;
3938*4882a593Smuzhiyun 	struct clk_core *core;
3939*4882a593Smuzhiyun 	const struct clk_init_data *init = hw->init;
3940*4882a593Smuzhiyun 
3941*4882a593Smuzhiyun 	/*
3942*4882a593Smuzhiyun 	 * The init data is not supposed to be used outside of the registration path.
3943*4882a593Smuzhiyun 	 * Set it to NULL so that provider drivers can't use it either and so that
3944*4882a593Smuzhiyun 	 * we catch use of hw->init early on in the core.
3945*4882a593Smuzhiyun 	 */
3946*4882a593Smuzhiyun 	hw->init = NULL;
3947*4882a593Smuzhiyun 
3948*4882a593Smuzhiyun 	core = kzalloc(sizeof(*core), GFP_KERNEL);
3949*4882a593Smuzhiyun 	if (!core) {
3950*4882a593Smuzhiyun 		ret = -ENOMEM;
3951*4882a593Smuzhiyun 		goto fail_out;
3952*4882a593Smuzhiyun 	}
3953*4882a593Smuzhiyun 
3954*4882a593Smuzhiyun 	core->name = kstrdup_const(init->name, GFP_KERNEL);
3955*4882a593Smuzhiyun 	if (!core->name) {
3956*4882a593Smuzhiyun 		ret = -ENOMEM;
3957*4882a593Smuzhiyun 		goto fail_name;
3958*4882a593Smuzhiyun 	}
3959*4882a593Smuzhiyun 
3960*4882a593Smuzhiyun 	if (WARN_ON(!init->ops)) {
3961*4882a593Smuzhiyun 		ret = -EINVAL;
3962*4882a593Smuzhiyun 		goto fail_ops;
3963*4882a593Smuzhiyun 	}
3964*4882a593Smuzhiyun 	core->ops = init->ops;
3965*4882a593Smuzhiyun 
3966*4882a593Smuzhiyun 	if (dev && pm_runtime_enabled(dev))
3967*4882a593Smuzhiyun 		core->rpm_enabled = true;
3968*4882a593Smuzhiyun 	core->dev = dev;
3969*4882a593Smuzhiyun 	core->of_node = np;
3970*4882a593Smuzhiyun 	if (dev && dev->driver)
3971*4882a593Smuzhiyun 		core->owner = dev->driver->owner;
3972*4882a593Smuzhiyun 	core->hw = hw;
3973*4882a593Smuzhiyun 	core->flags = init->flags;
3974*4882a593Smuzhiyun 	core->num_parents = init->num_parents;
3975*4882a593Smuzhiyun 	core->min_rate = 0;
3976*4882a593Smuzhiyun 	core->max_rate = ULONG_MAX;
3977*4882a593Smuzhiyun 
3978*4882a593Smuzhiyun 	ret = clk_core_populate_parent_map(core, init);
3979*4882a593Smuzhiyun 	if (ret)
3980*4882a593Smuzhiyun 		goto fail_parents;
3981*4882a593Smuzhiyun 
3982*4882a593Smuzhiyun 	INIT_HLIST_HEAD(&core->clks);
3983*4882a593Smuzhiyun 
3984*4882a593Smuzhiyun 	/*
3985*4882a593Smuzhiyun 	 * Don't call clk_hw_create_clk() here because that would pin the
3986*4882a593Smuzhiyun 	 * provider module to itself and prevent it from ever being removed.
3987*4882a593Smuzhiyun 	 */
3988*4882a593Smuzhiyun 	hw->clk = alloc_clk(core, NULL, NULL);
3989*4882a593Smuzhiyun 	if (IS_ERR(hw->clk)) {
3990*4882a593Smuzhiyun 		ret = PTR_ERR(hw->clk);
3991*4882a593Smuzhiyun 		goto fail_create_clk;
3992*4882a593Smuzhiyun 	}
3993*4882a593Smuzhiyun 
3994*4882a593Smuzhiyun 	clk_core_link_consumer(core, hw->clk);
3995*4882a593Smuzhiyun 
3996*4882a593Smuzhiyun 	ret = __clk_core_init(core);
3997*4882a593Smuzhiyun 	if (!ret)
3998*4882a593Smuzhiyun 		return hw->clk;
3999*4882a593Smuzhiyun 
4000*4882a593Smuzhiyun 	clk_prepare_lock();
4001*4882a593Smuzhiyun 	clk_core_unlink_consumer(hw->clk);
4002*4882a593Smuzhiyun 	clk_prepare_unlock();
4003*4882a593Smuzhiyun 
4004*4882a593Smuzhiyun 	free_clk(hw->clk);
4005*4882a593Smuzhiyun 	hw->clk = NULL;
4006*4882a593Smuzhiyun 
4007*4882a593Smuzhiyun fail_create_clk:
4008*4882a593Smuzhiyun 	clk_core_free_parent_map(core);
4009*4882a593Smuzhiyun fail_parents:
4010*4882a593Smuzhiyun fail_ops:
4011*4882a593Smuzhiyun 	kfree_const(core->name);
4012*4882a593Smuzhiyun fail_name:
4013*4882a593Smuzhiyun 	kfree(core);
4014*4882a593Smuzhiyun fail_out:
4015*4882a593Smuzhiyun 	return ERR_PTR(ret);
4016*4882a593Smuzhiyun }
4017*4882a593Smuzhiyun 
4018*4882a593Smuzhiyun /**
4019*4882a593Smuzhiyun  * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
4020*4882a593Smuzhiyun  * @dev: Device to get device node of
4021*4882a593Smuzhiyun  *
4022*4882a593Smuzhiyun  * Return: device node pointer of @dev, or the device node pointer of
4023*4882a593Smuzhiyun  * @dev->parent if @dev doesn't have a device node, or NULL if neither
4024*4882a593Smuzhiyun  * @dev nor @dev->parent has a device node.
4025*4882a593Smuzhiyun  */
4026*4882a593Smuzhiyun static struct device_node *dev_or_parent_of_node(struct device *dev)
4027*4882a593Smuzhiyun {
4028*4882a593Smuzhiyun 	struct device_node *np;
4029*4882a593Smuzhiyun 
4030*4882a593Smuzhiyun 	if (!dev)
4031*4882a593Smuzhiyun 		return NULL;
4032*4882a593Smuzhiyun 
4033*4882a593Smuzhiyun 	np = dev_of_node(dev);
4034*4882a593Smuzhiyun 	if (!np)
4035*4882a593Smuzhiyun 		np = dev_of_node(dev->parent);
4036*4882a593Smuzhiyun 
4037*4882a593Smuzhiyun 	return np;
4038*4882a593Smuzhiyun }
4039*4882a593Smuzhiyun 
4040*4882a593Smuzhiyun /**
4041*4882a593Smuzhiyun  * clk_register - allocate a new clock, register it and return an opaque cookie
4042*4882a593Smuzhiyun  * @dev: device that is registering this clock
4043*4882a593Smuzhiyun  * @hw: link to hardware-specific clock data
4044*4882a593Smuzhiyun  *
4045*4882a593Smuzhiyun  * clk_register is the *deprecated* interface for populating the clock tree with
4046*4882a593Smuzhiyun  * new clock nodes. Use clk_hw_register() instead.
4047*4882a593Smuzhiyun  *
4048*4882a593Smuzhiyun  * Returns: a pointer to the newly allocated struct clk which
4049*4882a593Smuzhiyun  * cannot be dereferenced by driver code but may be used in conjunction with the
4050*4882a593Smuzhiyun  * rest of the clock API.  In the event of an error clk_register will return an
4051*4882a593Smuzhiyun  * error code; drivers must test for an error code after calling clk_register.
4052*4882a593Smuzhiyun  */
4053*4882a593Smuzhiyun struct clk *clk_register(struct device *dev, struct clk_hw *hw)
4054*4882a593Smuzhiyun {
4055*4882a593Smuzhiyun 	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
4056*4882a593Smuzhiyun }
4057*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_register);
4058*4882a593Smuzhiyun 
4059*4882a593Smuzhiyun /**
4060*4882a593Smuzhiyun  * clk_hw_register - register a clk_hw and return an error code
4061*4882a593Smuzhiyun  * @dev: device that is registering this clock
4062*4882a593Smuzhiyun  * @hw: link to hardware-specific clock data
4063*4882a593Smuzhiyun  *
4064*4882a593Smuzhiyun  * clk_hw_register is the primary interface for populating the clock tree with
4065*4882a593Smuzhiyun  * new clock nodes. It returns an integer equal to zero indicating success or
4066*4882a593Smuzhiyun  * less than zero indicating failure. Drivers must test for an error code after
4067*4882a593Smuzhiyun  * calling clk_hw_register().
4068*4882a593Smuzhiyun  */
4069*4882a593Smuzhiyun int clk_hw_register(struct device *dev, struct clk_hw *hw)
4070*4882a593Smuzhiyun {
4071*4882a593Smuzhiyun 	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
4072*4882a593Smuzhiyun 			       hw));
4073*4882a593Smuzhiyun }
4074*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_hw_register);
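
/*
 * Example (illustrative only; the foo_* names are hypothetical): a typical
 * clk_hw_register() call from a platform driver:
 *
 *	static const struct clk_init_data foo_init = {
 *		.name = "foo",
 *		.ops = &foo_ops,
 *		.parent_names = (const char *[]){ "foo_parent" },
 *		.num_parents = 1,
 *	};
 *
 *	foo->hw.init = &foo_init;
 *	ret = clk_hw_register(&pdev->dev, &foo->hw);
 *	if (ret)
 *		return ret;
 */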
4075*4882a593Smuzhiyun 
4076*4882a593Smuzhiyun /*
4077*4882a593Smuzhiyun  * of_clk_hw_register - register a clk_hw and return an error code
4078*4882a593Smuzhiyun  * @node: device_node of device that is registering this clock
4079*4882a593Smuzhiyun  * @hw: link to hardware-specific clock data
4080*4882a593Smuzhiyun  *
4081*4882a593Smuzhiyun  * of_clk_hw_register() is the primary interface for populating the clock tree
4082*4882a593Smuzhiyun  * with new clock nodes when a struct device is not available, but a struct
4083*4882a593Smuzhiyun  * device_node is. It returns an integer equal to zero indicating success or
4084*4882a593Smuzhiyun  * less than zero indicating failure. Drivers must test for an error code after
4085*4882a593Smuzhiyun  * calling of_clk_hw_register().
4086*4882a593Smuzhiyun  */
4087*4882a593Smuzhiyun int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
4088*4882a593Smuzhiyun {
4089*4882a593Smuzhiyun 	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
4090*4882a593Smuzhiyun }
4091*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_hw_register);
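
/*
 * Example (illustrative): the same registration when only a device_node is
 * at hand, e.g. from a CLK_OF_DECLARE() setup function:
 *
 *	hw->init = &foo_init;
 *	ret = of_clk_hw_register(np, hw);
 */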
4092*4882a593Smuzhiyun 
4093*4882a593Smuzhiyun /* Free memory allocated for a clock. */
4094*4882a593Smuzhiyun static void __clk_release(struct kref *ref)
4095*4882a593Smuzhiyun {
4096*4882a593Smuzhiyun 	struct clk_core *core = container_of(ref, struct clk_core, ref);
4097*4882a593Smuzhiyun 
4098*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
4099*4882a593Smuzhiyun 
4100*4882a593Smuzhiyun 	clk_core_free_parent_map(core);
4101*4882a593Smuzhiyun 	kfree_const(core->name);
4102*4882a593Smuzhiyun 	kfree(core);
4103*4882a593Smuzhiyun }
4104*4882a593Smuzhiyun 
4105*4882a593Smuzhiyun /*
4106*4882a593Smuzhiyun  * Empty clk_ops for unregistered clocks. These are used temporarily
4107*4882a593Smuzhiyun  * after clk_unregister() was called on a clock and until the last clock
4108*4882a593Smuzhiyun  * consumer calls clk_put() and the struct clk object is freed.
4109*4882a593Smuzhiyun  */
4110*4882a593Smuzhiyun static int clk_nodrv_prepare_enable(struct clk_hw *hw)
4111*4882a593Smuzhiyun {
4112*4882a593Smuzhiyun 	return -ENXIO;
4113*4882a593Smuzhiyun }
4114*4882a593Smuzhiyun 
4115*4882a593Smuzhiyun static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
4116*4882a593Smuzhiyun {
4117*4882a593Smuzhiyun 	WARN_ON_ONCE(1);
4118*4882a593Smuzhiyun }
4119*4882a593Smuzhiyun 
4120*4882a593Smuzhiyun static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
4121*4882a593Smuzhiyun 					unsigned long parent_rate)
4122*4882a593Smuzhiyun {
4123*4882a593Smuzhiyun 	return -ENXIO;
4124*4882a593Smuzhiyun }
4125*4882a593Smuzhiyun 
4126*4882a593Smuzhiyun static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
4127*4882a593Smuzhiyun {
4128*4882a593Smuzhiyun 	return -ENXIO;
4129*4882a593Smuzhiyun }
4130*4882a593Smuzhiyun 
4131*4882a593Smuzhiyun static const struct clk_ops clk_nodrv_ops = {
4132*4882a593Smuzhiyun 	.enable		= clk_nodrv_prepare_enable,
4133*4882a593Smuzhiyun 	.disable	= clk_nodrv_disable_unprepare,
4134*4882a593Smuzhiyun 	.prepare	= clk_nodrv_prepare_enable,
4135*4882a593Smuzhiyun 	.unprepare	= clk_nodrv_disable_unprepare,
4136*4882a593Smuzhiyun 	.set_rate	= clk_nodrv_set_rate,
4137*4882a593Smuzhiyun 	.set_parent	= clk_nodrv_set_parent,
4138*4882a593Smuzhiyun };
4139*4882a593Smuzhiyun 
4140*4882a593Smuzhiyun static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
4141*4882a593Smuzhiyun 						struct clk_core *target)
4142*4882a593Smuzhiyun {
4143*4882a593Smuzhiyun 	int i;
4144*4882a593Smuzhiyun 	struct clk_core *child;
4145*4882a593Smuzhiyun 
4146*4882a593Smuzhiyun 	for (i = 0; i < root->num_parents; i++)
4147*4882a593Smuzhiyun 		if (root->parents[i].core == target)
4148*4882a593Smuzhiyun 			root->parents[i].core = NULL;
4149*4882a593Smuzhiyun 
4150*4882a593Smuzhiyun 	hlist_for_each_entry(child, &root->children, child_node)
4151*4882a593Smuzhiyun 		clk_core_evict_parent_cache_subtree(child, target);
4152*4882a593Smuzhiyun }
4153*4882a593Smuzhiyun 
4154*4882a593Smuzhiyun /* Remove this clk from all parent caches */
4155*4882a593Smuzhiyun static void clk_core_evict_parent_cache(struct clk_core *core)
4156*4882a593Smuzhiyun {
4157*4882a593Smuzhiyun 	struct hlist_head **lists;
4158*4882a593Smuzhiyun 	struct clk_core *root;
4159*4882a593Smuzhiyun 
4160*4882a593Smuzhiyun 	lockdep_assert_held(&prepare_lock);
4161*4882a593Smuzhiyun 
4162*4882a593Smuzhiyun 	for (lists = all_lists; *lists; lists++)
4163*4882a593Smuzhiyun 		hlist_for_each_entry(root, *lists, child_node)
4164*4882a593Smuzhiyun 			clk_core_evict_parent_cache_subtree(root, core);
4165*4882a593Smuzhiyun 
4166*4882a593Smuzhiyun }
4167*4882a593Smuzhiyun 
4168*4882a593Smuzhiyun /**
4169*4882a593Smuzhiyun  * clk_unregister - unregister a currently registered clock
4170*4882a593Smuzhiyun  * @clk: clock to unregister
4171*4882a593Smuzhiyun  */
4172*4882a593Smuzhiyun void clk_unregister(struct clk *clk)
4173*4882a593Smuzhiyun {
4174*4882a593Smuzhiyun 	unsigned long flags;
4175*4882a593Smuzhiyun 	const struct clk_ops *ops;
4176*4882a593Smuzhiyun 
4177*4882a593Smuzhiyun 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4178*4882a593Smuzhiyun 		return;
4179*4882a593Smuzhiyun 
4180*4882a593Smuzhiyun 	clk_debug_unregister(clk->core);
4181*4882a593Smuzhiyun 
4182*4882a593Smuzhiyun 	clk_prepare_lock();
4183*4882a593Smuzhiyun 
4184*4882a593Smuzhiyun 	ops = clk->core->ops;
4185*4882a593Smuzhiyun 	if (ops == &clk_nodrv_ops) {
4186*4882a593Smuzhiyun 		pr_err("%s: unregistered clock: %s\n", __func__,
4187*4882a593Smuzhiyun 		       clk->core->name);
4188*4882a593Smuzhiyun 		goto unlock;
4189*4882a593Smuzhiyun 	}
4190*4882a593Smuzhiyun 	/*
4191*4882a593Smuzhiyun 	 * Assign empty clock ops for consumers that might still hold
4192*4882a593Smuzhiyun 	 * a reference to this clock.
4193*4882a593Smuzhiyun 	 */
4194*4882a593Smuzhiyun 	flags = clk_enable_lock();
4195*4882a593Smuzhiyun 	clk->core->ops = &clk_nodrv_ops;
4196*4882a593Smuzhiyun 	clk_enable_unlock(flags);
4197*4882a593Smuzhiyun 
4198*4882a593Smuzhiyun 	if (ops->terminate)
4199*4882a593Smuzhiyun 		ops->terminate(clk->core->hw);
4200*4882a593Smuzhiyun 
4201*4882a593Smuzhiyun 	if (!hlist_empty(&clk->core->children)) {
4202*4882a593Smuzhiyun 		struct clk_core *child;
4203*4882a593Smuzhiyun 		struct hlist_node *t;
4204*4882a593Smuzhiyun 
4205*4882a593Smuzhiyun 		/* Reparent all children to the orphan list. */
4206*4882a593Smuzhiyun 		hlist_for_each_entry_safe(child, t, &clk->core->children,
4207*4882a593Smuzhiyun 					  child_node)
4208*4882a593Smuzhiyun 			clk_core_set_parent_nolock(child, NULL);
4209*4882a593Smuzhiyun 	}
4210*4882a593Smuzhiyun 
4211*4882a593Smuzhiyun 	clk_core_evict_parent_cache(clk->core);
4212*4882a593Smuzhiyun 
4213*4882a593Smuzhiyun 	hlist_del_init(&clk->core->child_node);
4214*4882a593Smuzhiyun 
4215*4882a593Smuzhiyun 	if (clk->core->prepare_count)
4216*4882a593Smuzhiyun 		pr_warn("%s: unregistering prepared clock: %s\n",
4217*4882a593Smuzhiyun 					__func__, clk->core->name);
4218*4882a593Smuzhiyun 
4219*4882a593Smuzhiyun 	if (clk->core->protect_count)
4220*4882a593Smuzhiyun 		pr_warn("%s: unregistering protected clock: %s\n",
4221*4882a593Smuzhiyun 					__func__, clk->core->name);
4222*4882a593Smuzhiyun 
4223*4882a593Smuzhiyun 	kref_put(&clk->core->ref, __clk_release);
4224*4882a593Smuzhiyun 	free_clk(clk);
4225*4882a593Smuzhiyun unlock:
4226*4882a593Smuzhiyun 	clk_prepare_unlock();
4227*4882a593Smuzhiyun }
4228*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_unregister);
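
/*
 * A balanced register/unregister pair, for illustration (foo is
 * hypothetical):
 *
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	...
 *	clk_unregister(clk);
 */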
4229*4882a593Smuzhiyun 
4230*4882a593Smuzhiyun /**
4231*4882a593Smuzhiyun  * clk_hw_unregister - unregister a currently registered clk_hw
4232*4882a593Smuzhiyun  * @hw: hardware-specific clock data to unregister
4233*4882a593Smuzhiyun  */
4234*4882a593Smuzhiyun void clk_hw_unregister(struct clk_hw *hw)
4235*4882a593Smuzhiyun {
4236*4882a593Smuzhiyun 	clk_unregister(hw->clk);
4237*4882a593Smuzhiyun }
4238*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_hw_unregister);
4239*4882a593Smuzhiyun 
4240*4882a593Smuzhiyun static void devm_clk_unregister_cb(struct device *dev, void *res)
4241*4882a593Smuzhiyun {
4242*4882a593Smuzhiyun 	clk_unregister(*(struct clk **)res);
4243*4882a593Smuzhiyun }
4244*4882a593Smuzhiyun 
4245*4882a593Smuzhiyun static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
4246*4882a593Smuzhiyun {
4247*4882a593Smuzhiyun 	clk_hw_unregister(*(struct clk_hw **)res);
4248*4882a593Smuzhiyun }
4249*4882a593Smuzhiyun 
4250*4882a593Smuzhiyun /**
4251*4882a593Smuzhiyun  * devm_clk_register - resource managed clk_register()
4252*4882a593Smuzhiyun  * @dev: device that is registering this clock
4253*4882a593Smuzhiyun  * @hw: link to hardware-specific clock data
4254*4882a593Smuzhiyun  *
4255*4882a593Smuzhiyun  * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
4256*4882a593Smuzhiyun  *
4257*4882a593Smuzhiyun  * Clocks returned from this function are automatically clk_unregister()ed on
4258*4882a593Smuzhiyun  * driver detach. See clk_register() for more information.
4259*4882a593Smuzhiyun  */
4260*4882a593Smuzhiyun struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4261*4882a593Smuzhiyun {
4262*4882a593Smuzhiyun 	struct clk *clk;
4263*4882a593Smuzhiyun 	struct clk **clkp;
4264*4882a593Smuzhiyun 
4265*4882a593Smuzhiyun 	clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
4266*4882a593Smuzhiyun 	if (!clkp)
4267*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
4268*4882a593Smuzhiyun 
4269*4882a593Smuzhiyun 	clk = clk_register(dev, hw);
4270*4882a593Smuzhiyun 	if (!IS_ERR(clk)) {
4271*4882a593Smuzhiyun 		*clkp = clk;
4272*4882a593Smuzhiyun 		devres_add(dev, clkp);
4273*4882a593Smuzhiyun 	} else {
4274*4882a593Smuzhiyun 		devres_free(clkp);
4275*4882a593Smuzhiyun 	}
4276*4882a593Smuzhiyun 
4277*4882a593Smuzhiyun 	return clk;
4278*4882a593Smuzhiyun }
4279*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_clk_register);
4280*4882a593Smuzhiyun 
4281*4882a593Smuzhiyun /**
4282*4882a593Smuzhiyun  * devm_clk_hw_register - resource managed clk_hw_register()
4283*4882a593Smuzhiyun  * @dev: device that is registering this clock
4284*4882a593Smuzhiyun  * @hw: link to hardware-specific clock data
4285*4882a593Smuzhiyun  *
4286*4882a593Smuzhiyun  * Managed clk_hw_register(). Clocks registered by this function are
4287*4882a593Smuzhiyun  * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
4288*4882a593Smuzhiyun  * for more information.
4289*4882a593Smuzhiyun  */
4290*4882a593Smuzhiyun int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4291*4882a593Smuzhiyun {
4292*4882a593Smuzhiyun 	struct clk_hw **hwp;
4293*4882a593Smuzhiyun 	int ret;
4294*4882a593Smuzhiyun 
4295*4882a593Smuzhiyun 	hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
4296*4882a593Smuzhiyun 	if (!hwp)
4297*4882a593Smuzhiyun 		return -ENOMEM;
4298*4882a593Smuzhiyun 
4299*4882a593Smuzhiyun 	ret = clk_hw_register(dev, hw);
4300*4882a593Smuzhiyun 	if (!ret) {
4301*4882a593Smuzhiyun 		*hwp = hw;
4302*4882a593Smuzhiyun 		devres_add(dev, hwp);
4303*4882a593Smuzhiyun 	} else {
4304*4882a593Smuzhiyun 		devres_free(hwp);
4305*4882a593Smuzhiyun 	}
4306*4882a593Smuzhiyun 
4307*4882a593Smuzhiyun 	return ret;
4308*4882a593Smuzhiyun }
4309*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_clk_hw_register);
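
/*
 * Example (illustrative): with the managed variant there is no explicit
 * teardown; the clk_hw is unregistered automatically on driver detach:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		foo->hw.init = &foo_init;
 *		return devm_clk_hw_register(&pdev->dev, &foo->hw);
 *	}
 */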
4310*4882a593Smuzhiyun 
4311*4882a593Smuzhiyun static int devm_clk_match(struct device *dev, void *res, void *data)
4312*4882a593Smuzhiyun {
4313*4882a593Smuzhiyun 	struct clk *c = res;
4314*4882a593Smuzhiyun 	if (WARN_ON(!c))
4315*4882a593Smuzhiyun 		return 0;
4316*4882a593Smuzhiyun 	return c == data;
4317*4882a593Smuzhiyun }
4318*4882a593Smuzhiyun 
4319*4882a593Smuzhiyun static int devm_clk_hw_match(struct device *dev, void *res, void *data)
4320*4882a593Smuzhiyun {
4321*4882a593Smuzhiyun 	struct clk_hw *hw = res;
4322*4882a593Smuzhiyun 
4323*4882a593Smuzhiyun 	if (WARN_ON(!hw))
4324*4882a593Smuzhiyun 		return 0;
4325*4882a593Smuzhiyun 	return hw == data;
4326*4882a593Smuzhiyun }
4327*4882a593Smuzhiyun 
4328*4882a593Smuzhiyun /**
4329*4882a593Smuzhiyun  * devm_clk_unregister - resource managed clk_unregister()
4330*4882a593Smuzhiyun  * @dev: device that is unregistering the clock data
4331*4882a593Smuzhiyun  * @clk: clock to unregister
4332*4882a593Smuzhiyun  *
4333*4882a593Smuzhiyun  * Deallocate a clock allocated with devm_clk_register(). Normally
4334*4882a593Smuzhiyun  * this function will not need to be called and the resource management
4335*4882a593Smuzhiyun  * code will ensure that the resource is freed.
4336*4882a593Smuzhiyun  */
4337*4882a593Smuzhiyun void devm_clk_unregister(struct device *dev, struct clk *clk)
4338*4882a593Smuzhiyun {
4339*4882a593Smuzhiyun 	WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
4340*4882a593Smuzhiyun }
4341*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_clk_unregister);
4342*4882a593Smuzhiyun 
4343*4882a593Smuzhiyun /**
4344*4882a593Smuzhiyun  * devm_clk_hw_unregister - resource managed clk_hw_unregister()
4345*4882a593Smuzhiyun  * @dev: device that is unregistering the hardware-specific clock data
4346*4882a593Smuzhiyun  * @hw: link to hardware-specific clock data
4347*4882a593Smuzhiyun  *
4348*4882a593Smuzhiyun  * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
4349*4882a593Smuzhiyun  * this function will not need to be called and the resource management
4350*4882a593Smuzhiyun  * code will ensure that the resource is freed.
4351*4882a593Smuzhiyun  */
4352*4882a593Smuzhiyun void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
4353*4882a593Smuzhiyun {
4354*4882a593Smuzhiyun 	WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
4355*4882a593Smuzhiyun 				hw));
4356*4882a593Smuzhiyun }
4357*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
4358*4882a593Smuzhiyun 
4359*4882a593Smuzhiyun static void devm_clk_release(struct device *dev, void *res)
4360*4882a593Smuzhiyun {
4361*4882a593Smuzhiyun 	clk_put(*(struct clk **)res);
4362*4882a593Smuzhiyun }
4363*4882a593Smuzhiyun 
4364*4882a593Smuzhiyun /**
4365*4882a593Smuzhiyun  * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
4366*4882a593Smuzhiyun  * @dev: device that is registering this clock
4367*4882a593Smuzhiyun  * @hw: clk_hw associated with the clk being consumed
4368*4882a593Smuzhiyun  * @con_id: connection ID string on device
4369*4882a593Smuzhiyun  *
4370*4882a593Smuzhiyun  * Managed clk_hw_get_clk(). Clocks obtained with this function are
4371*4882a593Smuzhiyun  * automatically clk_put() on driver detach. See clk_put()
4372*4882a593Smuzhiyun  * for more information.
4373*4882a593Smuzhiyun  */
4374*4882a593Smuzhiyun struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
4375*4882a593Smuzhiyun 				const char *con_id)
4376*4882a593Smuzhiyun {
4377*4882a593Smuzhiyun 	struct clk *clk;
4378*4882a593Smuzhiyun 	struct clk **clkp;
4379*4882a593Smuzhiyun 
4380*4882a593Smuzhiyun 	/* This should not happen because it would mean we have drivers
4381*4882a593Smuzhiyun 	 * passing around clk_hw pointers instead of having the caller use
4382*4882a593Smuzhiyun 	 * proper clk_get() style APIs
4383*4882a593Smuzhiyun 	 */
4384*4882a593Smuzhiyun 	WARN_ON_ONCE(dev != hw->core->dev);
4385*4882a593Smuzhiyun 
4386*4882a593Smuzhiyun 	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4387*4882a593Smuzhiyun 	if (!clkp)
4388*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
4389*4882a593Smuzhiyun 
4390*4882a593Smuzhiyun 	clk = clk_hw_get_clk(hw, con_id);
4391*4882a593Smuzhiyun 	if (!IS_ERR(clk)) {
4392*4882a593Smuzhiyun 		*clkp = clk;
4393*4882a593Smuzhiyun 		devres_add(dev, clkp);
4394*4882a593Smuzhiyun 	} else {
4395*4882a593Smuzhiyun 		devres_free(clkp);
4396*4882a593Smuzhiyun 	}
4397*4882a593Smuzhiyun 
4398*4882a593Smuzhiyun 	return clk;
4399*4882a593Smuzhiyun }
4400*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
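
/*
 * Example (illustrative): a provider taking a managed consumer handle on
 * its own clk; the matching clk_put() happens on driver detach:
 *
 *	clk = devm_clk_hw_get_clk(&pdev->dev, &foo->hw, NULL);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */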
4401*4882a593Smuzhiyun 
4402*4882a593Smuzhiyun /*
4403*4882a593Smuzhiyun  * clkdev helpers
4404*4882a593Smuzhiyun  */
4405*4882a593Smuzhiyun 
4406*4882a593Smuzhiyun void __clk_put(struct clk *clk)
4407*4882a593Smuzhiyun {
4408*4882a593Smuzhiyun 	struct module *owner;
4409*4882a593Smuzhiyun 
4410*4882a593Smuzhiyun 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4411*4882a593Smuzhiyun 		return;
4412*4882a593Smuzhiyun 
4413*4882a593Smuzhiyun 	clk_prepare_lock();
4414*4882a593Smuzhiyun 
4415*4882a593Smuzhiyun 	/*
4416*4882a593Smuzhiyun 	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
4417*4882a593Smuzhiyun 	 * given user should be balanced with calls to clk_rate_exclusive_put(),
4418*4882a593Smuzhiyun 	 * by that same consumer.
4419*4882a593Smuzhiyun 	 */
4420*4882a593Smuzhiyun 	if (WARN_ON(clk->exclusive_count)) {
4421*4882a593Smuzhiyun 		/* We voiced our concern, let's sanitize the situation */
4422*4882a593Smuzhiyun 		clk->core->protect_count -= (clk->exclusive_count - 1);
4423*4882a593Smuzhiyun 		clk_core_rate_unprotect(clk->core);
4424*4882a593Smuzhiyun 		clk->exclusive_count = 0;
4425*4882a593Smuzhiyun 	}
4426*4882a593Smuzhiyun 
4427*4882a593Smuzhiyun 	hlist_del(&clk->clks_node);
4428*4882a593Smuzhiyun 	if (clk->min_rate > clk->core->req_rate ||
4429*4882a593Smuzhiyun 	    clk->max_rate < clk->core->req_rate)
4430*4882a593Smuzhiyun 		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4431*4882a593Smuzhiyun 
4432*4882a593Smuzhiyun 	owner = clk->core->owner;
4433*4882a593Smuzhiyun 	kref_put(&clk->core->ref, __clk_release);
4434*4882a593Smuzhiyun 
4435*4882a593Smuzhiyun 	clk_prepare_unlock();
4436*4882a593Smuzhiyun 
4437*4882a593Smuzhiyun 	module_put(owner);
4438*4882a593Smuzhiyun 
4439*4882a593Smuzhiyun 	free_clk(clk);
4440*4882a593Smuzhiyun }
4441*4882a593Smuzhiyun 
4442*4882a593Smuzhiyun /***        clk rate change notifiers        ***/
4443*4882a593Smuzhiyun 
4444*4882a593Smuzhiyun /**
4445*4882a593Smuzhiyun  * clk_notifier_register - add a clk rate change notifier
4446*4882a593Smuzhiyun  * @clk: struct clk * to watch
4447*4882a593Smuzhiyun  * @nb: struct notifier_block * with callback info
4448*4882a593Smuzhiyun  *
4449*4882a593Smuzhiyun  * Request notification when clk's rate changes.  This uses an SRCU
4450*4882a593Smuzhiyun  * notifier because we want it to block and notifier unregistrations are
4451*4882a593Smuzhiyun  * uncommon.  The callbacks associated with the notifier must not
4452*4882a593Smuzhiyun  * re-enter into the clk framework by calling any top-level clk APIs;
4453*4882a593Smuzhiyun  * this would cause the prepare_lock mutex to be taken recursively.
4454*4882a593Smuzhiyun  *
4455*4882a593Smuzhiyun  * In all notification cases (pre, post and abort rate change) the original
4456*4882a593Smuzhiyun  * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4457*4882a593Smuzhiyun  * and the new frequency is passed via struct clk_notifier_data.new_rate.
4458*4882a593Smuzhiyun  *
4459*4882a593Smuzhiyun  * clk_notifier_register() must be called from non-atomic context.
4460*4882a593Smuzhiyun  * Returns -EINVAL if called with null arguments, -ENOMEM upon
4461*4882a593Smuzhiyun  * allocation failure; otherwise, passes along the return value of
4462*4882a593Smuzhiyun  * srcu_notifier_chain_register().
4463*4882a593Smuzhiyun  */
4464*4882a593Smuzhiyun int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4465*4882a593Smuzhiyun {
4466*4882a593Smuzhiyun 	struct clk_notifier *cn;
4467*4882a593Smuzhiyun 	int ret = -ENOMEM;
4468*4882a593Smuzhiyun 
4469*4882a593Smuzhiyun 	if (!clk || !nb)
4470*4882a593Smuzhiyun 		return -EINVAL;
4471*4882a593Smuzhiyun 
4472*4882a593Smuzhiyun 	clk_prepare_lock();
4473*4882a593Smuzhiyun 
4474*4882a593Smuzhiyun 	/* search the list of notifiers for this clk */
4475*4882a593Smuzhiyun 	list_for_each_entry(cn, &clk_notifier_list, node)
4476*4882a593Smuzhiyun 		if (cn->clk == clk)
4477*4882a593Smuzhiyun 			goto found;
4478*4882a593Smuzhiyun 
4479*4882a593Smuzhiyun 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
4480*4882a593Smuzhiyun 	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4481*4882a593Smuzhiyun 	if (!cn)
4482*4882a593Smuzhiyun 		goto out;
4483*4882a593Smuzhiyun 
4484*4882a593Smuzhiyun 	cn->clk = clk;
4485*4882a593Smuzhiyun 	srcu_init_notifier_head(&cn->notifier_head);
4486*4882a593Smuzhiyun 
4487*4882a593Smuzhiyun 	list_add(&cn->node, &clk_notifier_list);
4488*4882a593Smuzhiyun 
4489*4882a593Smuzhiyun found:
4490*4882a593Smuzhiyun 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4491*4882a593Smuzhiyun 
4492*4882a593Smuzhiyun 	clk->core->notifier_count++;
4493*4882a593Smuzhiyun 
4494*4882a593Smuzhiyun out:
4495*4882a593Smuzhiyun 	clk_prepare_unlock();
4496*4882a593Smuzhiyun 
4497*4882a593Smuzhiyun 	return ret;
4498*4882a593Smuzhiyun }
4499*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_notifier_register);
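
/*
 * A minimal notifier callback sketch (hypothetical consumer code; the
 * foo_* helpers are made up). The void *data argument is a
 * struct clk_notifier_data carrying old_rate and new_rate:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			return foo_can_run_at(ndata->new_rate) ?
 *				NOTIFY_OK : NOTIFY_BAD;
 *		case POST_RATE_CHANGE:
 *			foo_adjust_to(ndata->new_rate);
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */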
4500*4882a593Smuzhiyun 
4501*4882a593Smuzhiyun /**
4502*4882a593Smuzhiyun  * clk_notifier_unregister - remove a clk rate change notifier
4503*4882a593Smuzhiyun  * @clk: struct clk *
4504*4882a593Smuzhiyun  * @nb: struct notifier_block * with callback info
4505*4882a593Smuzhiyun  *
4506*4882a593Smuzhiyun  * Requests no further notification for changes to 'clk' and frees the memory
4507*4882a593Smuzhiyun  * allocated in clk_notifier_register().
4508*4882a593Smuzhiyun  *
4509*4882a593Smuzhiyun  * Returns -EINVAL if called with null arguments; otherwise, passes
4510*4882a593Smuzhiyun  * along the return value of srcu_notifier_chain_unregister().
4511*4882a593Smuzhiyun  */
4512*4882a593Smuzhiyun int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4513*4882a593Smuzhiyun {
4514*4882a593Smuzhiyun 	struct clk_notifier *cn;
4515*4882a593Smuzhiyun 	int ret = -ENOENT;
4516*4882a593Smuzhiyun 
4517*4882a593Smuzhiyun 	if (!clk || !nb)
4518*4882a593Smuzhiyun 		return -EINVAL;
4519*4882a593Smuzhiyun 
4520*4882a593Smuzhiyun 	clk_prepare_lock();
4521*4882a593Smuzhiyun 
4522*4882a593Smuzhiyun 	list_for_each_entry(cn, &clk_notifier_list, node) {
4523*4882a593Smuzhiyun 		if (cn->clk == clk) {
4524*4882a593Smuzhiyun 			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4525*4882a593Smuzhiyun 
4526*4882a593Smuzhiyun 			clk->core->notifier_count--;
4527*4882a593Smuzhiyun 
4528*4882a593Smuzhiyun 			/* XXX the notifier code should handle this better */
4529*4882a593Smuzhiyun 			if (!cn->notifier_head.head) {
4530*4882a593Smuzhiyun 				srcu_cleanup_notifier_head(&cn->notifier_head);
4531*4882a593Smuzhiyun 				list_del(&cn->node);
4532*4882a593Smuzhiyun 				kfree(cn);
4533*4882a593Smuzhiyun 			}
4534*4882a593Smuzhiyun 			break;
4535*4882a593Smuzhiyun 		}
4536*4882a593Smuzhiyun 	}
4537*4882a593Smuzhiyun 
4538*4882a593Smuzhiyun 	clk_prepare_unlock();
4539*4882a593Smuzhiyun 
4540*4882a593Smuzhiyun 	return ret;
4541*4882a593Smuzhiyun }
4542*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4543*4882a593Smuzhiyun 
4544*4882a593Smuzhiyun struct clk_notifier_devres {
4545*4882a593Smuzhiyun 	struct clk *clk;
4546*4882a593Smuzhiyun 	struct notifier_block *nb;
4547*4882a593Smuzhiyun };
4548*4882a593Smuzhiyun 
4549*4882a593Smuzhiyun static void devm_clk_notifier_release(struct device *dev, void *res)
4550*4882a593Smuzhiyun {
4551*4882a593Smuzhiyun 	struct clk_notifier_devres *devres = res;
4552*4882a593Smuzhiyun 
4553*4882a593Smuzhiyun 	clk_notifier_unregister(devres->clk, devres->nb);
4554*4882a593Smuzhiyun }
4555*4882a593Smuzhiyun 
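/**
 * devm_clk_notifier_register - register a managed rate-change notifier callback
 * @dev: device whose lifetime controls the notifier registration
 * @clk: clock whose rate changes are of interest
 * @nb: notifier block with callback info
 *
 * A devres-managed clk_notifier_register(); the notifier is automatically
 * unregistered when @dev is unbound. Returns 0 on success or a negative
 * errno otherwise.
 */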
4556*4882a593Smuzhiyun int devm_clk_notifier_register(struct device *dev, struct clk *clk,
4557*4882a593Smuzhiyun 			       struct notifier_block *nb)
4558*4882a593Smuzhiyun {
4559*4882a593Smuzhiyun 	struct clk_notifier_devres *devres;
4560*4882a593Smuzhiyun 	int ret;
4561*4882a593Smuzhiyun 
4562*4882a593Smuzhiyun 	devres = devres_alloc(devm_clk_notifier_release,
4563*4882a593Smuzhiyun 			      sizeof(*devres), GFP_KERNEL);
4564*4882a593Smuzhiyun 
4565*4882a593Smuzhiyun 	if (!devres)
4566*4882a593Smuzhiyun 		return -ENOMEM;
4567*4882a593Smuzhiyun 
4568*4882a593Smuzhiyun 	ret = clk_notifier_register(clk, nb);
4569*4882a593Smuzhiyun 	if (!ret) {
4570*4882a593Smuzhiyun 		devres->clk = clk;
4571*4882a593Smuzhiyun 		devres->nb = nb;
4572*4882a593Smuzhiyun 	} else {
4573*4882a593Smuzhiyun 		devres_free(devres);
4574*4882a593Smuzhiyun 	}
4575*4882a593Smuzhiyun 
4576*4882a593Smuzhiyun 	return ret;
4577*4882a593Smuzhiyun }
4578*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
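/*
 * Example (hypothetical): with the managed variant the notifier's
 * lifetime follows the device, so no explicit unregister call is
 * needed in the driver's remove path:
 *
 *	ret = devm_clk_notifier_register(&pdev->dev, clk, &foo_nb);
 *	if (ret)
 *		return ret;
 */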
4579*4882a593Smuzhiyun 
4580*4882a593Smuzhiyun #ifdef CONFIG_OF
4581*4882a593Smuzhiyun static void clk_core_reparent_orphans(void)
4582*4882a593Smuzhiyun {
4583*4882a593Smuzhiyun 	clk_prepare_lock();
4584*4882a593Smuzhiyun 	clk_core_reparent_orphans_nolock();
4585*4882a593Smuzhiyun 	clk_prepare_unlock();
4586*4882a593Smuzhiyun }
4587*4882a593Smuzhiyun 
4588*4882a593Smuzhiyun /**
4589*4882a593Smuzhiyun  * struct of_clk_provider - Clock provider registration structure
4590*4882a593Smuzhiyun  * @link: Entry in global list of clock providers
4591*4882a593Smuzhiyun  * @node: Pointer to device tree node of clock provider
4592*4882a593Smuzhiyun  * @get: Get clock callback.  Returns NULL or a struct clk for the
4593*4882a593Smuzhiyun  *       given clock specifier
4594*4882a593Smuzhiyun  * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
4595*4882a593Smuzhiyun  *       struct clk_hw for the given clock specifier
4596*4882a593Smuzhiyun  * @data: context pointer to be passed into @get callback
4597*4882a593Smuzhiyun  */
4598*4882a593Smuzhiyun struct of_clk_provider {
4599*4882a593Smuzhiyun 	struct list_head link;
4600*4882a593Smuzhiyun 
4601*4882a593Smuzhiyun 	struct device_node *node;
4602*4882a593Smuzhiyun 	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4603*4882a593Smuzhiyun 	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4604*4882a593Smuzhiyun 	void *data;
4605*4882a593Smuzhiyun };
4606*4882a593Smuzhiyun 
4607*4882a593Smuzhiyun extern struct of_device_id __clk_of_table;
4608*4882a593Smuzhiyun static const struct of_device_id __clk_of_table_sentinel
4609*4882a593Smuzhiyun 	__used __section("__clk_of_table_end");
4610*4882a593Smuzhiyun 
4611*4882a593Smuzhiyun static LIST_HEAD(of_clk_providers);
4612*4882a593Smuzhiyun static DEFINE_MUTEX(of_clk_mutex);
4613*4882a593Smuzhiyun 
4614*4882a593Smuzhiyun struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4615*4882a593Smuzhiyun 				     void *data)
4616*4882a593Smuzhiyun {
4617*4882a593Smuzhiyun 	return data;
4618*4882a593Smuzhiyun }
4619*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4620*4882a593Smuzhiyun 
4621*4882a593Smuzhiyun struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4622*4882a593Smuzhiyun {
4623*4882a593Smuzhiyun 	return data;
4624*4882a593Smuzhiyun }
4625*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4626*4882a593Smuzhiyun 
4627*4882a593Smuzhiyun struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4628*4882a593Smuzhiyun {
4629*4882a593Smuzhiyun 	struct clk_onecell_data *clk_data = data;
4630*4882a593Smuzhiyun 	unsigned int idx = clkspec->args[0];
4631*4882a593Smuzhiyun 
4632*4882a593Smuzhiyun 	if (idx >= clk_data->clk_num) {
4633*4882a593Smuzhiyun 		pr_err("%s: invalid clock index %u\n", __func__, idx);
4634*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
4635*4882a593Smuzhiyun 	}
4636*4882a593Smuzhiyun 
4637*4882a593Smuzhiyun 	return clk_data->clks[idx];
4638*4882a593Smuzhiyun }
4639*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4640*4882a593Smuzhiyun 
4641*4882a593Smuzhiyun struct clk_hw *
4642*4882a593Smuzhiyun of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4643*4882a593Smuzhiyun {
4644*4882a593Smuzhiyun 	struct clk_hw_onecell_data *hw_data = data;
4645*4882a593Smuzhiyun 	unsigned int idx = clkspec->args[0];
4646*4882a593Smuzhiyun 
4647*4882a593Smuzhiyun 	if (idx >= hw_data->num) {
4648*4882a593Smuzhiyun 		pr_err("%s: invalid index %u\n", __func__, idx);
4649*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
4650*4882a593Smuzhiyun 	}
4651*4882a593Smuzhiyun 
4652*4882a593Smuzhiyun 	return hw_data->hws[idx];
4653*4882a593Smuzhiyun }
4654*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4655*4882a593Smuzhiyun 
4656*4882a593Smuzhiyun /**
4657*4882a593Smuzhiyun  * of_clk_add_provider() - Register a clock provider for a node
4658*4882a593Smuzhiyun  * @np: Device node pointer associated with clock provider
4659*4882a593Smuzhiyun  * @clk_src_get: callback for decoding clock
4660*4882a593Smuzhiyun  * @data: context pointer for @clk_src_get callback.
4661*4882a593Smuzhiyun  *
4662*4882a593Smuzhiyun  * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
4663*4882a593Smuzhiyun  */
4664*4882a593Smuzhiyun int of_clk_add_provider(struct device_node *np,
4665*4882a593Smuzhiyun 			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4666*4882a593Smuzhiyun 						   void *data),
4667*4882a593Smuzhiyun 			void *data)
4668*4882a593Smuzhiyun {
4669*4882a593Smuzhiyun 	struct of_clk_provider *cp;
4670*4882a593Smuzhiyun 	int ret;
4671*4882a593Smuzhiyun 
4672*4882a593Smuzhiyun 	if (!np)
4673*4882a593Smuzhiyun 		return 0;
4674*4882a593Smuzhiyun 
4675*4882a593Smuzhiyun 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4676*4882a593Smuzhiyun 	if (!cp)
4677*4882a593Smuzhiyun 		return -ENOMEM;
4678*4882a593Smuzhiyun 
4679*4882a593Smuzhiyun 	cp->node = of_node_get(np);
4680*4882a593Smuzhiyun 	cp->data = data;
4681*4882a593Smuzhiyun 	cp->get = clk_src_get;
4682*4882a593Smuzhiyun 
4683*4882a593Smuzhiyun 	mutex_lock(&of_clk_mutex);
4684*4882a593Smuzhiyun 	list_add(&cp->link, &of_clk_providers);
4685*4882a593Smuzhiyun 	mutex_unlock(&of_clk_mutex);
4686*4882a593Smuzhiyun 	pr_debug("Added clock from %pOF\n", np);
4687*4882a593Smuzhiyun 
4688*4882a593Smuzhiyun 	clk_core_reparent_orphans();
4689*4882a593Smuzhiyun 
4690*4882a593Smuzhiyun 	ret = of_clk_set_defaults(np, true);
4691*4882a593Smuzhiyun 	if (ret < 0)
4692*4882a593Smuzhiyun 		of_clk_del_provider(np);
4693*4882a593Smuzhiyun 
4694*4882a593Smuzhiyun 	fwnode_dev_initialized(&np->fwnode, true);
4695*4882a593Smuzhiyun 
4696*4882a593Smuzhiyun 	return ret;
4697*4882a593Smuzhiyun }
4698*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_add_provider);
4699*4882a593Smuzhiyun 
4700*4882a593Smuzhiyun /**
4701*4882a593Smuzhiyun  * of_clk_add_hw_provider() - Register a clock provider for a node
4702*4882a593Smuzhiyun  * @np: Device node pointer associated with clock provider
4703*4882a593Smuzhiyun  * @get: callback for decoding clk_hw
4704*4882a593Smuzhiyun  * @data: context pointer for @get callback.
4705*4882a593Smuzhiyun  */
4706*4882a593Smuzhiyun int of_clk_add_hw_provider(struct device_node *np,
4707*4882a593Smuzhiyun 			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4708*4882a593Smuzhiyun 						 void *data),
4709*4882a593Smuzhiyun 			   void *data)
4710*4882a593Smuzhiyun {
4711*4882a593Smuzhiyun 	struct of_clk_provider *cp;
4712*4882a593Smuzhiyun 	int ret;
4713*4882a593Smuzhiyun 
4714*4882a593Smuzhiyun 	if (!np)
4715*4882a593Smuzhiyun 		return 0;
4716*4882a593Smuzhiyun 
4717*4882a593Smuzhiyun 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4718*4882a593Smuzhiyun 	if (!cp)
4719*4882a593Smuzhiyun 		return -ENOMEM;
4720*4882a593Smuzhiyun 
4721*4882a593Smuzhiyun 	cp->node = of_node_get(np);
4722*4882a593Smuzhiyun 	cp->data = data;
4723*4882a593Smuzhiyun 	cp->get_hw = get;
4724*4882a593Smuzhiyun 
4725*4882a593Smuzhiyun 	mutex_lock(&of_clk_mutex);
4726*4882a593Smuzhiyun 	list_add(&cp->link, &of_clk_providers);
4727*4882a593Smuzhiyun 	mutex_unlock(&of_clk_mutex);
4728*4882a593Smuzhiyun 	pr_debug("Added clk_hw provider from %pOF\n", np);
4729*4882a593Smuzhiyun 
4730*4882a593Smuzhiyun 	clk_core_reparent_orphans();
4731*4882a593Smuzhiyun 
4732*4882a593Smuzhiyun 	ret = of_clk_set_defaults(np, true);
4733*4882a593Smuzhiyun 	if (ret < 0)
4734*4882a593Smuzhiyun 		of_clk_del_provider(np);
4735*4882a593Smuzhiyun 
4736*4882a593Smuzhiyun 	return ret;
4737*4882a593Smuzhiyun }
4738*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
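/*
 * Example (illustrative sketch): a provider typically fills a
 * clk_hw_onecell_data table and hands it to of_clk_add_hw_provider()
 * together with of_clk_hw_onecell_get(). FOO_NR_CLKS, FOO_CLK_UART and
 * foo_register_uart_clk() are hypothetical.
 *
 *	struct clk_hw_onecell_data *hw_data;
 *
 *	hw_data = kzalloc(struct_size(hw_data, hws, FOO_NR_CLKS),
 *			  GFP_KERNEL);
 *	if (!hw_data)
 *		return -ENOMEM;
 *	hw_data->num = FOO_NR_CLKS;
 *	hw_data->hws[FOO_CLK_UART] = foo_register_uart_clk(np);
 *	// ...register the remaining clk_hws...
 *	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
 */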
4739*4882a593Smuzhiyun 
4740*4882a593Smuzhiyun static void devm_of_clk_release_provider(struct device *dev, void *res)
4741*4882a593Smuzhiyun {
4742*4882a593Smuzhiyun 	of_clk_del_provider(*(struct device_node **)res);
4743*4882a593Smuzhiyun }
4744*4882a593Smuzhiyun 
4745*4882a593Smuzhiyun /*
4746*4882a593Smuzhiyun  * We allow a child device to use its parent device as the clock provider node
4747*4882a593Smuzhiyun  * for cases like MFD sub-devices where the child device driver wants to use
4748*4882a593Smuzhiyun  * devm_*() APIs but not list the device in DT as a sub-node.
4749*4882a593Smuzhiyun  */
4750*4882a593Smuzhiyun static struct device_node *get_clk_provider_node(struct device *dev)
4751*4882a593Smuzhiyun {
4752*4882a593Smuzhiyun 	struct device_node *np, *parent_np;
4753*4882a593Smuzhiyun 
4754*4882a593Smuzhiyun 	np = dev->of_node;
4755*4882a593Smuzhiyun 	parent_np = dev->parent ? dev->parent->of_node : NULL;
4756*4882a593Smuzhiyun 
4757*4882a593Smuzhiyun 	if (!of_find_property(np, "#clock-cells", NULL))
4758*4882a593Smuzhiyun 		if (of_find_property(parent_np, "#clock-cells", NULL))
4759*4882a593Smuzhiyun 			np = parent_np;
4760*4882a593Smuzhiyun 
4761*4882a593Smuzhiyun 	return np;
4762*4882a593Smuzhiyun }
4763*4882a593Smuzhiyun 
4764*4882a593Smuzhiyun /**
4765*4882a593Smuzhiyun  * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4766*4882a593Smuzhiyun  * @dev: Device acting as the clock provider (used for DT node and lifetime)
4767*4882a593Smuzhiyun  * @get: callback for decoding clk_hw
4768*4882a593Smuzhiyun  * @data: context pointer for @get callback
4769*4882a593Smuzhiyun  *
4770*4882a593Smuzhiyun  * Registers a clock provider for the given device's node. If the device has
4771*4882a593Smuzhiyun  * no DT node, or if its node lacks clock provider information (#clock-cells),
4772*4882a593Smuzhiyun  * then the parent device's node is checked for this information. If the parent
4773*4882a593Smuzhiyun  * node has #clock-cells, it is used for the registration. The provider is
4774*4882a593Smuzhiyun  * automatically released when the device is unbound.
4775*4882a593Smuzhiyun  *
4776*4882a593Smuzhiyun  * Return: 0 on success or an errno on failure.
4777*4882a593Smuzhiyun  */
4778*4882a593Smuzhiyun int devm_of_clk_add_hw_provider(struct device *dev,
4779*4882a593Smuzhiyun 			struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4780*4882a593Smuzhiyun 					      void *data),
4781*4882a593Smuzhiyun 			void *data)
4782*4882a593Smuzhiyun {
4783*4882a593Smuzhiyun 	struct device_node **ptr, *np;
4784*4882a593Smuzhiyun 	int ret;
4785*4882a593Smuzhiyun 
4786*4882a593Smuzhiyun 	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4787*4882a593Smuzhiyun 			   GFP_KERNEL);
4788*4882a593Smuzhiyun 	if (!ptr)
4789*4882a593Smuzhiyun 		return -ENOMEM;
4790*4882a593Smuzhiyun 
4791*4882a593Smuzhiyun 	np = get_clk_provider_node(dev);
4792*4882a593Smuzhiyun 	ret = of_clk_add_hw_provider(np, get, data);
4793*4882a593Smuzhiyun 	if (!ret) {
4794*4882a593Smuzhiyun 		*ptr = np;
4795*4882a593Smuzhiyun 		devres_add(dev, ptr);
4796*4882a593Smuzhiyun 	} else {
4797*4882a593Smuzhiyun 		devres_free(ptr);
4798*4882a593Smuzhiyun 	}
4799*4882a593Smuzhiyun 
4800*4882a593Smuzhiyun 	return ret;
4801*4882a593Smuzhiyun }
4802*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
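/*
 * Example (hypothetical): probe-time registration with automatic
 * removal when the device is unbound:
 *
 *	ret = devm_of_clk_add_hw_provider(&pdev->dev,
 *					  of_clk_hw_onecell_get, hw_data);
 *	if (ret)
 *		return ret;
 */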
4803*4882a593Smuzhiyun 
4804*4882a593Smuzhiyun /**
4805*4882a593Smuzhiyun  * of_clk_del_provider() - Remove a previously registered clock provider
4806*4882a593Smuzhiyun  * @np: Device node pointer associated with clock provider
4807*4882a593Smuzhiyun  */
4808*4882a593Smuzhiyun void of_clk_del_provider(struct device_node *np)
4809*4882a593Smuzhiyun {
4810*4882a593Smuzhiyun 	struct of_clk_provider *cp;
4811*4882a593Smuzhiyun 
4812*4882a593Smuzhiyun 	if (!np)
4813*4882a593Smuzhiyun 		return;
4814*4882a593Smuzhiyun 
4815*4882a593Smuzhiyun 	mutex_lock(&of_clk_mutex);
4816*4882a593Smuzhiyun 	list_for_each_entry(cp, &of_clk_providers, link) {
4817*4882a593Smuzhiyun 		if (cp->node == np) {
4818*4882a593Smuzhiyun 			list_del(&cp->link);
4819*4882a593Smuzhiyun 			fwnode_dev_initialized(&np->fwnode, false);
4820*4882a593Smuzhiyun 			of_node_put(cp->node);
4821*4882a593Smuzhiyun 			kfree(cp);
4822*4882a593Smuzhiyun 			break;
4823*4882a593Smuzhiyun 		}
4824*4882a593Smuzhiyun 	}
4825*4882a593Smuzhiyun 	mutex_unlock(&of_clk_mutex);
4826*4882a593Smuzhiyun }
4827*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_del_provider);
4828*4882a593Smuzhiyun 
4829*4882a593Smuzhiyun static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4830*4882a593Smuzhiyun {
4831*4882a593Smuzhiyun 	struct device_node **np = res;
4832*4882a593Smuzhiyun 
4833*4882a593Smuzhiyun 	if (WARN_ON(!np || !*np))
4834*4882a593Smuzhiyun 		return 0;
4835*4882a593Smuzhiyun 
4836*4882a593Smuzhiyun 	return *np == data;
4837*4882a593Smuzhiyun }
4838*4882a593Smuzhiyun 
4839*4882a593Smuzhiyun /**
4840*4882a593Smuzhiyun  * devm_of_clk_del_provider() - Remove clock provider registered using devm
4841*4882a593Smuzhiyun  * @dev: Device to whose lifetime the clock provider was bound
4842*4882a593Smuzhiyun  */
4843*4882a593Smuzhiyun void devm_of_clk_del_provider(struct device *dev)
4844*4882a593Smuzhiyun {
4845*4882a593Smuzhiyun 	int ret;
4846*4882a593Smuzhiyun 	struct device_node *np = get_clk_provider_node(dev);
4847*4882a593Smuzhiyun 
4848*4882a593Smuzhiyun 	ret = devres_release(dev, devm_of_clk_release_provider,
4849*4882a593Smuzhiyun 			     devm_clk_provider_match, np);
4850*4882a593Smuzhiyun 
4851*4882a593Smuzhiyun 	WARN_ON(ret);
4852*4882a593Smuzhiyun }
4853*4882a593Smuzhiyun EXPORT_SYMBOL(devm_of_clk_del_provider);
4854*4882a593Smuzhiyun 
4855*4882a593Smuzhiyun /**
4856*4882a593Smuzhiyun  * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4857*4882a593Smuzhiyun  * @np: device node to parse clock specifier from
4858*4882a593Smuzhiyun  * @index: index of phandle to parse clock out of. If index < 0, @name is used
4859*4882a593Smuzhiyun  * @name: clock name to find and parse. If name is NULL, the index is used
4860*4882a593Smuzhiyun  * @out_args: Result of parsing the clock specifier
4861*4882a593Smuzhiyun  *
4862*4882a593Smuzhiyun  * Parses a device node's "clocks" and "clock-names" properties to find the
4863*4882a593Smuzhiyun  * phandle and cells for the index or name that is desired. The resulting clock
4864*4882a593Smuzhiyun  * specifier is placed into @out_args, or an errno is returned when there's a
4865*4882a593Smuzhiyun  * parsing error. The @index argument is ignored if @name is non-NULL.
4866*4882a593Smuzhiyun  *
4867*4882a593Smuzhiyun  * Example:
4868*4882a593Smuzhiyun  *
4869*4882a593Smuzhiyun  * phandle1: clock-controller@1 {
4870*4882a593Smuzhiyun  *	#clock-cells = <2>;
4871*4882a593Smuzhiyun  * }
4872*4882a593Smuzhiyun  *
4873*4882a593Smuzhiyun  * phandle2: clock-controller@2 {
4874*4882a593Smuzhiyun  *	#clock-cells = <1>;
4875*4882a593Smuzhiyun  * }
4876*4882a593Smuzhiyun  *
4877*4882a593Smuzhiyun  * clock-consumer@3 {
4878*4882a593Smuzhiyun  *	clocks = <&phandle1 1 2 &phandle2 3>;
4879*4882a593Smuzhiyun  *	clock-names = "name1", "name2";
4880*4882a593Smuzhiyun  * }
4881*4882a593Smuzhiyun  *
4882*4882a593Smuzhiyun  * To get a device_node for the `clock-controller@2' node you may call this
4883*4882a593Smuzhiyun  * function a few different ways:
4884*4882a593Smuzhiyun  *
4885*4882a593Smuzhiyun  *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
4886*4882a593Smuzhiyun  *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
4887*4882a593Smuzhiyun  *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
4888*4882a593Smuzhiyun  *
4889*4882a593Smuzhiyun  * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
4890*4882a593Smuzhiyun  * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
4891*4882a593Smuzhiyun  * the "clock-names" property of @np.
4892*4882a593Smuzhiyun  */
4893*4882a593Smuzhiyun static int of_parse_clkspec(const struct device_node *np, int index,
4894*4882a593Smuzhiyun 			    const char *name, struct of_phandle_args *out_args)
4895*4882a593Smuzhiyun {
4896*4882a593Smuzhiyun 	int ret = -ENOENT;
4897*4882a593Smuzhiyun 
4898*4882a593Smuzhiyun 	/* Walk up the tree of devices looking for a clock property that matches */
4899*4882a593Smuzhiyun 	while (np) {
4900*4882a593Smuzhiyun 		/*
4901*4882a593Smuzhiyun 		 * For named clocks, first look up the name in the
4902*4882a593Smuzhiyun 		 * "clock-names" property.  If it cannot be found, then index
4903*4882a593Smuzhiyun 		 * will be an error code and of_parse_phandle_with_args() will
4904*4882a593Smuzhiyun 		 * return -EINVAL.
4905*4882a593Smuzhiyun 		 */
4906*4882a593Smuzhiyun 		if (name)
4907*4882a593Smuzhiyun 			index = of_property_match_string(np, "clock-names", name);
4908*4882a593Smuzhiyun 		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4909*4882a593Smuzhiyun 						 index, out_args);
4910*4882a593Smuzhiyun 		if (!ret)
4911*4882a593Smuzhiyun 			break;
4912*4882a593Smuzhiyun 		if (name && index >= 0)
4913*4882a593Smuzhiyun 			break;
4914*4882a593Smuzhiyun 
4915*4882a593Smuzhiyun 		/*
4916*4882a593Smuzhiyun 		 * No matching clock found on this node.  If the parent node
4917*4882a593Smuzhiyun 		 * has a "clock-ranges" property, then we can try one of its
4918*4882a593Smuzhiyun 		 * clocks.
4919*4882a593Smuzhiyun 		 */
4920*4882a593Smuzhiyun 		np = np->parent;
4921*4882a593Smuzhiyun 		if (np && !of_get_property(np, "clock-ranges", NULL))
4922*4882a593Smuzhiyun 			break;
4923*4882a593Smuzhiyun 		index = 0;
4924*4882a593Smuzhiyun 	}
4925*4882a593Smuzhiyun 
4926*4882a593Smuzhiyun 	return ret;
4927*4882a593Smuzhiyun }
4928*4882a593Smuzhiyun 
4929*4882a593Smuzhiyun static struct clk_hw *
4930*4882a593Smuzhiyun __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4931*4882a593Smuzhiyun 			      struct of_phandle_args *clkspec)
4932*4882a593Smuzhiyun {
4933*4882a593Smuzhiyun 	struct clk *clk;
4934*4882a593Smuzhiyun 
4935*4882a593Smuzhiyun 	if (provider->get_hw)
4936*4882a593Smuzhiyun 		return provider->get_hw(clkspec, provider->data);
4937*4882a593Smuzhiyun 
4938*4882a593Smuzhiyun 	clk = provider->get(clkspec, provider->data);
4939*4882a593Smuzhiyun 	if (IS_ERR(clk))
4940*4882a593Smuzhiyun 		return ERR_CAST(clk);
4941*4882a593Smuzhiyun 	return __clk_get_hw(clk);
4942*4882a593Smuzhiyun }
4943*4882a593Smuzhiyun 
4944*4882a593Smuzhiyun static struct clk_hw *
4945*4882a593Smuzhiyun of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
4946*4882a593Smuzhiyun {
4947*4882a593Smuzhiyun 	struct of_clk_provider *provider;
4948*4882a593Smuzhiyun 	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4949*4882a593Smuzhiyun 
4950*4882a593Smuzhiyun 	if (!clkspec)
4951*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
4952*4882a593Smuzhiyun 
4953*4882a593Smuzhiyun 	mutex_lock(&of_clk_mutex);
4954*4882a593Smuzhiyun 	list_for_each_entry(provider, &of_clk_providers, link) {
4955*4882a593Smuzhiyun 		if (provider->node == clkspec->np) {
4956*4882a593Smuzhiyun 			hw = __of_clk_get_hw_from_provider(provider, clkspec);
4957*4882a593Smuzhiyun 			if (!IS_ERR(hw))
4958*4882a593Smuzhiyun 				break;
4959*4882a593Smuzhiyun 		}
4960*4882a593Smuzhiyun 	}
4961*4882a593Smuzhiyun 	mutex_unlock(&of_clk_mutex);
4962*4882a593Smuzhiyun 
4963*4882a593Smuzhiyun 	return hw;
4964*4882a593Smuzhiyun }
4965*4882a593Smuzhiyun 
4966*4882a593Smuzhiyun /**
4967*4882a593Smuzhiyun  * of_clk_get_from_provider() - Lookup a clock from a clock provider
4968*4882a593Smuzhiyun  * @clkspec: pointer to a clock specifier data structure
4969*4882a593Smuzhiyun  *
4970*4882a593Smuzhiyun  * This function looks up a struct clk from the registered list of clock
4971*4882a593Smuzhiyun  * providers; the input is a clock specifier data structure as returned
4972*4882a593Smuzhiyun  * by of_parse_phandle_with_args().
4973*4882a593Smuzhiyun  */
4974*4882a593Smuzhiyun struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4975*4882a593Smuzhiyun {
4976*4882a593Smuzhiyun 	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
4977*4882a593Smuzhiyun 
4978*4882a593Smuzhiyun 	return clk_hw_create_clk(NULL, hw, NULL, __func__);
4979*4882a593Smuzhiyun }
4980*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
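/*
 * Example (sketch, error handling elided): pairing this lookup with a
 * hand-parsed specifier, as a consumer helper might do:
 *
 *	struct of_phandle_args clkspec;
 *	struct clk *clk;
 *
 *	of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
 *				   &clkspec);
 *	clk = of_clk_get_from_provider(&clkspec);
 *	of_node_put(clkspec.np);
 */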
4981*4882a593Smuzhiyun 
4982*4882a593Smuzhiyun struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4983*4882a593Smuzhiyun 			     const char *con_id)
4984*4882a593Smuzhiyun {
4985*4882a593Smuzhiyun 	int ret;
4986*4882a593Smuzhiyun 	struct clk_hw *hw;
4987*4882a593Smuzhiyun 	struct of_phandle_args clkspec;
4988*4882a593Smuzhiyun 
4989*4882a593Smuzhiyun 	ret = of_parse_clkspec(np, index, con_id, &clkspec);
4990*4882a593Smuzhiyun 	if (ret)
4991*4882a593Smuzhiyun 		return ERR_PTR(ret);
4992*4882a593Smuzhiyun 
4993*4882a593Smuzhiyun 	hw = of_clk_get_hw_from_clkspec(&clkspec);
4994*4882a593Smuzhiyun 	of_node_put(clkspec.np);
4995*4882a593Smuzhiyun 
4996*4882a593Smuzhiyun 	return hw;
4997*4882a593Smuzhiyun }
4998*4882a593Smuzhiyun 
4999*4882a593Smuzhiyun static struct clk *__of_clk_get(struct device_node *np,
5000*4882a593Smuzhiyun 				int index, const char *dev_id,
5001*4882a593Smuzhiyun 				const char *con_id)
5002*4882a593Smuzhiyun {
5003*4882a593Smuzhiyun 	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
5004*4882a593Smuzhiyun 
5005*4882a593Smuzhiyun 	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
5006*4882a593Smuzhiyun }
5007*4882a593Smuzhiyun 
5008*4882a593Smuzhiyun struct clk *of_clk_get(struct device_node *np, int index)
5009*4882a593Smuzhiyun {
5010*4882a593Smuzhiyun 	return __of_clk_get(np, index, np->full_name, NULL);
5011*4882a593Smuzhiyun }
5012*4882a593Smuzhiyun EXPORT_SYMBOL(of_clk_get);
5013*4882a593Smuzhiyun 
5014*4882a593Smuzhiyun /**
5015*4882a593Smuzhiyun  * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
5016*4882a593Smuzhiyun  * @np: pointer to clock consumer node
5017*4882a593Smuzhiyun  * @name: name of consumer's clock input, or NULL for the first clock reference
5018*4882a593Smuzhiyun  *
5019*4882a593Smuzhiyun  * This function parses the clocks and clock-names properties,
5020*4882a593Smuzhiyun  * and uses them to look up the struct clk from the registered list of clock
5021*4882a593Smuzhiyun  * providers.
5022*4882a593Smuzhiyun  */
5023*4882a593Smuzhiyun struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
5024*4882a593Smuzhiyun {
5025*4882a593Smuzhiyun 	if (!np)
5026*4882a593Smuzhiyun 		return ERR_PTR(-ENOENT);
5027*4882a593Smuzhiyun 
5028*4882a593Smuzhiyun 	return __of_clk_get(np, 0, np->full_name, name);
5029*4882a593Smuzhiyun }
5030*4882a593Smuzhiyun EXPORT_SYMBOL(of_clk_get_by_name);
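/*
 * Example (hypothetical consumer node):
 *
 *	uart@1000 {
 *		clocks = <&cru 8>, <&cru 9>;
 *		clock-names = "baud", "apb";
 *	};
 *
 * The "apb" clock is then obtained with:
 *
 *	clk = of_clk_get_by_name(np, "apb");
 */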
5031*4882a593Smuzhiyun 
5032*4882a593Smuzhiyun /**
5033*4882a593Smuzhiyun  * of_clk_get_parent_count() - Count the number of clocks a device node has
5034*4882a593Smuzhiyun  * @np: device node to count
5035*4882a593Smuzhiyun  *
5036*4882a593Smuzhiyun  * Returns: The number of clocks that are possible parents of this node
5037*4882a593Smuzhiyun  */
5038*4882a593Smuzhiyun unsigned int of_clk_get_parent_count(const struct device_node *np)
5039*4882a593Smuzhiyun {
5040*4882a593Smuzhiyun 	int count;
5041*4882a593Smuzhiyun 
5042*4882a593Smuzhiyun 	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
5043*4882a593Smuzhiyun 	if (count < 0)
5044*4882a593Smuzhiyun 		return 0;
5045*4882a593Smuzhiyun 
5046*4882a593Smuzhiyun 	return count;
5047*4882a593Smuzhiyun }
5048*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
5049*4882a593Smuzhiyun 
5050*4882a593Smuzhiyun const char *of_clk_get_parent_name(const struct device_node *np, int index)
5051*4882a593Smuzhiyun {
5052*4882a593Smuzhiyun 	struct of_phandle_args clkspec;
5053*4882a593Smuzhiyun 	struct property *prop;
5054*4882a593Smuzhiyun 	const char *clk_name;
5055*4882a593Smuzhiyun 	const __be32 *vp;
5056*4882a593Smuzhiyun 	u32 pv;
5057*4882a593Smuzhiyun 	int rc;
5058*4882a593Smuzhiyun 	int count;
5059*4882a593Smuzhiyun 	struct clk *clk;
5060*4882a593Smuzhiyun 
5061*4882a593Smuzhiyun 	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
5062*4882a593Smuzhiyun 					&clkspec);
5063*4882a593Smuzhiyun 	if (rc)
5064*4882a593Smuzhiyun 		return NULL;
5065*4882a593Smuzhiyun 
5066*4882a593Smuzhiyun 	index = clkspec.args_count ? clkspec.args[0] : 0;
5067*4882a593Smuzhiyun 	count = 0;
5068*4882a593Smuzhiyun 
5069*4882a593Smuzhiyun 	/* if there is an indices property, use it to translate the index
5070*4882a593Smuzhiyun 	 * specified into an array offset for the clock-output-names property.
5071*4882a593Smuzhiyun 	 */
5072*4882a593Smuzhiyun 	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
5073*4882a593Smuzhiyun 		if (index == pv) {
5074*4882a593Smuzhiyun 			index = count;
5075*4882a593Smuzhiyun 			break;
5076*4882a593Smuzhiyun 		}
5077*4882a593Smuzhiyun 		count++;
5078*4882a593Smuzhiyun 	}
5079*4882a593Smuzhiyun 	/* We went off the end of 'clock-indices' without finding it */
5080*4882a593Smuzhiyun 	if (prop && !vp)
5081*4882a593Smuzhiyun 		return NULL;
5082*4882a593Smuzhiyun 
5083*4882a593Smuzhiyun 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
5084*4882a593Smuzhiyun 					  index,
5085*4882a593Smuzhiyun 					  &clk_name) < 0) {
5086*4882a593Smuzhiyun 		/*
5087*4882a593Smuzhiyun 		 * Best effort to get the name if the clock has been
5088*4882a593Smuzhiyun 		 * registered with the framework. If the clock isn't
5089*4882a593Smuzhiyun 		 * registered, we return the node name as the name of
5090*4882a593Smuzhiyun 		 * the clock as long as #clock-cells = 0.
5091*4882a593Smuzhiyun 		 */
5092*4882a593Smuzhiyun 		clk = of_clk_get_from_provider(&clkspec);
5093*4882a593Smuzhiyun 		if (IS_ERR(clk)) {
5094*4882a593Smuzhiyun 			if (clkspec.args_count == 0)
5095*4882a593Smuzhiyun 				clk_name = clkspec.np->name;
5096*4882a593Smuzhiyun 			else
5097*4882a593Smuzhiyun 				clk_name = NULL;
5098*4882a593Smuzhiyun 		} else {
5099*4882a593Smuzhiyun 			clk_name = __clk_get_name(clk);
5100*4882a593Smuzhiyun 			clk_put(clk);
5101*4882a593Smuzhiyun 		}
5102*4882a593Smuzhiyun 	}
5103*4882a593Smuzhiyun 
5104*4882a593Smuzhiyun 
5105*4882a593Smuzhiyun 	of_node_put(clkspec.np);
5106*4882a593Smuzhiyun 	return clk_name;
5107*4882a593Smuzhiyun }
5108*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
5109*4882a593Smuzhiyun 
5110*4882a593Smuzhiyun /**
5111*4882a593Smuzhiyun  * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
5112*4882a593Smuzhiyun  * number of parents
5113*4882a593Smuzhiyun  * @np: Device node pointer associated with clock provider
5114*4882a593Smuzhiyun  * @parents: pointer to char array that holds the parents' names
5115*4882a593Smuzhiyun  * @size: size of the @parents array
5116*4882a593Smuzhiyun  *
5117*4882a593Smuzhiyun  * Return: number of parents for the clock node.
5118*4882a593Smuzhiyun  */
5119*4882a593Smuzhiyun int of_clk_parent_fill(struct device_node *np, const char **parents,
5120*4882a593Smuzhiyun 		       unsigned int size)
5121*4882a593Smuzhiyun {
5122*4882a593Smuzhiyun 	unsigned int i = 0;
5123*4882a593Smuzhiyun 
5124*4882a593Smuzhiyun 	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
5125*4882a593Smuzhiyun 		i++;
5126*4882a593Smuzhiyun 
5127*4882a593Smuzhiyun 	return i;
5128*4882a593Smuzhiyun }
5129*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_clk_parent_fill);
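/*
 * Example (sketch, allocation-failure handling elided): size the array
 * with of_clk_get_parent_count() before filling it:
 *
 *	unsigned int num_parents = of_clk_get_parent_count(np);
 *	const char **parent_names;
 *
 *	parent_names = kcalloc(num_parents, sizeof(*parent_names),
 *			       GFP_KERNEL);
 *	of_clk_parent_fill(np, parent_names, num_parents);
 */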
5130*4882a593Smuzhiyun 
5131*4882a593Smuzhiyun struct clock_provider {
5132*4882a593Smuzhiyun 	void (*clk_init_cb)(struct device_node *);
5133*4882a593Smuzhiyun 	struct device_node *np;
5134*4882a593Smuzhiyun 	struct list_head node;
5135*4882a593Smuzhiyun };
5136*4882a593Smuzhiyun 
5137*4882a593Smuzhiyun /*
5138*4882a593Smuzhiyun  * This function looks for a parent clock. If there is one, it checks
5139*4882a593Smuzhiyun  * whether the provider for that parent clock has been initialized, in
5140*4882a593Smuzhiyun  * which case the parent clock is ready.
5141*4882a593Smuzhiyun  */
5142*4882a593Smuzhiyun static int parent_ready(struct device_node *np)
5143*4882a593Smuzhiyun {
5144*4882a593Smuzhiyun 	int i = 0;
5145*4882a593Smuzhiyun 
5146*4882a593Smuzhiyun 	while (true) {
5147*4882a593Smuzhiyun 		struct clk *clk = of_clk_get(np, i);
5148*4882a593Smuzhiyun 
5149*4882a593Smuzhiyun 		/* this parent is ready, so we can check the next one */
5150*4882a593Smuzhiyun 		if (!IS_ERR(clk)) {
5151*4882a593Smuzhiyun 			clk_put(clk);
5152*4882a593Smuzhiyun 			i++;
5153*4882a593Smuzhiyun 			continue;
5154*4882a593Smuzhiyun 		}
5155*4882a593Smuzhiyun 
5156*4882a593Smuzhiyun 		/* at least one parent is not ready, we exit now */
5157*4882a593Smuzhiyun 		if (PTR_ERR(clk) == -EPROBE_DEFER)
5158*4882a593Smuzhiyun 			return 0;
5159*4882a593Smuzhiyun 
5160*4882a593Smuzhiyun 		/*
5161*4882a593Smuzhiyun 		 * Here we assume that the device tree is written
5162*4882a593Smuzhiyun 		 * correctly, so any other error means there are no
5163*4882a593Smuzhiyun 		 * more parents. As we didn't exit earlier, the
5164*4882a593Smuzhiyun 		 * previous parents are ready. If there are no clock
5165*4882a593Smuzhiyun 		 * parents at all, there is nothing to wait for, so we
5166*4882a593Smuzhiyun 		 * consider their absence as being ready.
5167*4882a593Smuzhiyun 		 */
5168*4882a593Smuzhiyun 		return 1;
5169*4882a593Smuzhiyun 	}
5170*4882a593Smuzhiyun }
5171*4882a593Smuzhiyun 
5172*4882a593Smuzhiyun /**
5173*4882a593Smuzhiyun  * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
5174*4882a593Smuzhiyun  * @np: Device node pointer associated with clock provider
5175*4882a593Smuzhiyun  * @index: clock index
5176*4882a593Smuzhiyun  * @flags: pointer to top-level framework flags
5177*4882a593Smuzhiyun  *
5178*4882a593Smuzhiyun  * Detects if the clock-critical property exists and, if so, sets the
5179*4882a593Smuzhiyun  * corresponding CLK_IS_CRITICAL flag.
5180*4882a593Smuzhiyun  *
5181*4882a593Smuzhiyun  * Do not use this function. It exists only for legacy Device Tree
5182*4882a593Smuzhiyun  * bindings, such as the outdated one-clock-per-node style.
5183*4882a593Smuzhiyun  * Those bindings typically put all clock data into .dts and the Linux
5184*4882a593Smuzhiyun  * driver has no clock data, thus making it impossible to set this flag
5185*4882a593Smuzhiyun  * correctly from the driver. Only such drivers may call
5186*4882a593Smuzhiyun  * of_clk_detect_critical() from their setup functions.
5187*4882a593Smuzhiyun  *
5188*4882a593Smuzhiyun  * Return: error code or zero on success
5189*4882a593Smuzhiyun  */
5190*4882a593Smuzhiyun int of_clk_detect_critical(struct device_node *np, int index,
5191*4882a593Smuzhiyun 			   unsigned long *flags)
5192*4882a593Smuzhiyun {
5193*4882a593Smuzhiyun 	struct property *prop;
5194*4882a593Smuzhiyun 	const __be32 *cur;
5195*4882a593Smuzhiyun 	uint32_t idx;
5196*4882a593Smuzhiyun 
5197*4882a593Smuzhiyun 	if (!np || !flags)
5198*4882a593Smuzhiyun 		return -EINVAL;
5199*4882a593Smuzhiyun 
5200*4882a593Smuzhiyun 	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
5201*4882a593Smuzhiyun 		if (index == idx)
5202*4882a593Smuzhiyun 			*flags |= CLK_IS_CRITICAL;
5203*4882a593Smuzhiyun 
5204*4882a593Smuzhiyun 	return 0;
5205*4882a593Smuzhiyun }
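/*
 * Example (legacy binding, hypothetical node): a provider can mark
 * outputs as critical by index, e.g.
 *
 *	clock-controller@0 {
 *		#clock-cells = <1>;
 *		clock-critical = <1>, <3>;
 *	};
 *
 * and its setup code would then call
 * of_clk_detect_critical(np, index, &flags) before registering each
 * clock.
 */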
5206*4882a593Smuzhiyun 
5207*4882a593Smuzhiyun /**
5208*4882a593Smuzhiyun  * of_clk_init() - Scan and init clock providers from the DT
5209*4882a593Smuzhiyun  * @matches: array of compatible values and init functions for providers.
5210*4882a593Smuzhiyun  *
5211*4882a593Smuzhiyun  * This function scans the device tree for matching clock providers
5212*4882a593Smuzhiyun  * and calls their initialization functions, trying to initialize each
5213*4882a593Smuzhiyun  * provider only after the providers of its parent clocks are ready.
5214*4882a593Smuzhiyun  */
5215*4882a593Smuzhiyun void __init of_clk_init(const struct of_device_id *matches)
5216*4882a593Smuzhiyun {
5217*4882a593Smuzhiyun 	const struct of_device_id *match;
5218*4882a593Smuzhiyun 	struct device_node *np;
5219*4882a593Smuzhiyun 	struct clock_provider *clk_provider, *next;
5220*4882a593Smuzhiyun 	bool is_init_done;
5221*4882a593Smuzhiyun 	bool force = false;
5222*4882a593Smuzhiyun 	LIST_HEAD(clk_provider_list);
5223*4882a593Smuzhiyun 
5224*4882a593Smuzhiyun 	if (!matches)
5225*4882a593Smuzhiyun 		matches = &__clk_of_table;
5226*4882a593Smuzhiyun 
5227*4882a593Smuzhiyun 	/* First prepare the list of the clocks providers */
5228*4882a593Smuzhiyun 	for_each_matching_node_and_match(np, matches, &match) {
5229*4882a593Smuzhiyun 		struct clock_provider *parent;
5230*4882a593Smuzhiyun 
5231*4882a593Smuzhiyun 		if (!of_device_is_available(np))
5232*4882a593Smuzhiyun 			continue;
5233*4882a593Smuzhiyun 
5234*4882a593Smuzhiyun 		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
5235*4882a593Smuzhiyun 		if (!parent) {
5236*4882a593Smuzhiyun 			list_for_each_entry_safe(clk_provider, next,
5237*4882a593Smuzhiyun 						 &clk_provider_list, node) {
5238*4882a593Smuzhiyun 				list_del(&clk_provider->node);
5239*4882a593Smuzhiyun 				of_node_put(clk_provider->np);
5240*4882a593Smuzhiyun 				kfree(clk_provider);
5241*4882a593Smuzhiyun 			}
5242*4882a593Smuzhiyun 			of_node_put(np);
5243*4882a593Smuzhiyun 			return;
5244*4882a593Smuzhiyun 		}
5245*4882a593Smuzhiyun 
5246*4882a593Smuzhiyun 		parent->clk_init_cb = match->data;
5247*4882a593Smuzhiyun 		parent->np = of_node_get(np);
5248*4882a593Smuzhiyun 		list_add_tail(&parent->node, &clk_provider_list);
5249*4882a593Smuzhiyun 	}
5250*4882a593Smuzhiyun 
5251*4882a593Smuzhiyun 	while (!list_empty(&clk_provider_list)) {
5252*4882a593Smuzhiyun 		is_init_done = false;
5253*4882a593Smuzhiyun 		list_for_each_entry_safe(clk_provider, next,
5254*4882a593Smuzhiyun 					&clk_provider_list, node) {
5255*4882a593Smuzhiyun 			if (force || parent_ready(clk_provider->np)) {
5256*4882a593Smuzhiyun 
5257*4882a593Smuzhiyun 				/* Don't populate platform devices */
5258*4882a593Smuzhiyun 				of_node_set_flag(clk_provider->np,
5259*4882a593Smuzhiyun 						 OF_POPULATED);
5260*4882a593Smuzhiyun 
5261*4882a593Smuzhiyun 				clk_provider->clk_init_cb(clk_provider->np);
5262*4882a593Smuzhiyun 				of_clk_set_defaults(clk_provider->np, true);
5263*4882a593Smuzhiyun 
5264*4882a593Smuzhiyun 				list_del(&clk_provider->node);
5265*4882a593Smuzhiyun 				of_node_put(clk_provider->np);
5266*4882a593Smuzhiyun 				kfree(clk_provider);
5267*4882a593Smuzhiyun 				is_init_done = true;
5268*4882a593Smuzhiyun 			}
5269*4882a593Smuzhiyun 		}
5270*4882a593Smuzhiyun 
5271*4882a593Smuzhiyun 		/*
5272*4882a593Smuzhiyun 		 * We didn't manage to initialize any of the
5273*4882a593Smuzhiyun 		 * remaining providers during the last loop, so now we
5274*4882a593Smuzhiyun 		 * initialize all the remaining ones unconditionally
5275*4882a593Smuzhiyun 		 * in case the clock parent was not mandatory
5276*4882a593Smuzhiyun 		 * in case the clock parents were not mandatory.
5277*4882a593Smuzhiyun 		if (!is_init_done)
5278*4882a593Smuzhiyun 			force = true;
5279*4882a593Smuzhiyun 	}
5280*4882a593Smuzhiyun }
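/*
 * Example (sketch): providers enter __clk_of_table via CLK_OF_DECLARE,
 * which is the table of_clk_init() walks when @matches is NULL. The
 * compatible string and init function below are hypothetical.
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		// register clocks, then of_clk_add_hw_provider(...)
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_init);
 */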
5281*4882a593Smuzhiyun #endif
5282*4882a593Smuzhiyun 
5283*4882a593Smuzhiyun #ifdef CONFIG_COMMON_CLK_PROCFS
5284*4882a593Smuzhiyun #include <linux/proc_fs.h>
5285*4882a593Smuzhiyun #include <linux/seq_file.h>
5286*4882a593Smuzhiyun 
5287*4882a593Smuzhiyun static int clk_rate_show(struct seq_file *s, void *v)
5288*4882a593Smuzhiyun {
5289*4882a593Smuzhiyun 	seq_puts(s, "set clk rate:\n");
5290*4882a593Smuzhiyun 	seq_puts(s, "	echo [clk_name] [rate(Hz)] > /proc/clk/rate\n");
5291*4882a593Smuzhiyun 
5292*4882a593Smuzhiyun 	return 0;
5293*4882a593Smuzhiyun }
5294*4882a593Smuzhiyun 
5295*4882a593Smuzhiyun static int clk_rate_open(struct inode *inode, struct file *file)
5296*4882a593Smuzhiyun {
5297*4882a593Smuzhiyun 	return single_open(file, clk_rate_show, NULL);
5298*4882a593Smuzhiyun }
5299*4882a593Smuzhiyun 
5300*4882a593Smuzhiyun static ssize_t clk_rate_write(struct file *filp, const char __user *buf,
5301*4882a593Smuzhiyun 			      size_t cnt, loff_t *ppos)
5302*4882a593Smuzhiyun {
5303*4882a593Smuzhiyun 	char clk_name[40], input[55];
5304*4882a593Smuzhiyun 	struct clk_core *core;
5305*4882a593Smuzhiyun 	int argc, ret, val;
5306*4882a593Smuzhiyun 
5307*4882a593Smuzhiyun 	if (cnt >= sizeof(input))
5308*4882a593Smuzhiyun 		return -EINVAL;
5309*4882a593Smuzhiyun 
5310*4882a593Smuzhiyun 	if (copy_from_user(input, buf, cnt))
5311*4882a593Smuzhiyun 		return -EFAULT;
5312*4882a593Smuzhiyun 
5313*4882a593Smuzhiyun 	input[cnt] = '\0';
5314*4882a593Smuzhiyun 
5315*4882a593Smuzhiyun 	argc = sscanf(input, "%38s %10d", clk_name, &val);
5316*4882a593Smuzhiyun 	if (argc != 2)
5317*4882a593Smuzhiyun 		return -EINVAL;
5318*4882a593Smuzhiyun 
5319*4882a593Smuzhiyun 	core = clk_core_lookup(clk_name);
5320*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(core)) {
5321*4882a593Smuzhiyun 		pr_err("get %s error\n", clk_name);
5322*4882a593Smuzhiyun 		return -EINVAL;
5323*4882a593Smuzhiyun 	}
5324*4882a593Smuzhiyun 
5325*4882a593Smuzhiyun 	clk_prepare_lock();
5326*4882a593Smuzhiyun 	ret = clk_core_set_rate_nolock(core, val);
5327*4882a593Smuzhiyun 	clk_prepare_unlock();
5328*4882a593Smuzhiyun 	if (ret) {
5329*4882a593Smuzhiyun 		pr_err("set %s rate %d error\n", clk_name, val);
5330*4882a593Smuzhiyun 		return ret;
5331*4882a593Smuzhiyun 	}
5332*4882a593Smuzhiyun 
5333*4882a593Smuzhiyun 	return cnt;
5334*4882a593Smuzhiyun }
5335*4882a593Smuzhiyun 
5336*4882a593Smuzhiyun static const struct proc_ops clk_rate_proc_ops = {
5337*4882a593Smuzhiyun 	.proc_open	= clk_rate_open,
5338*4882a593Smuzhiyun 	.proc_read	= seq_read,
5339*4882a593Smuzhiyun 	.proc_write	= clk_rate_write,
5340*4882a593Smuzhiyun 	.proc_lseek	= seq_lseek,
5341*4882a593Smuzhiyun 	.proc_release	= single_release,
5342*4882a593Smuzhiyun };
5343*4882a593Smuzhiyun 
5344*4882a593Smuzhiyun static int clk_enable_show(struct seq_file *s, void *v)
5345*4882a593Smuzhiyun {
5346*4882a593Smuzhiyun 	seq_puts(s, "enable clk:\n");
5347*4882a593Smuzhiyun 	seq_puts(s, "	echo enable [clk_name] > /proc/clk/enable\n");
5348*4882a593Smuzhiyun 	seq_puts(s, "disable clk:\n");
5349*4882a593Smuzhiyun 	seq_puts(s, "	echo disable [clk_name] > /proc/clk/enable\n");
5350*4882a593Smuzhiyun 
5351*4882a593Smuzhiyun 	return 0;
5352*4882a593Smuzhiyun }
5353*4882a593Smuzhiyun 
5354*4882a593Smuzhiyun static int clk_enable_open(struct inode *inode, struct file *file)
5355*4882a593Smuzhiyun {
5356*4882a593Smuzhiyun 	return single_open(file, clk_enable_show, NULL);
5357*4882a593Smuzhiyun }
5358*4882a593Smuzhiyun 
5359*4882a593Smuzhiyun static ssize_t clk_enable_write(struct file *filp, const char __user *buf,
5360*4882a593Smuzhiyun 				size_t cnt, loff_t *ppos)
5361*4882a593Smuzhiyun {
5362*4882a593Smuzhiyun 	char cmd[10], clk_name[40], input[50];
5363*4882a593Smuzhiyun 	struct clk_core *core;
5364*4882a593Smuzhiyun 	int argc, ret;
5365*4882a593Smuzhiyun 
5366*4882a593Smuzhiyun 	if (cnt >= sizeof(input))
5367*4882a593Smuzhiyun 		return -EINVAL;
5368*4882a593Smuzhiyun 
5369*4882a593Smuzhiyun 	if (copy_from_user(input, buf, cnt))
5370*4882a593Smuzhiyun 		return -EFAULT;
5371*4882a593Smuzhiyun 
5372*4882a593Smuzhiyun 	input[cnt] = '\0';
5373*4882a593Smuzhiyun 
5374*4882a593Smuzhiyun 	argc = sscanf(input, "%8s %38s", cmd, clk_name);
5375*4882a593Smuzhiyun 	if (argc != 2)
5376*4882a593Smuzhiyun 		return -EINVAL;
5377*4882a593Smuzhiyun 
5378*4882a593Smuzhiyun 	core = clk_core_lookup(clk_name);
5379*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(core)) {
5380*4882a593Smuzhiyun 		pr_err("get %s error\n", clk_name);
5381*4882a593Smuzhiyun 		return -EINVAL;
5382*4882a593Smuzhiyun 	}
5383*4882a593Smuzhiyun 
5384*4882a593Smuzhiyun 	if (!strncmp(cmd, "enable", strlen("enable"))) {
5385*4882a593Smuzhiyun 		ret = clk_core_prepare_enable(core);
5386*4882a593Smuzhiyun 		if (ret)
5387*4882a593Smuzhiyun 			pr_err("enable %s err\n", clk_name);
5388*4882a593Smuzhiyun 	} else if (!strncmp(cmd, "disable", strlen("disable"))) {
5389*4882a593Smuzhiyun 		clk_core_disable_unprepare(core);
5390*4882a593Smuzhiyun 	} else {
5391*4882a593Smuzhiyun 		pr_err("unsupported cmd(%s)\n", cmd);
5392*4882a593Smuzhiyun 	}
5393*4882a593Smuzhiyun 
5394*4882a593Smuzhiyun 	return cnt;
5395*4882a593Smuzhiyun }
5396*4882a593Smuzhiyun 
5397*4882a593Smuzhiyun static const struct proc_ops clk_enable_proc_ops = {
5398*4882a593Smuzhiyun 	.proc_open	= clk_enable_open,
5399*4882a593Smuzhiyun 	.proc_read	= seq_read,
5400*4882a593Smuzhiyun 	.proc_write	= clk_enable_write,
5401*4882a593Smuzhiyun 	.proc_lseek	= seq_lseek,
5402*4882a593Smuzhiyun 	.proc_release	= single_release,
5403*4882a593Smuzhiyun };
5404*4882a593Smuzhiyun 
5405*4882a593Smuzhiyun static int clk_parent_show(struct seq_file *s, void *v)
5406*4882a593Smuzhiyun {
5407*4882a593Smuzhiyun 	seq_puts(s, "echo [clk_name] [parent_name] > /proc/clk/parent\n");
5408*4882a593Smuzhiyun 
5409*4882a593Smuzhiyun 	return 0;
5410*4882a593Smuzhiyun }
5411*4882a593Smuzhiyun 
5412*4882a593Smuzhiyun static int clk_parent_open(struct inode *inode, struct file *file)
5413*4882a593Smuzhiyun {
5414*4882a593Smuzhiyun 	return single_open(file, clk_parent_show, NULL);
5415*4882a593Smuzhiyun }
5416*4882a593Smuzhiyun 
5417*4882a593Smuzhiyun static ssize_t clk_parent_write(struct file *filp, const char __user *buf,
5418*4882a593Smuzhiyun 				size_t cnt, loff_t *ppos)
5419*4882a593Smuzhiyun {
5420*4882a593Smuzhiyun 	char clk_name[40], p_name[40];
5421*4882a593Smuzhiyun 	char input[80];
5422*4882a593Smuzhiyun 	struct clk_core *core, *p;
5423*4882a593Smuzhiyun 	int argc, ret;
5424*4882a593Smuzhiyun 
5425*4882a593Smuzhiyun 	if (cnt >= sizeof(input))
5426*4882a593Smuzhiyun 		return -EINVAL;
5427*4882a593Smuzhiyun 
5428*4882a593Smuzhiyun 	if (copy_from_user(input, buf, cnt))
5429*4882a593Smuzhiyun 		return -EFAULT;
5430*4882a593Smuzhiyun 
5431*4882a593Smuzhiyun 	input[cnt] = '\0';
5432*4882a593Smuzhiyun 
5433*4882a593Smuzhiyun 	argc = sscanf(input, "%38s %38s", clk_name, p_name);
5434*4882a593Smuzhiyun 	if (argc != 2)
5435*4882a593Smuzhiyun 		return -EINVAL;
5436*4882a593Smuzhiyun 
5437*4882a593Smuzhiyun 	core = clk_core_lookup(clk_name);
5438*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(core)) {
5439*4882a593Smuzhiyun 		pr_err("get %s error\n", clk_name);
5440*4882a593Smuzhiyun 		return -EINVAL;
5441*4882a593Smuzhiyun 	}
5442*4882a593Smuzhiyun 	p = clk_core_lookup(p_name);
5443*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(p)) {
5444*4882a593Smuzhiyun 		pr_err("get %s error\n", p_name);
5445*4882a593Smuzhiyun 		return -EINVAL;
5446*4882a593Smuzhiyun 	}
5447*4882a593Smuzhiyun 	clk_prepare_lock();
5448*4882a593Smuzhiyun 	ret = clk_core_set_parent_nolock(core, p);
5449*4882a593Smuzhiyun 	clk_prepare_unlock();
5450*4882a593Smuzhiyun 	if (ret < 0)
5451*4882a593Smuzhiyun 		pr_err("set clk(%s)'s parent(%s) error\n", clk_name, p_name);
5452*4882a593Smuzhiyun 
5453*4882a593Smuzhiyun 	return cnt;
5454*4882a593Smuzhiyun }
5455*4882a593Smuzhiyun 
5456*4882a593Smuzhiyun static const struct proc_ops clk_parent_proc_ops = {
5457*4882a593Smuzhiyun 	.proc_open	= clk_parent_open,
5458*4882a593Smuzhiyun 	.proc_read	= seq_read,
5459*4882a593Smuzhiyun 	.proc_write	= clk_parent_write,
5460*4882a593Smuzhiyun 	.proc_lseek	= seq_lseek,
5461*4882a593Smuzhiyun 	.proc_release	= single_release,
5462*4882a593Smuzhiyun };
5463*4882a593Smuzhiyun 
5464*4882a593Smuzhiyun static void clk_proc_summary_show_one(struct seq_file *s, struct clk_core *c,
5465*4882a593Smuzhiyun 				      int level)
5466*4882a593Smuzhiyun {
5467*4882a593Smuzhiyun 	if (!c)
5468*4882a593Smuzhiyun 		return;
5469*4882a593Smuzhiyun 
5470*4882a593Smuzhiyun 	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
5471*4882a593Smuzhiyun 		   level * 3 + 1, "",
5472*4882a593Smuzhiyun 		   30 - level * 3, c->name,
5473*4882a593Smuzhiyun 		   c->enable_count, c->prepare_count, c->protect_count,
5474*4882a593Smuzhiyun 		   clk_core_get_rate_recalc(c),
5475*4882a593Smuzhiyun 		   clk_core_get_accuracy_recalc(c),
5476*4882a593Smuzhiyun 		   clk_core_get_phase(c),
5477*4882a593Smuzhiyun 		   clk_core_get_scaled_duty_cycle(c, 100000));
5478*4882a593Smuzhiyun }
5479*4882a593Smuzhiyun 
5480*4882a593Smuzhiyun static void clk_proc_summary_show_subtree(struct seq_file *s,
5481*4882a593Smuzhiyun 					  struct clk_core *c, int level)
5482*4882a593Smuzhiyun {
5483*4882a593Smuzhiyun 	struct clk_core *child;
5484*4882a593Smuzhiyun 
5485*4882a593Smuzhiyun 	if (!c)
5486*4882a593Smuzhiyun 		return;
5487*4882a593Smuzhiyun 
5488*4882a593Smuzhiyun 	clk_proc_summary_show_one(s, c, level);
5489*4882a593Smuzhiyun 
5490*4882a593Smuzhiyun 	hlist_for_each_entry(child, &c->children, child_node)
5491*4882a593Smuzhiyun 		clk_proc_summary_show_subtree(s, child, level + 1);
5492*4882a593Smuzhiyun }
5493*4882a593Smuzhiyun 
5494*4882a593Smuzhiyun static int clk_proc_summary_show(struct seq_file *s, void *v)
5495*4882a593Smuzhiyun {
5496*4882a593Smuzhiyun 	struct clk_core *c;
5497*4882a593Smuzhiyun 	struct hlist_head *all_lists[] = {
5498*4882a593Smuzhiyun 		&clk_root_list,
5499*4882a593Smuzhiyun 		&clk_orphan_list,
5500*4882a593Smuzhiyun 		NULL,
5501*4882a593Smuzhiyun 	};
5502*4882a593Smuzhiyun 	struct hlist_head **lists = all_lists;
5503*4882a593Smuzhiyun 
5504*4882a593Smuzhiyun 	seq_puts(s, "                                 enable  prepare  protect                                duty\n");
5505*4882a593Smuzhiyun 	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle\n");
5506*4882a593Smuzhiyun 	seq_puts(s, "---------------------------------------------------------------------------------------------\n");
5507*4882a593Smuzhiyun 
5508*4882a593Smuzhiyun 	clk_prepare_lock();
5509*4882a593Smuzhiyun 
5510*4882a593Smuzhiyun 	for (; *lists; lists++)
5511*4882a593Smuzhiyun 		hlist_for_each_entry(c, *lists, child_node)
5512*4882a593Smuzhiyun 			clk_proc_summary_show_subtree(s, c, 0);
5513*4882a593Smuzhiyun 
5514*4882a593Smuzhiyun 	clk_prepare_unlock();
5515*4882a593Smuzhiyun 
5516*4882a593Smuzhiyun 	return 0;
5517*4882a593Smuzhiyun }
5518*4882a593Smuzhiyun 
5519*4882a593Smuzhiyun static int __init clk_create_procfs(void)
5520*4882a593Smuzhiyun {
5521*4882a593Smuzhiyun 	struct proc_dir_entry *proc_clk_root;
5522*4882a593Smuzhiyun 	struct proc_dir_entry *ent;
5523*4882a593Smuzhiyun 
5524*4882a593Smuzhiyun 	proc_clk_root = proc_mkdir("clk", NULL);
5525*4882a593Smuzhiyun 	if (!proc_clk_root)
5526*4882a593Smuzhiyun 		return -EINVAL;
5527*4882a593Smuzhiyun 
5528*4882a593Smuzhiyun 	ent = proc_create("rate", 0644, proc_clk_root, &clk_rate_proc_ops);
5529*4882a593Smuzhiyun 	if (!ent)
5530*4882a593Smuzhiyun 		goto fail;
5531*4882a593Smuzhiyun 
5532*4882a593Smuzhiyun 	ent = proc_create("enable", 0644, proc_clk_root, &clk_enable_proc_ops);
5533*4882a593Smuzhiyun 	if (!ent)
5534*4882a593Smuzhiyun 		goto fail;
5535*4882a593Smuzhiyun 
5536*4882a593Smuzhiyun 	ent = proc_create("parent", 0644, proc_clk_root, &clk_parent_proc_ops);
5537*4882a593Smuzhiyun 	if (!ent)
5538*4882a593Smuzhiyun 		goto fail;
5539*4882a593Smuzhiyun 
5540*4882a593Smuzhiyun 	ent = proc_create_single("summary", 0444, proc_clk_root,
5541*4882a593Smuzhiyun 				 clk_proc_summary_show);
5542*4882a593Smuzhiyun 	if (!ent)
5543*4882a593Smuzhiyun 		goto fail;
5544*4882a593Smuzhiyun 
5545*4882a593Smuzhiyun 	return 0;
5546*4882a593Smuzhiyun 
5547*4882a593Smuzhiyun fail:
5548*4882a593Smuzhiyun 	proc_remove(proc_clk_root);
5549*4882a593Smuzhiyun 	return -EINVAL;
5550*4882a593Smuzhiyun }
5551*4882a593Smuzhiyun late_initcall_sync(clk_create_procfs);
5552*4882a593Smuzhiyun #endif