/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

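/*
 * Example (illustrative sketch, not a definitive recipe): a consumer can
 * describe its clocks in a clk_bulk_data table and pass the whole table to
 * the clk_bulk_*() helpers.  The clock names "bus" and "core" below are
 * hypothetical.
 *
 *   static struct clk_bulk_data foo_clks[] = {
 *           { .id = "bus"  },
 *           { .id = "core" },
 *   };
 *
 *   ret = clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *   if (ret)
 *           return ret;
 */
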
#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

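/*
 * Example (illustrative sketch): a minimal rate-change notifier callback and
 * its registration.  The foo_* names and foo_quiesce() helper are
 * hypothetical.
 *
 *   static int foo_clk_notify(struct notifier_block *nb,
 *                             unsigned long event, void *data)
 *   {
 *           struct clk_notifier_data *ndata = data;
 *
 *           if (event == PRE_RATE_CHANGE)
 *                   foo_quiesce(ndata->old_rate, ndata->new_rate);
 *           return NOTIFY_OK;
 *   }
 *
 *   static struct notifier_block foo_nb = {
 *           .notifier_call = foo_clk_notify,
 *   };
 *
 *   ret = clk_notifier_register(clk, &foo_nb);
 */
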
/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * devm_clk_notifier_register - register a managed rate-change notifier callback
 * @dev: device for clock "consumer"
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * Returns 0 on success, or a negative errno otherwise.
 */
int devm_clk_notifier_register(struct device *dev, struct clk *clk,
			       struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, or a negative errno otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, or a negative errno
 * on failure.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, or a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, or a
 * negative errno on failure.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);

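/*
 * Example (illustrative sketch): request a 50% duty cycle, then read it back
 * as a percentage by passing scale = 100.
 *
 *   ret = clk_set_duty_cycle(clk, 1, 2);    (num/den = 1/2)
 *   if (!ret)
 *           pct = clk_get_scaled_duty_cycle(clk, 100);
 */
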
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int devm_clk_notifier_register(struct device *dev,
					     struct clk *clk,
					     struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline long clk_set_phase(struct clk *clk, int phase)
{
	return -ENOTSUPP;
}

static inline long clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
						     unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

#endif

/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock.  The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);

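/*
 * Example (illustrative sketch): the typical consumer lifecycle.  The
 * consumer ID "baud" is hypothetical.
 *
 *   struct clk *clk = clk_get(dev, "baud");
 *
 *   if (IS_ERR(clk))
 *           return PTR_ERR(clk);
 *   ret = clk_prepare_enable(clk);
 *   ...
 *   clk_disable_unprepare(clk);
 *   clk_put(clk);
 */
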
/**
 * clk_bulk_get - lookup and obtain a number of references to clock producers.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation. If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully, or valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation. If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);

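/*
 * Example (illustrative sketch): grabbing every clock a device references
 * without naming them individually.
 *
 *   struct clk_bulk_data *clks;
 *   int num = clk_bulk_get_all(dev, &clks);
 *
 *   if (num < 0)
 *           return num;
 *   ret = clk_bulk_prepare_enable(num, clks);
 */
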
/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Returns 0 on success, or a negative errno on failure.
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation with management; the clks will automatically be freed when the
 * device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where there is no clock
 * producer.  In this case, instead of returning -ENOENT, the function returns
 * NULL for the given clk. It is assumed all clocks in clk_bulk_data are
 * optional.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully or for any clk there was no clk provider available, otherwise
 * returns valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation with management; the clks will automatically be freed when the
 * device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

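/*
 * Example (illustrative sketch): obtaining a clock in probe() via devres.
 * The consumer ID "core" is hypothetical.  Only the reference is managed;
 * clk_prepare_enable() calls must still be balanced by
 * clk_disable_unprepare() before the device is unbound.
 *
 *   clk = devm_clk_get(&pdev->dev, "core");
 *   if (IS_ERR(clk))
 *           return PTR_ERR(clk);
 *   ret = clk_prepare_enable(clk);
 */
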
/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 *			   clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as devm_clk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns NULL.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);
/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from executing, even indirectly,
 * any operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows a driver to release the exclusivity it previously got
 * from clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);

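/*
 * Example (illustrative sketch): pinning the rate while rate-sensitive work
 * is in flight, then releasing the claim.
 *
 *   ret = clk_rate_exclusive_get(clk);
 *   if (ret)
 *           return ret;
 *   ... perform the rate-sensitive work ...
 *   clk_rate_exclusive_put(clk);
 */
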
/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock cannot be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clks to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put	- "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put	- "free" the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on these
 * clock sources are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on these
 * clock sources are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put	- "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */


/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

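/*
 * Example (illustrative sketch): probe what the hardware can actually do
 * before committing to a rate.  is_acceptable() is a hypothetical policy
 * check.
 *
 *   long rounded = clk_round_rate(clk, 48000000);
 *
 *   if (rounded > 0 && is_acceptable(rounded))
 *           ret = clk_set_rate(clk, rounded);
 */
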
/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Updating the rate starts at the top-most affected clock and then
 * walks the tree down to the bottom-most clock that needs updating.
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *                          clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

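/*
 * Example (illustrative sketch): constrain a clock to a band rather than a
 * single frequency, leaving the exact rate to the framework and to other
 * consumers' requests.
 *
 *   ret = clk_set_rate_range(clk, 100000000, 200000000);
 */
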
/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

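/*
 * Example (illustrative sketch): reparent a mux clock only after verifying
 * that the candidate is a possible parent.
 *
 *   if (clk_has_parent(clk, new_parent))
 *           ret = clk_set_parent(clk, new_parent);
 */
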
/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer. In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock registers for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled. Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
				int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
					 struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
				int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
				struct device_node *np, const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

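/*
 * Example (illustrative sketch): the paired helpers in a typical non-atomic
 * enable/disable path.
 *
 *   ret = clk_prepare_enable(clk);
 *   if (ret)
 *           return ret;
 *   ... use the hardware ...
 *   clk_disable_unprepare(clk);
 */
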
static inline int __must_check
clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
					      const struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer. In
 * this case, instead of returning -ENOENT, the function returns NULL.
 */
static inline struct clk *clk_get_optional(struct device *dev, const char *id)
{
	struct clk *clk = clk_get(dev, id);

	if (clk == ERR_PTR(-ENOENT))
		return NULL;

	return clk;
}

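/*
 * Example (illustrative sketch): an optional clock may legitimately be NULL,
 * and the clk consumer calls accept a NULL clk, so no special-casing is
 * needed afterwards.  The consumer ID "aux" is hypothetical.
 *
 *   clk = clk_get_optional(dev, "aux");
 *   if (IS_ERR(clk))
 *           return PTR_ERR(clk);
 *   ret = clk_prepare_enable(clk);    (a NULL clk succeeds as a no-op)
 */
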
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

#endif