/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/timekeeping.h>
#include "clock.h"

#define NO_IDLEST			0

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define OMAP4_STBYST_MASK		BIT(18)
#define OMAP4_STBYST_SHIFT		18

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3
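
/*
 * Note on the IDLEST encoding above: 0x0 means the module is fully
 * functional and 0x2 means only the interface is idle; both count as
 * "ready" in _omap4_is_ready() below. 0x3 means the module is disabled.
 * The remaining value (0x1, which appears to indicate a module in
 * transition) is simply polled through.
 */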

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

static bool _early_timeout = true;

struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
	char *clkdm_name;
};

struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

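/*
 * Timeout tracking for clkctrl register polling: before timekeeping is
 * available (and while it is suspended) the timeout is tracked as a raw
 * udelay() cycle count, otherwise a ktime_t start stamp is used. See
 * _omap4_is_timeout() for the selection logic.
 */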
union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	/*
	 * There are two special cases where ktime_to_ns() can't be
	 * used to track the timeouts. The first one is during early boot,
	 * when the timers haven't been initialized yet. The second one
	 * is during the suspend-resume cycle, while timekeeping is being
	 * suspended / resumed. The clocksource for the system can be from
	 * a timer that requires pm_runtime access, which will eventually
	 * bring us here with timekeeping_suspended, during both the
	 * suspend entry and resume paths. This happens at least on the
	 * am43xx platform. Account for the flakiness of udelay() by
	 * multiplying the timeout value by 2.
	 */
	if (unlikely(_early_timeout || timekeeping_suspended)) {
		if (time->cycles++ < timeout) {
			udelay(1 * 2);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}

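/*
 * Timekeeping is available by arch_initcall time, so switch
 * _omap4_is_timeout() over from the udelay() based early-boot path to
 * ktime based timeouts from here on.
 */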
static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

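/*
 * Enable a clkctrl module clock: enable the associated clockdomain (if
 * any), program the module's MODULEMODE value into the CLKCTRL register
 * and then poll IDLEST until the module reports ready, unless the
 * NO_IDLEST flag indicates there is no idle status to poll.
 */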
static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	if (!clk->enable_bit)
		return 0;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (test_bit(NO_IDLEST, &clk->flags))
		return 0;

	/* Wait until module is enabled */
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

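/*
 * Disable a clkctrl module clock: clear MODULEMODE, optionally wait for
 * IDLEST to report the module as disabled, and finally release the
 * associated clockdomain. The clockdomain is released even for modules
 * that have no MODULEMODE field.
 */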
static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		goto exit;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (test_bit(NO_IDLEST, &clk->flags))
		goto exit;

	/* Wait until module is disabled */
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable		= _omap4_clkctrl_clk_enable,
	.disable	= _omap4_clkctrl_clk_disable,
	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
	.init		= omap2_init_clk_clkdm,
};

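/*
 * Translate a two-cell clkctrl specifier into a clk_hw. The first cell is
 * the CLKCTRL register offset within this provider, the second is the bit
 * offset of a gate/mux/divider subclock (0 selects the main module clock).
 * As an illustration (node label and offsets here are made up), a consumer
 * could reference a clkctrl clock as:
 *
 *	clocks = <&l4per_clkctrl 0x140 0>;
 */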
static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry;
	bool found = false;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(entry, &provider->clocks, node) {
		if (entry->reg_offset == clkspec->args[0] &&
		    entry->bit_offset == clkspec->args[1]) {
			found = true;
			break;
		}
	}

	if (!found)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}

/* Get clkctrl clock base name based on clkctrl_name or dts node */
static const char * __init clkctrl_get_clock_name(struct device_node *np,
						  const char *clkctrl_name,
						  int offset, int index,
						  bool legacy_naming)
{
	char *clock_name;

	/* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
	if (clkctrl_name && !legacy_naming) {
		clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
				       clkctrl_name, offset, index);
		strreplace(clock_name, '_', '-');

		return clock_name;
	}

	/* l4per_cm:clk:1234:0 old style naming based on clkctrl_name */
	if (clkctrl_name)
		return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
				 clkctrl_name, offset, index);

	/* l4per_cm:clk:1234:0 old style naming based on parent node name */
	if (legacy_naming)
		return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
				 np->parent, offset, index);

	/* l4per-clkctrl:1234:0 style naming based on node name */
	return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
}

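/*
 * Common registration helper for the clkctrl subclocks (gates, muxes and
 * dividers): generate the clock name, register the prepared clk_hw and add
 * it to the provider's clock list so that it can be found by
 * _ti_omap4_clkctrl_xlate().
 */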
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops,
			 const char *clkctrl_name)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
					   ti_clk_get_features()->flags &
					   TI_CLK_CLKCTRL_COMPAT);

	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = 0;

	clk = ti_clk_register(NULL, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}

static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg, const char *clkctrl_name)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops, clkctrl_name))
		kfree(clk_hw);
}

static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg, const char *clkctrl_name)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

	mux->mask = num_parents;
	if (!(mux->flags & CLK_MUX_INDEX_ONE))
		mux->mask--;

	mux->mask = (1 << fls(mux->mask)) - 1;

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops, clkctrl_name))
		kfree(mux);
}

static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg, const char *clkctrl_name)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;
	u8 div_flags = 0;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;
	div->flags = div_data->flags;

	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
		div_flags |= CLKF_INDEX_POWER_OF_TWO;

	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
				      div_data->max_div, div_flags,
				      div)) {
		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
		       node, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops, clkctrl_name))
		kfree(div);
}

static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg, const char *clkctrl_name)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg, clkctrl_name);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg, clkctrl_name);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg, clkctrl_name);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

static void __init _clkctrl_add_provider(void *data,
					 struct device_node *np)
{
	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

/* Get clkctrl clock name based on compatible string for clkctrl */
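/*
 * For example (an illustrative compatible value), "ti,clkctrl-l4-per" would
 * yield "l4_per": the "ti,clkctrl-" prefix is dropped and any dashes are
 * replaced with underscores.
 */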
static char * __init clkctrl_get_name(struct device_node *np)
{
	struct property *prop;
	const int prefix_len = 11;
	const char *compat;
	char *name;

	of_property_for_each_string(np, "compatible", prop, compat) {
		if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
			/* Two letter minimum name length for l3, l4 etc */
			if (strnlen(compat + prefix_len, 16) < 2)
				continue;
			name = kasprintf(GFP_KERNEL, "%s", compat + prefix_len);
			if (!name)
				continue;
			strreplace(name, '-', '_');

			return name;
		}
	}

	return NULL;
}

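/*
 * Setup for a "ti,clkctrl" node: match the node address against the SoC
 * specific clkctrl data tables, derive the clockdomain name, register the
 * main module clock plus any gate/mux/divider subclocks for each CLKCTRL
 * register, and finally register the node as a clock provider (retrying
 * later if the provider registration defers).
 */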
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk = NULL;
	const __be32 *addrp;
	bool legacy_naming;
	char *clkctrl_name;
	u32 addr;
	int ret;
	char *c;
	u16 soc_mask = 0;

	if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
	    of_node_name_eq(node, "clk"))
		ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;

	addrp = of_get_address(node, 0, NULL, NULL);
	addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
	if (of_machine_is_compatible("ti,omap5"))
		data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
	if (of_machine_is_compatible("ti,dra7")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = dra7_clkctrl_compat_data;
		else
			data = dra7_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,dra72"))
		soc_mask = CLKF_SOC_DRA72;
	if (of_machine_is_compatible("ti,dra74"))
		soc_mask = CLKF_SOC_DRA74;
	if (of_machine_is_compatible("ti,dra76"))
		soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
	if (of_machine_is_compatible("ti,am33xx")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am3_clkctrl_compat_data;
		else
			data = am3_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_AM43XX
	if (of_machine_is_compatible("ti,am4372")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am4_clkctrl_compat_data;
		else
			data = am4_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,am438x")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am438x_clkctrl_compat_data;
		else
			data = am438x_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_TI81XX
	if (of_machine_is_compatible("ti,dm814"))
		data = dm814_clkctrl_data;

	if (of_machine_is_compatible("ti,dm816"))
		data = dm816_clkctrl_data;
#endif

	if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
		soc_mask |= CLKF_SOC_NONSEC;

	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%pOF not found from clkctrl data.\n", node);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);

	legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
	clkctrl_name = clkctrl_get_name(node);
	if (clkctrl_name) {
		provider->clkdm_name = kasprintf(GFP_KERNEL,
						 "%s_clkdm", clkctrl_name);
		goto clkdm_found;
	}

	/*
	 * The code below can be removed once all clkctrl nodes use a domain
	 * specific compatible property and standard clock node naming.
	 */
	if (legacy_naming) {
		/* The "xxx" padding leaves room for the strcat() below */
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create the default clkdm name: drop the trailing "cm" and
		 * the "xxx" padding so that appending "clkdm" below turns
		 * the "_cm" suffix of the parent node name into "_clkdm".
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
	} else {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create the default clkdm name: drop the trailing "clkctrl"
		 * from the node name so that appending "clkdm" below turns
		 * the "-clkctrl" / "_clkctrl" suffix into "_clkdm".
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
	}

	strcat(provider->clkdm_name, "clkdm");

	/* Replace any dash in the clkdm name with an underscore */
	c = provider->clkdm_name;

	while (*c) {
		if (*c == '-')
			*c = '_';
		c++;
	}
clkdm_found:
	INIT_LIST_HEAD(&provider->clocks);

	/* Generate clocks */
	reg_data = data->regs;

	while (reg_data->parent) {
		if ((reg_data->flags & CLKF_SOC_MASK) &&
		    (reg_data->flags & soc_mask) == 0) {
			reg_data++;
			continue;
		}

		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr, clkctrl_name);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			set_bit(NO_IDLEST, &hw->flags);

		if (reg_data->clkdm_name)
			hw->clkdm_name = reg_data->clkdm_name;
		else
			hw->clkdm_name = provider->clkdm_name;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		if (reg_data->flags & CLKF_SET_RATE_PARENT)
			init.flags |= CLK_SET_RATE_PARENT;

		init.name = clkctrl_get_clock_name(node, clkctrl_name,
						   reg_data->offset, 0,
						   legacy_naming);
		if (!init.name)
			goto cleanup;

		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	if (ret == -EPROBE_DEFER)
		ti_clk_retry_init(node, provider, _clkctrl_add_provider);

	kfree(clkctrl_name);

	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);

/**
 * ti_clk_is_in_standby - check if a clkctrl clock is in standby
 * @clk: clock to check the standby status for
 *
 * Finds whether the provided clock is in standby mode or not. Returns
 * true if the provided clock is a clkctrl type clock and it is in standby,
 * false otherwise.
 */
bool ti_clk_is_in_standby(struct clk *clk)
{
	struct clk_hw *hw;
	struct clk_hw_omap *hwclk;
	u32 val;

	hw = __clk_get_hw(clk);

	if (!omap2_clk_is_hw_omap(hw))
		return false;

	hwclk = to_clk_hw_omap(hw);

	val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);

	if (val & OMAP4_STBYST_MASK)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);