// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 Maxime Ripard
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/clk.h>
9*4882a593Smuzhiyun #include <linux/clk-provider.h>
10*4882a593Smuzhiyun #include <linux/iopoll.h>
11*4882a593Smuzhiyun #include <linux/slab.h>
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include "ccu_common.h"
14*4882a593Smuzhiyun #include "ccu_gate.h"
15*4882a593Smuzhiyun #include "ccu_reset.h"
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun static DEFINE_SPINLOCK(ccu_lock);
18*4882a593Smuzhiyun
/*
 * ccu_helper_wait_for_lock - busy-wait until a PLL reports lock
 * @common:	clock whose lock bit(s) to poll
 * @lock:	bitmask of the lock bit(s); 0 means there is nothing to wait for
 *
 * Polls either the dedicated lock-status register (when the clock has
 * CCU_FEATURE_LOCK_REG set) or the clock's own configuration register
 * until every bit in @lock reads back set.  Emits a WARN if the PLL has
 * not locked within 70 ms.
 */
void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
{
	void __iomem *addr;
	u32 reg;

	if (!lock)
		return;

	/* Some SoCs collect all lock bits in a separate status register. */
	addr = common->base + ((common->features & CCU_FEATURE_LOCK_REG) ?
			       common->lock_reg : common->reg);

	WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}
34*4882a593Smuzhiyun
/*
 * This clock notifier is called when the frequency of a PLL clock is
 * changed. In common PLL designs, changes to the dividers take effect
 * almost immediately, while changes to the multipliers (implemented
 * as dividers in the feedback loop) take a few cycles to work into
 * the feedback loop for the PLL to stabilize.
 *
 * Sometimes when the PLL clock rate is changed, the decrease in the
 * divider is too much for the decrease in the multiplier to catch up.
 * The PLL clock rate will spike, and in some cases, might lock up
 * completely.
 *
 * This notifier callback will gate and then ungate the clock,
 * effectively resetting it, so it proceeds to work. Care must be
 * taken to reparent consumers to other temporary clocks during the
 * rate change, and that this notifier callback must be the first
 * to be registered.
 */
ccu_pll_notifier_cb(struct notifier_block * nb,unsigned long event,void * data)53*4882a593Smuzhiyun static int ccu_pll_notifier_cb(struct notifier_block *nb,
54*4882a593Smuzhiyun unsigned long event, void *data)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
57*4882a593Smuzhiyun int ret = 0;
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun if (event != POST_RATE_CHANGE)
60*4882a593Smuzhiyun goto out;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun ccu_gate_helper_disable(pll->common, pll->enable);
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun ret = ccu_gate_helper_enable(pll->common, pll->enable);
65*4882a593Smuzhiyun if (ret)
66*4882a593Smuzhiyun goto out;
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun ccu_helper_wait_for_lock(pll->common, pll->lock);
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun out:
71*4882a593Smuzhiyun return notifier_from_errno(ret);
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun
ccu_pll_notifier_register(struct ccu_pll_nb * pll_nb)74*4882a593Smuzhiyun int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun return clk_notifier_register(pll_nb->common->hw.clk,
79*4882a593Smuzhiyun &pll_nb->clk_nb);
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun
/*
 * sunxi_ccu_probe - register all clocks and resets described by a CCU
 * @node:	device-tree node providing the clocks
 * @reg:	mapped MMIO base of the clock-control unit
 * @desc:	static description of the unit's clocks and resets
 *
 * Wires the shared MMIO base and spinlock into every ccu_common,
 * registers each clk_hw with the clock framework, exposes them through
 * a DT onecell provider, and finally registers the reset controller.
 * On failure, everything already registered is torn down in reverse
 * order.  Returns 0 on success or a negative errno.
 */
int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
		    const struct sunxi_ccu_desc *desc)
{
	struct ccu_reset *reset;
	int i, ret;

	/* Hand every ccu_common the MMIO base and the shared lock. */
	for (i = 0; i < desc->num_ccu_clks; i++) {
		struct ccu_common *common = desc->ccu_clks[i];

		if (common) {
			common->base = reg;
			common->lock = &ccu_lock;
		}
	}

	for (i = 0; i < desc->hw_clks->num; i++) {
		struct clk_hw *hw = desc->hw_clks->hws[i];
		const char *name;

		if (!hw)
			continue;

		/*
		 * Grab the name before registering: the clk core may
		 * invalidate hw->init once registration has run.
		 */
		name = hw->init->name;
		ret = of_clk_hw_register(node, hw);
		if (ret) {
			pr_err("Couldn't register clock %d - %s\n", i, name);
			goto err_unregister_clks;
		}
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
				     desc->hw_clks);
	if (ret)
		goto err_unregister_clks;

	reset = kzalloc(sizeof(*reset), GFP_KERNEL);
	if (!reset) {
		ret = -ENOMEM;
		goto err_del_provider;
	}

	reset->rcdev.of_node = node;
	reset->rcdev.ops = &ccu_reset_ops;
	reset->rcdev.owner = THIS_MODULE;
	reset->rcdev.nr_resets = desc->num_resets;
	reset->base = reg;
	reset->lock = &ccu_lock;
	reset->reset_map = desc->resets;

	ret = reset_controller_register(&reset->rcdev);
	if (ret)
		goto err_free_reset;

	return 0;

err_free_reset:
	kfree(reset);
err_del_provider:
	of_clk_del_provider(node);
err_unregister_clks:
	/* i indexes the first clock that failed (or hw_clks->num). */
	while (--i >= 0) {
		struct clk_hw *hw = desc->hw_clks->hws[i];

		if (hw)
			clk_hw_unregister(hw);
	}
	return ret;
}
152