// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Chen-Yu Tsai
 *
 * Chen-Yu Tsai <wens@csie.org>
 *
 * Allwinner A80 CPUS clock driver
 *
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>

static DEFINE_SPINLOCK(sun9i_a80_cpus_lock);

/**
 * sun9i_a80_cpus_setup() - Setup function for a80 cpus composite clk
 */

#define SUN9I_CPUS_MAX_PARENTS		4
#define SUN9I_CPUS_MUX_PARENT_PLL4	3
#define SUN9I_CPUS_MUX_SHIFT		16
#define SUN9I_CPUS_MUX_MASK		GENMASK(17, 16)
#define SUN9I_CPUS_MUX_GET_PARENT(reg)	((reg & SUN9I_CPUS_MUX_MASK) >> \
					 SUN9I_CPUS_MUX_SHIFT)

#define SUN9I_CPUS_DIV_SHIFT		4
#define SUN9I_CPUS_DIV_MASK		GENMASK(5, 4)
#define SUN9I_CPUS_DIV_GET(reg)		((reg & SUN9I_CPUS_DIV_MASK) >> \
					 SUN9I_CPUS_DIV_SHIFT)
#define SUN9I_CPUS_DIV_SET(reg, div)	((reg & ~SUN9I_CPUS_DIV_MASK) | \
					 (div << SUN9I_CPUS_DIV_SHIFT))
#define SUN9I_CPUS_PLL4_DIV_SHIFT	8
#define SUN9I_CPUS_PLL4_DIV_MASK	GENMASK(12, 8)
#define SUN9I_CPUS_PLL4_DIV_GET(reg)	((reg & SUN9I_CPUS_PLL4_DIV_MASK) >> \
					 SUN9I_CPUS_PLL4_DIV_SHIFT)
#define SUN9I_CPUS_PLL4_DIV_SET(reg, div) ((reg & ~SUN9I_CPUS_PLL4_DIV_MASK) | \
					   (div << SUN9I_CPUS_PLL4_DIV_SHIFT))

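/*
 * The divider field (bits 5:4) is 2 bits wide (divide by 1..4) and the
 * PLL4 pre-divider field (bits 12:8) is 5 bits wide (divide by 1..32);
 * both are stored as value - 1, giving a maximum total division of 128
 * when PLL4 is the selected parent.
 */
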
struct sun9i_a80_cpus_clk {
	struct clk_hw hw;
	void __iomem *reg;
};

#define to_sun9i_a80_cpus_clk(_hw) container_of(_hw, struct sun9i_a80_cpus_clk, hw)

static unsigned long sun9i_a80_cpus_clk_recalc_rate(struct clk_hw *hw,
						    unsigned long parent_rate)
{
	struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);
	unsigned long rate;
	u32 reg;

	/* Fetch the register value */
	reg = readl(cpus->reg);

	/* apply pre-divider first if parent is pll4 */
	if (SUN9I_CPUS_MUX_GET_PARENT(reg) == SUN9I_CPUS_MUX_PARENT_PLL4)
		parent_rate /= SUN9I_CPUS_PLL4_DIV_GET(reg) + 1;

	/* clk divider */
	rate = parent_rate / (SUN9I_CPUS_DIV_GET(reg) + 1);

	return rate;
}

static long sun9i_a80_cpus_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
				     u8 parent, unsigned long parent_rate)
{
	u8 div, pre_div = 1;

	/*
	 * clock can only divide, so we will never be able to achieve
	 * frequencies higher than the parent frequency
	 */
	if (parent_rate && rate > parent_rate)
		rate = parent_rate;

	div = DIV_ROUND_UP(parent_rate, rate);

	/* calculate pre-divider if parent is pll4 */
	if (parent == SUN9I_CPUS_MUX_PARENT_PLL4 && div > 4) {
		/* pre-divider is 1 ~ 32 */
		if (div < 32) {
			pre_div = div;
			div = 1;
		} else if (div < 64) {
			pre_div = DIV_ROUND_UP(div, 2);
			div = 2;
		} else if (div < 96) {
			pre_div = DIV_ROUND_UP(div, 3);
			div = 3;
		} else {
			pre_div = DIV_ROUND_UP(div, 4);
			div = 4;
		}
	}

	/* we were asked to pass back divider values */
	if (divp) {
		*divp = div - 1;
		*pre_divp = pre_div - 1;
	}

	return parent_rate / pre_div / div;
}

static int sun9i_a80_cpus_clk_determine_rate(struct clk_hw *clk,
					     struct clk_rate_request *req)
{
	struct clk_hw *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
	unsigned long rate = req->rate;

	/* find the parent that can help provide the fastest rate <= rate */
	num_parents = clk_hw_get_num_parents(clk);
	for (i = 0; i < num_parents; i++) {
		parent = clk_hw_get_parent_by_index(clk, i);
		if (!parent)
			continue;
		if (clk_hw_get_flags(clk) & CLK_SET_RATE_PARENT)
			parent_rate = clk_hw_round_rate(parent, rate);
		else
			parent_rate = clk_hw_get_rate(parent);

		child_rate = sun9i_a80_cpus_clk_round(rate, NULL, NULL, i,
						      parent_rate);

		if (child_rate <= rate && child_rate > best_child_rate) {
			best_parent = parent;
			best = parent_rate;
			best_child_rate = child_rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

	req->best_parent_hw = best_parent;
	req->best_parent_rate = best;
	req->rate = best_child_rate;

	return 0;
}

static int sun9i_a80_cpus_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);
	unsigned long flags;
	u8 div, pre_div, parent;
	u32 reg;

	spin_lock_irqsave(&sun9i_a80_cpus_lock, flags);

	reg = readl(cpus->reg);

	/* need to know which parent is used to apply pre-divider */
	parent = SUN9I_CPUS_MUX_GET_PARENT(reg);
	sun9i_a80_cpus_clk_round(rate, &div, &pre_div, parent, parent_rate);

	reg = SUN9I_CPUS_DIV_SET(reg, div);
	reg = SUN9I_CPUS_PLL4_DIV_SET(reg, pre_div);
	writel(reg, cpus->reg);

	spin_unlock_irqrestore(&sun9i_a80_cpus_lock, flags);

	return 0;
}

static const struct clk_ops sun9i_a80_cpus_clk_ops = {
	.determine_rate = sun9i_a80_cpus_clk_determine_rate,
	.recalc_rate = sun9i_a80_cpus_clk_recalc_rate,
	.set_rate = sun9i_a80_cpus_clk_set_rate,
};

static void sun9i_a80_cpus_setup(struct device_node *node)
{
	const char *clk_name = node->name;
	const char *parents[SUN9I_CPUS_MAX_PARENTS];
	struct resource res;
	struct sun9i_a80_cpus_clk *cpus;
	struct clk_mux *mux;
	struct clk *clk;
	int ret;

	cpus = kzalloc(sizeof(*cpus), GFP_KERNEL);
	if (!cpus)
		return;

	cpus->reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(cpus->reg))
		goto err_free_cpus;

	of_property_read_string(node, "clock-output-names", &clk_name);

	/* we have a mux, we will have >1 parents */
	ret = of_clk_parent_fill(node, parents, SUN9I_CPUS_MAX_PARENTS);
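	/* of_clk_parent_fill() returns the number of parents it found */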

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto err_unmap;

	/* set up clock properties */
	mux->reg = cpus->reg;
	mux->shift = SUN9I_CPUS_MUX_SHIFT;
	/* un-shifted mask is what mux_clk expects */
	mux->mask = SUN9I_CPUS_MUX_MASK >> SUN9I_CPUS_MUX_SHIFT;
	mux->lock = &sun9i_a80_cpus_lock;

	clk = clk_register_composite(NULL, clk_name, parents, ret,
				     &mux->hw, &clk_mux_ops,
				     &cpus->hw, &sun9i_a80_cpus_clk_ops,
				     NULL, NULL, 0);
	if (IS_ERR(clk))
		goto err_free_mux;

	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
	if (ret)
		goto err_unregister;

	return;

err_unregister:
	clk_unregister(clk);
err_free_mux:
	kfree(mux);
err_unmap:
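	/* of_io_request_and_map() both mapped and requested the region; undo both */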
	iounmap(cpus->reg);
	of_address_to_resource(node, 0, &res);
	release_mem_region(res.start, resource_size(&res));
err_free_cpus:
	kfree(cpus);
}
CLK_OF_DECLARE(sun9i_a80_cpus, "allwinner,sun9i-a80-cpus-clk",
	       sun9i_a80_cpus_setup);