/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
11*4882a593Smuzhiyun #include <linux/clk.h>
12*4882a593Smuzhiyun #include <linux/compiler.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/io.h>
15*4882a593Smuzhiyun #include <linux/sh_clk.h>
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #define CPG_CKSTP_BIT BIT(8)
18*4882a593Smuzhiyun
sh_clk_read(struct clk * clk)19*4882a593Smuzhiyun static unsigned int sh_clk_read(struct clk *clk)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun if (clk->flags & CLK_ENABLE_REG_8BIT)
22*4882a593Smuzhiyun return ioread8(clk->mapped_reg);
23*4882a593Smuzhiyun else if (clk->flags & CLK_ENABLE_REG_16BIT)
24*4882a593Smuzhiyun return ioread16(clk->mapped_reg);
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun return ioread32(clk->mapped_reg);
27*4882a593Smuzhiyun }
28*4882a593Smuzhiyun
sh_clk_write(int value,struct clk * clk)29*4882a593Smuzhiyun static void sh_clk_write(int value, struct clk *clk)
30*4882a593Smuzhiyun {
31*4882a593Smuzhiyun if (clk->flags & CLK_ENABLE_REG_8BIT)
32*4882a593Smuzhiyun iowrite8(value, clk->mapped_reg);
33*4882a593Smuzhiyun else if (clk->flags & CLK_ENABLE_REG_16BIT)
34*4882a593Smuzhiyun iowrite16(value, clk->mapped_reg);
35*4882a593Smuzhiyun else
36*4882a593Smuzhiyun iowrite32(value, clk->mapped_reg);
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun
/*
 * Enable an MSTP (module stop) clock: clear its stop bit and, when a
 * status register is provided, busy-wait for the module to report that
 * it is actually running.
 *
 * Returns 0 on success, -ETIMEDOUT if the status bit does not clear
 * within 1000 polls.
 */
static int sh_clk_mstp_enable(struct clk *clk)
{
	/* MSTP bits are "1 = stopped", so enabling means clearing the bit. */
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		/*
		 * Only the enable register is mapped; derive the status
		 * register's virtual address by applying the physical
		 * offset between the two registers to the mapped base.
		 */
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		/* Poll with the same access width used for the enable reg. */
		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = ioread8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = ioread16;
		else
			read = ioread32;

		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}
67*4882a593Smuzhiyun
sh_clk_mstp_disable(struct clk * clk)68*4882a593Smuzhiyun static void sh_clk_mstp_disable(struct clk *clk)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun
/* Ops for simple MSTP gate clocks; the rate simply follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
78*4882a593Smuzhiyun
sh_clk_mstp_register(struct clk * clks,int nr)79*4882a593Smuzhiyun int __init sh_clk_mstp_register(struct clk *clks, int nr)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun struct clk *clkp;
82*4882a593Smuzhiyun int ret = 0;
83*4882a593Smuzhiyun int k;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun for (k = 0; !ret && (k < nr); k++) {
86*4882a593Smuzhiyun clkp = clks + k;
87*4882a593Smuzhiyun clkp->ops = &sh_clk_mstp_clk_ops;
88*4882a593Smuzhiyun ret |= clk_register(clkp);
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun return ret;
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun /*
95*4882a593Smuzhiyun * Div/mult table lookup helpers
96*4882a593Smuzhiyun */
/* The divider table for div4/div6 clocks is stashed in clk->priv. */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}
101*4882a593Smuzhiyun
/* Convenience accessor for the div/mult table inside the divider table. */
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun /*
108*4882a593Smuzhiyun * Common div ops
109*4882a593Smuzhiyun */
/* Round @rate to the nearest entry in the clock's frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
114*4882a593Smuzhiyun
/*
 * Recalculate the current rate of a divider clock: rebuild the
 * frequency table from the (possibly changed) parent rate, then index
 * it with the divisor field read back from hardware.
 */
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	/* arch_flags, when set, carries the div4 valid-divisor bitmap. */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}
127*4882a593Smuzhiyun
sh_clk_div_set_rate(struct clk * clk,unsigned long rate)128*4882a593Smuzhiyun static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun struct clk_div_table *dt = clk_to_div_table(clk);
131*4882a593Smuzhiyun unsigned long value;
132*4882a593Smuzhiyun int idx;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun idx = clk_rate_table_find(clk, clk->freq_table, rate);
135*4882a593Smuzhiyun if (idx < 0)
136*4882a593Smuzhiyun return idx;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun value = sh_clk_read(clk);
139*4882a593Smuzhiyun value &= ~(clk->div_mask << clk->enable_bit);
140*4882a593Smuzhiyun value |= (idx << clk->enable_bit);
141*4882a593Smuzhiyun sh_clk_write(value, clk);
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun /* XXX: Should use a post-change notifier */
144*4882a593Smuzhiyun if (dt->kick)
145*4882a593Smuzhiyun dt->kick(clk);
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun return 0;
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun
sh_clk_div_enable(struct clk * clk)150*4882a593Smuzhiyun static int sh_clk_div_enable(struct clk *clk)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun if (clk->div_mask == SH_CLK_DIV6_MSK) {
153*4882a593Smuzhiyun int ret = sh_clk_div_set_rate(clk, clk->rate);
154*4882a593Smuzhiyun if (ret < 0)
155*4882a593Smuzhiyun return ret;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
159*4882a593Smuzhiyun return 0;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun
/* Gate a divider clock by setting its clock-stop (CKSTP) bit. */
static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}
179*4882a593Smuzhiyun
/* Ops for divider clocks that cannot be gated (no enable/disable). */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
185*4882a593Smuzhiyun
/* Ops for divider clocks that also have a CKSTP gate bit. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
193*4882a593Smuzhiyun
sh_clk_init_parent(struct clk * clk)194*4882a593Smuzhiyun static int __init sh_clk_init_parent(struct clk *clk)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun u32 val;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun if (clk->parent)
199*4882a593Smuzhiyun return 0;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun if (!clk->parent_table || !clk->parent_num)
202*4882a593Smuzhiyun return 0;
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun if (!clk->src_width) {
205*4882a593Smuzhiyun pr_err("sh_clk_init_parent: cannot select parent clock\n");
206*4882a593Smuzhiyun return -EINVAL;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun val = (sh_clk_read(clk) >> clk->src_shift);
210*4882a593Smuzhiyun val &= (1 << clk->src_width) - 1;
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun if (val >= clk->parent_num) {
213*4882a593Smuzhiyun pr_err("sh_clk_init_parent: parent table size failed\n");
214*4882a593Smuzhiyun return -EINVAL;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun clk_reparent(clk, clk->parent_table[val]);
218*4882a593Smuzhiyun if (!clk->parent) {
219*4882a593Smuzhiyun pr_err("sh_clk_init_parent: unable to set parent");
220*4882a593Smuzhiyun return -EINVAL;
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun return 0;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun
/*
 * Shared registration helper for div4/div6 clocks: allocates a single
 * cpufreq frequency-table array large enough for all @nr clocks, then
 * wires up @ops, @table and an initial parent for each clock.
 *
 * Returns 0 on success or a negative error code.
 * NOTE(review): freq_table is not freed if a later clk_register()
 * fails — earlier, already-registered clocks reference slices of it,
 * so freeing would leave dangling pointers.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
		struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One extra slot per clock for the CPUFREQ_TABLE_END sentinel. */
	freq_table_size *= (nr_divs + 1);
	freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		/* Each clock owns its slice of the shared allocation. */
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun /*
261*4882a593Smuzhiyun * div6 support
262*4882a593Smuzhiyun */
/* div6 divisor encoding: register value N selects a divide-by-(N+1). */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

/* div6 clocks are divide-only; no multipliers are needed. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

/* No kick callback needed for div6 hardware. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
278*4882a593Smuzhiyun
/*
 * Switch a div6 clock to a new parent: check that @parent appears in
 * the clock's parent table, reparent, program the source-select
 * bitfield and rebuild the frequency table for the new parent rate.
 *
 * Returns 0 on success, -EINVAL without a parent table, -ENODEV when
 * @parent is not a valid choice, or a clk_reparent() error.
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	/* Clear the old source-select bits, then write the new index. */
	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}
311*4882a593Smuzhiyun
/* Ops for gateable div6 clocks that support runtime parent switching. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
320*4882a593Smuzhiyun
/* Register @nr single-parent div6 clocks (gateable, fixed parent). */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}
326*4882a593Smuzhiyun
/* Register @nr div6 clocks whose parent can be reselected at runtime. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun /*
334*4882a593Smuzhiyun * div4 support
335*4882a593Smuzhiyun */
/*
 * Switch a div4 clock between its internal and external parent by
 * toggling bit 7 of the control register, then rebuild the frequency
 * table for the new parent rate.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
364*4882a593Smuzhiyun
/* Ops for gateable div4 clocks that support parent switching. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
373*4882a593Smuzhiyun
/* Register @nr always-on div4 clocks using the board-supplied @table. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
379*4882a593Smuzhiyun
/* Register @nr gateable div4 clocks using the board-supplied @table. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}
386*4882a593Smuzhiyun
/* Register @nr div4 clocks that can switch parents at runtime. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun /* FSI-DIV */
fsidiv_recalc(struct clk * clk)395*4882a593Smuzhiyun static unsigned long fsidiv_recalc(struct clk *clk)
396*4882a593Smuzhiyun {
397*4882a593Smuzhiyun u32 value;
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun value = __raw_readl(clk->mapping->base);
400*4882a593Smuzhiyun
401*4882a593Smuzhiyun value >>= 16;
402*4882a593Smuzhiyun if (value < 2)
403*4882a593Smuzhiyun return clk->parent->rate;
404*4882a593Smuzhiyun
405*4882a593Smuzhiyun return clk->parent->rate / value;
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun
/* Round @rate to what an integer divisor in [1, 0xffff] can produce. */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
412*4882a593Smuzhiyun
/* Disable by clearing the whole register (divisor and enable bits). */
static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}
417*4882a593Smuzhiyun
fsidiv_enable(struct clk * clk)418*4882a593Smuzhiyun static int fsidiv_enable(struct clk *clk)
419*4882a593Smuzhiyun {
420*4882a593Smuzhiyun u32 value;
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun value = __raw_readl(clk->mapping->base) >> 16;
423*4882a593Smuzhiyun if (value < 2)
424*4882a593Smuzhiyun return 0;
425*4882a593Smuzhiyun
426*4882a593Smuzhiyun __raw_writel((value << 16) | 0x3, clk->mapping->base);
427*4882a593Smuzhiyun
428*4882a593Smuzhiyun return 0;
429*4882a593Smuzhiyun }
430*4882a593Smuzhiyun
fsidiv_set_rate(struct clk * clk,unsigned long rate)431*4882a593Smuzhiyun static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
432*4882a593Smuzhiyun {
433*4882a593Smuzhiyun int idx;
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun idx = (clk->parent->rate / rate) & 0xffff;
436*4882a593Smuzhiyun if (idx < 2)
437*4882a593Smuzhiyun __raw_writel(0, clk->mapping->base);
438*4882a593Smuzhiyun else
439*4882a593Smuzhiyun __raw_writel(idx << 16, clk->mapping->base);
440*4882a593Smuzhiyun
441*4882a593Smuzhiyun return 0;
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun
/* Full set of ops for FSI-DIV clocks (memory-mapped, __raw accessors). */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};
451*4882a593Smuzhiyun
/*
 * Register @nr FSI-DIV clocks. Each clock's enable_reg (set up via
 * SH_CLK_FSIDIV()) actually carries the physical register address; it
 * is moved into a freshly allocated clk_mapping and then cleared.
 *
 * Returns 0 on success or -ENOMEM if a mapping allocation fails.
 * NOTE(review): mappings allocated for earlier iterations are not
 * freed on failure, and the clk_register() result is ignored.
 */
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0; /* remove .enable_reg */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		clk_register(&clks[i]);
	}

	return 0;
}
478