1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2015 Atmel Corporation,
4*4882a593Smuzhiyun * Nicolas Ferre <nicolas.ferre@atmel.com>
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/bitfield.h>
10*4882a593Smuzhiyun #include <linux/clk-provider.h>
11*4882a593Smuzhiyun #include <linux/clkdev.h>
12*4882a593Smuzhiyun #include <linux/clk/at91_pmc.h>
13*4882a593Smuzhiyun #include <linux/of.h>
14*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
15*4882a593Smuzhiyun #include <linux/regmap.h>
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include "pmc.h"
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #define GENERATED_MAX_DIV 255
20*4882a593Smuzhiyun
/*
 * Driver-private state for one generated clock (GCK).
 * Embeds the clk_hw so container_of() can recover it from framework
 * callbacks (see to_clk_generated below).
 */
struct clk_generated {
	struct clk_hw hw;		/* clk framework handle (must stay first-class) */
	struct regmap *regmap;		/* PMC register map */
	struct clk_range range;		/* allowed output rate range (0 = no bound) */
	spinlock_t *lock;		/* serializes PCR select + read/modify/write */
	u32 *mux_table;			/* optional index->register-value map for parents */
	u32 id;				/* peripheral ID written to PCR PID field */
	u32 gckdiv;			/* cached divisor field; actual divisor is gckdiv + 1 */
	const struct clk_pcr_layout *layout;	/* SoC-specific PCR field layout */
	u8 parent_id;			/* cached parent selection (register value) */
	int chg_pid;			/* parent index whose rate may be changed, or <0 */
};

#define to_clk_generated(hw) \
	container_of(hw, struct clk_generated, hw)
36*4882a593Smuzhiyun
/*
 * Enable the generated clock: program parent source, divisor and the
 * GCKEN bit into the PCR register for this peripheral ID.
 */
static int clk_generated_enable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
		 __func__, gck->gckdiv, gck->parent_id);

	spin_lock_irqsave(gck->lock, flags);
	/* PCR is banked by peripheral ID: select the PID first. */
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	/* Then read-modify-write source, divisor, write-command and enable. */
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
			   gck->layout->cmd | AT91_PMC_PCR_GCKEN,
			   field_prep(gck->layout->gckcss_mask, gck->parent_id) |
			   gck->layout->cmd |
			   FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
			   AT91_PMC_PCR_GCKEN);
	spin_unlock_irqrestore(gck->lock, flags);
	return 0;
}
58*4882a593Smuzhiyun
/* Disable the generated clock by clearing GCKEN for this peripheral ID. */
static void clk_generated_disable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	/* Select the PID before modifying its PCR fields. */
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	/* Issue the write command with GCKEN left clear. */
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   gck->layout->cmd | AT91_PMC_PCR_GCKEN,
			   gck->layout->cmd);
	spin_unlock_irqrestore(gck->lock, flags);
}
72*4882a593Smuzhiyun
/* Return 1 if GCKEN is set in hardware for this peripheral ID, else 0. */
static int clk_generated_is_enabled(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;
	unsigned int status;

	spin_lock_irqsave(gck->lock, flags);
	/* Select the PID, then read back the banked PCR contents. */
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &status);
	spin_unlock_irqrestore(gck->lock, flags);

	return !!(status & AT91_PMC_PCR_GCKEN);
}
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun static unsigned long
clk_generated_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)89*4882a593Smuzhiyun clk_generated_recalc_rate(struct clk_hw *hw,
90*4882a593Smuzhiyun unsigned long parent_rate)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun struct clk_generated *gck = to_clk_generated(hw);
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun
/*
 * Evaluate one (parent, divisor) candidate against the rate request and,
 * if it is within range and at least as close as the best seen so far,
 * record it in *best_rate/*best_diff and in the request itself.
 * A div of 0 is treated as "no division" (rate = parent rate).
 */
static void clk_generated_best_diff(struct clk_rate_request *req,
				    struct clk_hw *parent,
				    unsigned long parent_rate, u32 div,
				    int *best_diff, long *best_rate)
{
	unsigned long candidate = div ? parent_rate / div : parent_rate;
	int diff;

	/* Candidates outside the requested window are discarded. */
	if (candidate < req->min_rate || candidate > req->max_rate)
		return;

	diff = abs(req->rate - candidate);

	/* Keep the current best if it is strictly closer. */
	if (*best_diff >= 0 && diff > *best_diff)
		return;

	*best_rate = candidate;
	*best_diff = diff;
	req->best_parent_rate = parent_rate;
	req->best_parent_hw = parent;
}
122*4882a593Smuzhiyun
/*
 * Pick the best (parent, divisor) combination for the requested rate.
 * Phase 1 scans the fixed-rate parents; phase 2 (only if chg_pid is set)
 * additionally tries re-rating the one changeable parent.
 */
static int clk_generated_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_generated *gck = to_clk_generated(hw);
	struct clk_hw *parent = NULL;
	struct clk_rate_request req_parent = *req;
	long best_rate = -EINVAL;
	unsigned long min_rate, parent_rate;
	int best_diff = -1;
	int i;
	u32 div;

	/* do not look for a rate that is outside of our range */
	if (gck->range.max && req->rate > gck->range.max)
		req->rate = gck->range.max;
	if (gck->range.min && req->rate < gck->range.min)
		req->rate = gck->range.min;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		/* The changeable parent is handled separately below. */
		if (gck->chg_pid == i)
			continue;

		parent = clk_hw_get_parent_by_index(hw, i);
		if (!parent)
			continue;

		parent_rate = clk_hw_get_rate(parent);
		/* Lowest rate reachable from this parent with the max divisor. */
		min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
		if (!parent_rate ||
		    (gck->range.max && min_rate > gck->range.max))
			continue;

		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
		if (div > GENERATED_MAX_DIV + 1)
			div = GENERATED_MAX_DIV + 1;

		clk_generated_best_diff(req, parent, parent_rate, div,
					&best_diff, &best_rate);

		/* Exact match: no point scanning further. */
		if (!best_diff)
			break;
	}

	/*
	 * The audio_pll rate can be modified, unlike the five others clocks
	 * that should never be altered.
	 * The audio_pll can technically be used by multiple consumers. However,
	 * with the rate locking, the first consumer to enable to clock will be
	 * the one definitely setting the rate of the clock.
	 * Since audio IPs are most likely to request the same rate, we enforce
	 * that the only clks able to modify gck rate are those of audio IPs.
	 */

	if (gck->chg_pid < 0)
		goto end;

	parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
	if (!parent)
		goto end;

	/* Ask the changeable parent for req->rate * div, for each divisor. */
	for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
		req_parent.rate = req->rate * div;
		if (__clk_determine_rate(parent, &req_parent))
			continue;
		clk_generated_best_diff(req, parent, req_parent.rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

end:
	/*
	 * NOTE(review): this dereferences req->best_parent_hw unconditionally;
	 * presumably the clk core pre-populates it before calling
	 * determine_rate — confirm against the framework.
	 */
	pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
		 __func__, best_rate,
		 __clk_get_name((req->best_parent_hw)->clk),
		 req->best_parent_rate);

	if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max))
		return -EINVAL;

	req->rate = best_rate;
	return 0;
}
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun /* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
clk_generated_set_parent(struct clk_hw * hw,u8 index)208*4882a593Smuzhiyun static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun struct clk_generated *gck = to_clk_generated(hw);
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun if (index >= clk_hw_get_num_parents(hw))
213*4882a593Smuzhiyun return -EINVAL;
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun if (gck->mux_table)
216*4882a593Smuzhiyun gck->parent_id = clk_mux_index_to_val(gck->mux_table, 0, index);
217*4882a593Smuzhiyun else
218*4882a593Smuzhiyun gck->parent_id = index;
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun return 0;
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
clk_generated_get_parent(struct clk_hw * hw)223*4882a593Smuzhiyun static u8 clk_generated_get_parent(struct clk_hw *hw)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun struct clk_generated *gck = to_clk_generated(hw);
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun return gck->parent_id;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun /* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
clk_generated_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)231*4882a593Smuzhiyun static int clk_generated_set_rate(struct clk_hw *hw,
232*4882a593Smuzhiyun unsigned long rate,
233*4882a593Smuzhiyun unsigned long parent_rate)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun struct clk_generated *gck = to_clk_generated(hw);
236*4882a593Smuzhiyun u32 div;
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun if (!rate)
239*4882a593Smuzhiyun return -EINVAL;
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun if (gck->range.max && rate > gck->range.max)
242*4882a593Smuzhiyun return -EINVAL;
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun div = DIV_ROUND_CLOSEST(parent_rate, rate);
245*4882a593Smuzhiyun if (div > GENERATED_MAX_DIV + 1 || !div)
246*4882a593Smuzhiyun return -EINVAL;
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun gck->gckdiv = div - 1;
249*4882a593Smuzhiyun return 0;
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun
/* clk_ops for generated clocks; rate/parent changes are gated (see flags). */
static const struct clk_ops generated_ops = {
	.enable = clk_generated_enable,
	.disable = clk_generated_disable,
	.is_enabled = clk_generated_is_enabled,
	.recalc_rate = clk_generated_recalc_rate,
	.determine_rate = clk_generated_determine_rate,
	.get_parent = clk_generated_get_parent,
	.set_parent = clk_generated_set_parent,
	.set_rate = clk_generated_set_rate,
};
262*4882a593Smuzhiyun
/**
 * clk_generated_startup - Initialize a given clock to its default parent and
 * divisor parameter.
 *
 * @gck: Generated clock to set the startup parameters for.
 *
 * Take parameters from the hardware and update local clock configuration
 * accordingly.
 */
static void clk_generated_startup(struct clk_generated *gck)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	/* Select the PID, then read the banked PCR contents. */
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &tmp);
	spin_unlock_irqrestore(gck->lock, flags);

	/* Cache the hardware's current source and divisor fields. */
	gck->parent_id = field_get(gck->layout->gckcss_mask, tmp);
	gck->gckdiv = FIELD_GET(AT91_PMC_PCR_GCKDIV_MASK, tmp);
}
286*4882a593Smuzhiyun
/**
 * at91_clk_register_generated - register a generated clock with the clk core
 * @regmap:	PMC register map
 * @lock:	spinlock protecting PCR accesses
 * @layout:	SoC-specific PCR field layout
 * @name:	clock name
 * @parent_names: array of @num_parents parent clock names
 * @mux_table:	optional index->register-value map for parent selection
 * @num_parents: number of entries in @parent_names
 * @id:		peripheral ID
 * @range:	allowed output rate range (copied)
 * @chg_pid:	parent index whose rate may be changed, or negative if none
 *
 * Returns the registered clk_hw on success, or an ERR_PTR on failure.
 * On failure the allocated state is freed; on success it is owned by the
 * clk framework.
 */
struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
			    const struct clk_pcr_layout *layout,
			    const char *name, const char **parent_names,
			    u32 *mux_table, u8 num_parents, u8 id,
			    const struct clk_range *range,
			    int chg_pid)
{
	struct clk_generated *gck;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	gck = kzalloc(sizeof(*gck), GFP_KERNEL);
	if (!gck)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &generated_ops;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
	/* A changeable parent lets rate requests propagate upward. */
	if (chg_pid >= 0)
		init.flags |= CLK_SET_RATE_PARENT;

	gck->id = id;
	gck->hw.init = &init;
	gck->regmap = regmap;
	gck->lock = lock;
	gck->range = *range;
	gck->chg_pid = chg_pid;
	gck->layout = layout;
	gck->mux_table = mux_table;

	/* Seed parent_id/gckdiv from the current hardware state. */
	clk_generated_startup(gck);
	hw = &gck->hw;
	ret = clk_hw_register(NULL, &gck->hw);
	if (ret) {
		kfree(gck);
		hw = ERR_PTR(ret);
	} else {
		pmc_register_id(id);
	}

	return hw;
}
333