// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
 *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
 *
 * Baikal-T1 CCU Dividers interface driver
 */

12*4882a593Smuzhiyun #define pr_fmt(fmt) "bt1-ccu-div: " fmt
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <linux/printk.h>
16*4882a593Smuzhiyun #include <linux/bits.h>
17*4882a593Smuzhiyun #include <linux/bitfield.h>
18*4882a593Smuzhiyun #include <linux/slab.h>
19*4882a593Smuzhiyun #include <linux/clk-provider.h>
20*4882a593Smuzhiyun #include <linux/of.h>
21*4882a593Smuzhiyun #include <linux/spinlock.h>
22*4882a593Smuzhiyun #include <linux/regmap.h>
23*4882a593Smuzhiyun #include <linux/delay.h>
24*4882a593Smuzhiyun #include <linux/time64.h>
25*4882a593Smuzhiyun #include <linux/debugfs.h>
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun #include "ccu-div.h"
28*4882a593Smuzhiyun
/* Divider CTL register offset and its fields layout */
#define CCU_DIV_CTL			0x00
#define CCU_DIV_CTL_EN			BIT(0)	/* Divider clock enable */
#define CCU_DIV_CTL_RST			BIT(1)	/* Domain reset flag */
#define CCU_DIV_CTL_SET_CLKDIV		BIT(2)	/* Latch the new divider value */
#define CCU_DIV_CTL_CLKDIV_FLD		4	/* CLKDIV field LSB position */
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
	GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED	BIT(27)	/* Lock flag (shifted layout) */
#define CCU_DIV_CTL_GATE_REF_BUF	BIT(28)	/* Set to gate the ref buffer */
#define CCU_DIV_CTL_LOCK_NORMAL		BIT(31)	/* Lock flag (normal layout) */

/* Reset pulse duration and the divider lock polling settings */
#define CCU_DIV_RST_DELAY_US		1
#define CCU_DIV_LOCK_CHECK_RETRIES	50

/* CLKDIV field value limits (max is derived from the field mask) */
#define CCU_DIV_CLKDIV_MIN		0
#define CCU_DIV_CLKDIV_MAX(_mask) \
	((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /*
48*4882a593Smuzhiyun * Use the next two methods until there are generic field setter and
49*4882a593Smuzhiyun * getter available with non-constant mask support.
50*4882a593Smuzhiyun */
/* Extract the CLKDIV field value from the CTL register data. */
static inline u32 ccu_div_get(u32 mask, u32 val)
{
	u32 fld = val & mask;

	return fld >> CCU_DIV_CTL_CLKDIV_FLD;
}
55*4882a593Smuzhiyun
/* Prepare the CLKDIV field data to be written to the CTL register. */
static inline u32 ccu_div_prep(u32 mask, u32 val)
{
	u32 fld = val << CCU_DIV_CTL_CLKDIV_FLD;

	return fld & mask;
}
60*4882a593Smuzhiyun
ccu_div_lock_delay_ns(unsigned long ref_clk,unsigned long div)61*4882a593Smuzhiyun static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
62*4882a593Smuzhiyun unsigned long div)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun do_div(ns, ref_clk);
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun return ns;
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun
/*
 * Calculate the output frequency for the given reference clock rate and
 * divider value. A zero divider is treated as a divider of one.
 */
static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{
	if (!div)
		div = 1;

	return ref_clk / div;
}
76*4882a593Smuzhiyun
/*
 * Latch the new divider value into the hardware and poll for the divider
 * lock flag. Returns 0 on success or -ETIMEDOUT if the lock flag hasn't
 * been set within CCU_DIV_LOCK_CHECK_RETRIES polling iterations.
 *
 * Must be called with div->lock held by the caller.
 */
static int ccu_div_var_update_clkdiv(struct ccu_div *div,
				     unsigned long parent_rate,
				     unsigned long divider)
{
	unsigned long nd;
	u32 val = 0;
	u32 lock;
	int count;

	/* One polling step: up to four output clock periods (in ns). */
	nd = ccu_div_lock_delay_ns(parent_rate, divider);

	/* Some dividers report the lock status in bit 27 instead of 31. */
	if (div->features & CCU_DIV_LOCK_SHIFTED)
		lock = CCU_DIV_CTL_LOCK_SHIFTED;
	else
		lock = CCU_DIV_CTL_LOCK_NORMAL;

	/* Command the block to pick up the new CLKDIV field value. */
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);

	/*
	 * Until there is nsec-version of readl_poll_timeout() is available
	 * we have to implement the next polling loop.
	 */
	count = CCU_DIV_LOCK_CHECK_RETRIES;
	do {
		ndelay(nd);
		regmap_read(div->sys_regs, div->reg_ctl, &val);
		if (val & lock)
			return 0;
	} while (--count);

	return -ETIMEDOUT;
}
110*4882a593Smuzhiyun
/*
 * Enable a variable divider: make sure the divider has locked the currently
 * programmed rate first, then set the EN flag. Returns 0 on success, a
 * negative errno if there is no parent or the divider lock timed out.
 */
static int ccu_div_var_enable(struct clk_hw *hw)
{
	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;
	u32 val = 0;
	int ret;

	if (!parent_hw) {
		pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
		return -EINVAL;
	}

	/* Nothing to do if the divider is already enabled. */
	regmap_read(div->sys_regs, div->reg_ctl, &val);
	if (val & CCU_DIV_CTL_EN)
		return 0;

	/*
	 * NOTE(review): the divider value passed below comes from the CTL
	 * read performed above, before the spinlock is taken - presumably
	 * no concurrent CLKDIV update can happen here; confirm against the
	 * clock framework callers.
	 */
	spin_lock_irqsave(&div->lock, flags);
	ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
					ccu_div_get(div->mask, val));
	if (!ret)
		regmap_update_bits(div->sys_regs, div->reg_ctl,
				   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}
140*4882a593Smuzhiyun
ccu_div_gate_enable(struct clk_hw * hw)141*4882a593Smuzhiyun static int ccu_div_gate_enable(struct clk_hw *hw)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
144*4882a593Smuzhiyun unsigned long flags;
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun spin_lock_irqsave(&div->lock, flags);
147*4882a593Smuzhiyun regmap_update_bits(div->sys_regs, div->reg_ctl,
148*4882a593Smuzhiyun CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
149*4882a593Smuzhiyun spin_unlock_irqrestore(&div->lock, flags);
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun return 0;
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun
ccu_div_gate_disable(struct clk_hw * hw)154*4882a593Smuzhiyun static void ccu_div_gate_disable(struct clk_hw *hw)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
157*4882a593Smuzhiyun unsigned long flags;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun spin_lock_irqsave(&div->lock, flags);
160*4882a593Smuzhiyun regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
161*4882a593Smuzhiyun spin_unlock_irqrestore(&div->lock, flags);
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun
ccu_div_gate_is_enabled(struct clk_hw * hw)164*4882a593Smuzhiyun static int ccu_div_gate_is_enabled(struct clk_hw *hw)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
167*4882a593Smuzhiyun u32 val = 0;
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun regmap_read(div->sys_regs, div->reg_ctl, &val);
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun return !!(val & CCU_DIV_CTL_EN);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun
ccu_div_buf_enable(struct clk_hw * hw)174*4882a593Smuzhiyun static int ccu_div_buf_enable(struct clk_hw *hw)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
177*4882a593Smuzhiyun unsigned long flags;
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun spin_lock_irqsave(&div->lock, flags);
180*4882a593Smuzhiyun regmap_update_bits(div->sys_regs, div->reg_ctl,
181*4882a593Smuzhiyun CCU_DIV_CTL_GATE_REF_BUF, 0);
182*4882a593Smuzhiyun spin_unlock_irqrestore(&div->lock, flags);
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun return 0;
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun
ccu_div_buf_disable(struct clk_hw * hw)187*4882a593Smuzhiyun static void ccu_div_buf_disable(struct clk_hw *hw)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
190*4882a593Smuzhiyun unsigned long flags;
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun spin_lock_irqsave(&div->lock, flags);
193*4882a593Smuzhiyun regmap_update_bits(div->sys_regs, div->reg_ctl,
194*4882a593Smuzhiyun CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
195*4882a593Smuzhiyun spin_unlock_irqrestore(&div->lock, flags);
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
ccu_div_buf_is_enabled(struct clk_hw * hw)198*4882a593Smuzhiyun static int ccu_div_buf_is_enabled(struct clk_hw *hw)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
201*4882a593Smuzhiyun u32 val = 0;
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun regmap_read(div->sys_regs, div->reg_ctl, &val);
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun return !(val & CCU_DIV_CTL_GATE_REF_BUF);
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun
ccu_div_var_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)208*4882a593Smuzhiyun static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
209*4882a593Smuzhiyun unsigned long parent_rate)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
212*4882a593Smuzhiyun unsigned long divider;
213*4882a593Smuzhiyun u32 val = 0;
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun regmap_read(div->sys_regs, div->reg_ctl, &val);
216*4882a593Smuzhiyun divider = ccu_div_get(div->mask, val);
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun return ccu_div_calc_freq(parent_rate, divider);
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun
ccu_div_var_calc_divider(unsigned long rate,unsigned long parent_rate,unsigned int mask)221*4882a593Smuzhiyun static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
222*4882a593Smuzhiyun unsigned long parent_rate,
223*4882a593Smuzhiyun unsigned int mask)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun unsigned long divider;
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun divider = parent_rate / rate;
228*4882a593Smuzhiyun return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
229*4882a593Smuzhiyun CCU_DIV_CLKDIV_MAX(mask));
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun
ccu_div_var_round_rate(struct clk_hw * hw,unsigned long rate,unsigned long * parent_rate)232*4882a593Smuzhiyun static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
233*4882a593Smuzhiyun unsigned long *parent_rate)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
236*4882a593Smuzhiyun unsigned long divider;
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun return ccu_div_calc_freq(*parent_rate, divider);
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /*
244*4882a593Smuzhiyun * This method is used for the clock divider blocks, which support the
245*4882a593Smuzhiyun * on-the-fly rate change. So due to lacking the EN bit functionality
246*4882a593Smuzhiyun * they can't be gated before the rate adjustment.
247*4882a593Smuzhiyun */
static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;
	int ret;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
		/* Zero divider value means the input clock is bypassed. */
		divider = 0;
	} else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
		/*
		 * NOTE(review): divider values 1-3 appear unusable on such
		 * blocks - 1 and 2 fall back to the bypass (0) and 3 is
		 * rounded up to 4. Confirm against the CCU documentation.
		 */
		if (divider == 1 || divider == 2)
			divider = 0;
		else if (divider == 3)
			divider = 4;
	}

	val = ccu_div_prep(div->mask, divider);

	/* Write the new divider and wait for the output clock to lock. */
	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
	ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun /*
279*4882a593Smuzhiyun * This method is used for the clock divider blocks, which don't support
280*4882a593Smuzhiyun * the on-the-fly rate change.
281*4882a593Smuzhiyun */
ccu_div_var_set_rate_fast(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)282*4882a593Smuzhiyun static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
283*4882a593Smuzhiyun unsigned long parent_rate)
284*4882a593Smuzhiyun {
285*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
286*4882a593Smuzhiyun unsigned long flags, divider;
287*4882a593Smuzhiyun u32 val;
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
290*4882a593Smuzhiyun val = ccu_div_prep(div->mask, divider);
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun /*
293*4882a593Smuzhiyun * Also disable the clock divider block if it was enabled by default
294*4882a593Smuzhiyun * or by the bootloader.
295*4882a593Smuzhiyun */
296*4882a593Smuzhiyun spin_lock_irqsave(&div->lock, flags);
297*4882a593Smuzhiyun regmap_update_bits(div->sys_regs, div->reg_ctl,
298*4882a593Smuzhiyun div->mask | CCU_DIV_CTL_EN, val);
299*4882a593Smuzhiyun spin_unlock_irqrestore(&div->lock, flags);
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun return 0;
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun
ccu_div_fixed_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)304*4882a593Smuzhiyun static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
305*4882a593Smuzhiyun unsigned long parent_rate)
306*4882a593Smuzhiyun {
307*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun return ccu_div_calc_freq(parent_rate, div->divider);
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun
ccu_div_fixed_round_rate(struct clk_hw * hw,unsigned long rate,unsigned long * parent_rate)312*4882a593Smuzhiyun static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
313*4882a593Smuzhiyun unsigned long *parent_rate)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun return ccu_div_calc_freq(*parent_rate, div->divider);
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun
/*
 * The fixed divider can't change its rate: accept the request without
 * touching the hardware (round_rate already reported the real rate).
 */
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	return 0;
}
325*4882a593Smuzhiyun
ccu_div_reset_domain(struct ccu_div * div)326*4882a593Smuzhiyun int ccu_div_reset_domain(struct ccu_div *div)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun unsigned long flags;
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
331*4882a593Smuzhiyun return -EINVAL;
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun spin_lock_irqsave(&div->lock, flags);
334*4882a593Smuzhiyun regmap_update_bits(div->sys_regs, div->reg_ctl,
335*4882a593Smuzhiyun CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
336*4882a593Smuzhiyun spin_unlock_irqrestore(&div->lock, flags);
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun /* The next delay must be enough to cover all the resets. */
339*4882a593Smuzhiyun udelay(CCU_DIV_RST_DELAY_US);
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun return 0;
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
345*4882a593Smuzhiyun
/* Descriptor of a single CTL register flag exposed via debugfs */
struct ccu_div_dbgfs_bit {
	struct ccu_div *div;	/* Divider the flag belongs to */
	const char *name;	/* debugfs file name */
	u32 mask;		/* CTL register flag mask */
};

#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) {	\
	.name = _name,				\
	.mask = _mask				\
}

/*
 * Template descriptors of all the exposable CTL flags. NOTE the gate and
 * buffer debug-init methods below rely on the positions of the "div_en"
 * (index 0) and "div_buf" (index 3) entries - keep the order intact.
 */
static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
	CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
	CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
	CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
	CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
	CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};

#define CCU_DIV_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_div_bits)
366*4882a593Smuzhiyun
367*4882a593Smuzhiyun /*
368*4882a593Smuzhiyun * It can be dangerous to change the Divider settings behind clock framework
369*4882a593Smuzhiyun * back, therefore we don't provide any kernel config based compile time option
370*4882a593Smuzhiyun * for this feature to enable.
371*4882a593Smuzhiyun */
372*4882a593Smuzhiyun #undef CCU_DIV_ALLOW_WRITE_DEBUGFS
373*4882a593Smuzhiyun #ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS
374*4882a593Smuzhiyun
/* debugfs write handler: set or clear the described CTL register flag. */
static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *cdiv = bit->div;
	unsigned long irqflags;

	spin_lock_irqsave(&cdiv->lock, irqflags);
	regmap_update_bits(cdiv->sys_regs, cdiv->reg_ctl,
			   bit->mask, val ? bit->mask : 0);
	spin_unlock_irqrestore(&cdiv->lock, irqflags);

	return 0;
}
388*4882a593Smuzhiyun
/* debugfs write handler: update the CLKDIV field of a variable divider. */
static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *cdiv = priv;
	unsigned long irqflags;
	u32 data;

	/* Saturate the requested value to the CLKDIV field range. */
	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
		      CCU_DIV_CLKDIV_MAX(cdiv->mask));
	data = ccu_div_prep(cdiv->mask, val);

	spin_lock_irqsave(&cdiv->lock, irqflags);
	regmap_update_bits(cdiv->sys_regs, cdiv->reg_ctl, cdiv->mask, data);
	spin_unlock_irqrestore(&cdiv->lock, irqflags);

	return 0;
}
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun #define ccu_div_dbgfs_mode 0644
407*4882a593Smuzhiyun
408*4882a593Smuzhiyun #else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun #define ccu_div_dbgfs_bit_set NULL
411*4882a593Smuzhiyun #define ccu_div_dbgfs_var_clkdiv_set NULL
412*4882a593Smuzhiyun #define ccu_div_dbgfs_mode 0444
413*4882a593Smuzhiyun
414*4882a593Smuzhiyun #endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
415*4882a593Smuzhiyun
ccu_div_dbgfs_bit_get(void * priv,u64 * val)416*4882a593Smuzhiyun static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
417*4882a593Smuzhiyun {
418*4882a593Smuzhiyun const struct ccu_div_dbgfs_bit *bit = priv;
419*4882a593Smuzhiyun struct ccu_div *div = bit->div;
420*4882a593Smuzhiyun u32 data = 0;
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun regmap_read(div->sys_regs, div->reg_ctl, &data);
423*4882a593Smuzhiyun *val = !!(data & bit->mask);
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun return 0;
426*4882a593Smuzhiyun }
427*4882a593Smuzhiyun DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
428*4882a593Smuzhiyun ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");
429*4882a593Smuzhiyun
ccu_div_dbgfs_var_clkdiv_get(void * priv,u64 * val)430*4882a593Smuzhiyun static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
431*4882a593Smuzhiyun {
432*4882a593Smuzhiyun struct ccu_div *div = priv;
433*4882a593Smuzhiyun u32 data = 0;
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun regmap_read(div->sys_regs, div->reg_ctl, &data);
436*4882a593Smuzhiyun *val = ccu_div_get(div->mask, data);
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun return 0;
439*4882a593Smuzhiyun }
440*4882a593Smuzhiyun DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
441*4882a593Smuzhiyun ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");
442*4882a593Smuzhiyun
ccu_div_dbgfs_fixed_clkdiv_get(void * priv,u64 * val)443*4882a593Smuzhiyun static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
444*4882a593Smuzhiyun {
445*4882a593Smuzhiyun struct ccu_div *div = priv;
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun *val = div->divider;
448*4882a593Smuzhiyun
449*4882a593Smuzhiyun return 0;
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
452*4882a593Smuzhiyun ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");
453*4882a593Smuzhiyun
/*
 * Create the debugfs nodes of a variable divider. "div_bypass" and
 * "div_lock" are always exposed, "div_en" and "div_rst" only when the
 * corresponding capability is there, and "div_buf" never is.
 */
static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bits;
	int didx, bidx, num = 2;
	const char *name;

	/* Two flags are unconditional, plus the optional EN and RST ones. */
	num += !!(div->flags & CLK_SET_RATE_GATE) +
	       !!(div->features & CCU_DIV_RESET_DOMAIN);

	/*
	 * The descriptors array is referred to by the debugfs files, so it
	 * is intentionally not freed here.
	 */
	bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
		name = ccu_div_bits[bidx].name;
		if (!(div->flags & CLK_SET_RATE_GATE) &&
		    !strcmp("div_en", name)) {
			continue;
		}

		if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
		    !strcmp("div_rst", name)) {
			continue;
		}

		if (!strcmp("div_buf", name))
			continue;

		bits[didx] = ccu_div_bits[bidx];
		bits[didx].div = div;

		/* Some dividers have the lock flag in bit 27 instead of 31. */
		if (div->features & CCU_DIV_LOCK_SHIFTED &&
		    !strcmp("div_lock", name)) {
			bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
		}

		debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
					   dentry, &bits[didx],
					   &ccu_div_dbgfs_bit_fops);
		++didx;
	}

	debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
				   div, &ccu_div_dbgfs_var_clkdiv_fops);
}
500*4882a593Smuzhiyun
ccu_div_gate_debug_init(struct clk_hw * hw,struct dentry * dentry)501*4882a593Smuzhiyun static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
502*4882a593Smuzhiyun {
503*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
504*4882a593Smuzhiyun struct ccu_div_dbgfs_bit *bit;
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun bit = kmalloc(sizeof(*bit), GFP_KERNEL);
507*4882a593Smuzhiyun if (!bit)
508*4882a593Smuzhiyun return;
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun *bit = ccu_div_bits[0];
511*4882a593Smuzhiyun bit->div = div;
512*4882a593Smuzhiyun debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
513*4882a593Smuzhiyun &ccu_div_dbgfs_bit_fops);
514*4882a593Smuzhiyun
515*4882a593Smuzhiyun debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
516*4882a593Smuzhiyun &ccu_div_dbgfs_fixed_clkdiv_fops);
517*4882a593Smuzhiyun }
518*4882a593Smuzhiyun
ccu_div_buf_debug_init(struct clk_hw * hw,struct dentry * dentry)519*4882a593Smuzhiyun static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
520*4882a593Smuzhiyun {
521*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
522*4882a593Smuzhiyun struct ccu_div_dbgfs_bit *bit;
523*4882a593Smuzhiyun
524*4882a593Smuzhiyun bit = kmalloc(sizeof(*bit), GFP_KERNEL);
525*4882a593Smuzhiyun if (!bit)
526*4882a593Smuzhiyun return;
527*4882a593Smuzhiyun
528*4882a593Smuzhiyun *bit = ccu_div_bits[3];
529*4882a593Smuzhiyun bit->div = div;
530*4882a593Smuzhiyun debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
531*4882a593Smuzhiyun &ccu_div_dbgfs_bit_fops);
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun
ccu_div_fixed_debug_init(struct clk_hw * hw,struct dentry * dentry)534*4882a593Smuzhiyun static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
535*4882a593Smuzhiyun {
536*4882a593Smuzhiyun struct ccu_div *div = to_ccu_div(hw);
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
539*4882a593Smuzhiyun &ccu_div_dbgfs_fixed_clkdiv_fops);
540*4882a593Smuzhiyun }
541*4882a593Smuzhiyun
542*4882a593Smuzhiyun #else /* !CONFIG_DEBUG_FS */
543*4882a593Smuzhiyun
544*4882a593Smuzhiyun #define ccu_div_var_debug_init NULL
545*4882a593Smuzhiyun #define ccu_div_gate_debug_init NULL
546*4882a593Smuzhiyun #define ccu_div_buf_debug_init NULL
547*4882a593Smuzhiyun #define ccu_div_fixed_debug_init NULL
548*4882a593Smuzhiyun
549*4882a593Smuzhiyun #endif /* !CONFIG_DEBUG_FS */
550*4882a593Smuzhiyun
/* Variable divider which must be gated before the rate change */
static const struct clk_ops ccu_div_var_gate_to_set_ops = {
	.enable = ccu_div_var_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_fast,
	.debug_init = ccu_div_var_debug_init
};

/* Variable divider supporting the on-the-fly rate change (no gate) */
static const struct clk_ops ccu_div_var_nogate_ops = {
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_slow,
	.debug_init = ccu_div_var_debug_init
};

/* Plain clock gate with a fixed divider */
static const struct clk_ops ccu_div_gate_ops = {
	.enable = ccu_div_gate_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_gate_debug_init
};

/* Reference clock buffer (gate-only, no rate manipulation) */
static const struct clk_ops ccu_div_buf_ops = {
	.enable = ccu_div_buf_enable,
	.disable = ccu_div_buf_disable,
	.is_enabled = ccu_div_buf_is_enabled,
	.debug_init = ccu_div_buf_debug_init
};

/* Fixed divider with no gate */
static const struct clk_ops ccu_div_fixed_ops = {
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_fixed_debug_init
};
591*4882a593Smuzhiyun
ccu_div_hw_register(const struct ccu_div_init_data * div_init)592*4882a593Smuzhiyun struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
593*4882a593Smuzhiyun {
594*4882a593Smuzhiyun struct clk_parent_data parent_data = { };
595*4882a593Smuzhiyun struct clk_init_data hw_init = { };
596*4882a593Smuzhiyun struct ccu_div *div;
597*4882a593Smuzhiyun int ret;
598*4882a593Smuzhiyun
599*4882a593Smuzhiyun if (!div_init)
600*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
601*4882a593Smuzhiyun
602*4882a593Smuzhiyun div = kzalloc(sizeof(*div), GFP_KERNEL);
603*4882a593Smuzhiyun if (!div)
604*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
605*4882a593Smuzhiyun
606*4882a593Smuzhiyun /*
607*4882a593Smuzhiyun * Note since Baikal-T1 System Controller registers are MMIO-backed
608*4882a593Smuzhiyun * we won't check the regmap IO operations return status, because it
609*4882a593Smuzhiyun * must be zero anyway.
610*4882a593Smuzhiyun */
611*4882a593Smuzhiyun div->hw.init = &hw_init;
612*4882a593Smuzhiyun div->id = div_init->id;
613*4882a593Smuzhiyun div->reg_ctl = div_init->base + CCU_DIV_CTL;
614*4882a593Smuzhiyun div->sys_regs = div_init->sys_regs;
615*4882a593Smuzhiyun div->flags = div_init->flags;
616*4882a593Smuzhiyun div->features = div_init->features;
617*4882a593Smuzhiyun spin_lock_init(&div->lock);
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun hw_init.name = div_init->name;
620*4882a593Smuzhiyun hw_init.flags = div_init->flags;
621*4882a593Smuzhiyun
622*4882a593Smuzhiyun if (div_init->type == CCU_DIV_VAR) {
623*4882a593Smuzhiyun if (hw_init.flags & CLK_SET_RATE_GATE)
624*4882a593Smuzhiyun hw_init.ops = &ccu_div_var_gate_to_set_ops;
625*4882a593Smuzhiyun else
626*4882a593Smuzhiyun hw_init.ops = &ccu_div_var_nogate_ops;
627*4882a593Smuzhiyun div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
628*4882a593Smuzhiyun } else if (div_init->type == CCU_DIV_GATE) {
629*4882a593Smuzhiyun hw_init.ops = &ccu_div_gate_ops;
630*4882a593Smuzhiyun div->divider = div_init->divider;
631*4882a593Smuzhiyun } else if (div_init->type == CCU_DIV_BUF) {
632*4882a593Smuzhiyun hw_init.ops = &ccu_div_buf_ops;
633*4882a593Smuzhiyun } else if (div_init->type == CCU_DIV_FIXED) {
634*4882a593Smuzhiyun hw_init.ops = &ccu_div_fixed_ops;
635*4882a593Smuzhiyun div->divider = div_init->divider;
636*4882a593Smuzhiyun } else {
637*4882a593Smuzhiyun ret = -EINVAL;
638*4882a593Smuzhiyun goto err_free_div;
639*4882a593Smuzhiyun }
640*4882a593Smuzhiyun
641*4882a593Smuzhiyun if (!div_init->parent_name) {
642*4882a593Smuzhiyun ret = -EINVAL;
643*4882a593Smuzhiyun goto err_free_div;
644*4882a593Smuzhiyun }
645*4882a593Smuzhiyun parent_data.fw_name = div_init->parent_name;
646*4882a593Smuzhiyun parent_data.name = div_init->parent_name;
647*4882a593Smuzhiyun hw_init.parent_data = &parent_data;
648*4882a593Smuzhiyun hw_init.num_parents = 1;
649*4882a593Smuzhiyun
650*4882a593Smuzhiyun ret = of_clk_hw_register(div_init->np, &div->hw);
651*4882a593Smuzhiyun if (ret)
652*4882a593Smuzhiyun goto err_free_div;
653*4882a593Smuzhiyun
654*4882a593Smuzhiyun return div;
655*4882a593Smuzhiyun
656*4882a593Smuzhiyun err_free_div:
657*4882a593Smuzhiyun kfree(div);
658*4882a593Smuzhiyun
659*4882a593Smuzhiyun return ERR_PTR(ret);
660*4882a593Smuzhiyun }
661*4882a593Smuzhiyun
/*
 * Unregister the divider clock and free the descriptor previously
 * allocated by ccu_div_hw_register(). @div must not be used afterwards.
 */
void ccu_div_hw_unregister(struct ccu_div *div)
{
	clk_hw_unregister(&div->hw);

	kfree(div);
}
668