// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clk.h"

#define div_mask(width)	((1 << (width)) - 1)
static unsigned long clk_dclk_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = clk_readl(divider->reg) >> divider->shift;
	val &= div_mask(divider->width);

	return DIV_ROUND_UP_ULL((u64)parent_rate, val + 1);
}

static long clk_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int div, maxdiv = div_mask(divider->width) + 1;

	/*
	 * Derive the divider from the maximum allowed parent rate;
	 * odd dividers are rounded down to a power of two and the
	 * result is clamped to what fits in the register field.
	 */
	div = DIV_ROUND_UP_ULL(divider->max_prate, rate);
	if (div % 2)
		div = __rounddown_pow_of_two(div);
	div = div > maxdiv ? maxdiv : div;
	*prate = div * rate;
	return rate;
}

static int clk_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int value;
	unsigned long flags = 0;
	u32 val;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = clk_readl(divider->reg);
		val &= ~(div_mask(divider->width) << divider->shift);
	}
	val |= value << divider->shift;
	clk_writel(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}

const struct clk_ops clk_dclk_divider_ops = {
	.recalc_rate = clk_dclk_recalc_rate,
	.round_rate = clk_dclk_round_rate,
	.set_rate = clk_dclk_set_rate,
};
EXPORT_SYMBOL_GPL(clk_dclk_divider_ops);

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
struct clk *rockchip_clk_register_dclk_branch(const char *name,
					      const char *const *parent_names,
					      u8 num_parents,
					      void __iomem *base,
					      int muxdiv_offset, u8 mux_shift,
					      u8 mux_width, u8 mux_flags,
					      int div_offset, u8 div_shift,
					      u8 div_width, u8 div_flags,
					      struct clk_div_table *div_table,
					      int gate_offset,
					      u8 gate_shift, u8 gate_flags,
					      unsigned long flags,
					      unsigned long max_prate,
					      spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		/* A zero div_offset means the divider shares the mux register. */
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div->max_prate = max_prate;
		div_ops = &clk_dclk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);
	if (IS_ERR(clk)) {
		/* Registration failed, nothing references the sub-clocks yet. */
		kfree(div);
		kfree(gate);
		kfree(mux);
	}

	return clk;
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(-ENOMEM);
}
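
/*
 * Illustrative call (a sketch only: the parent names, register offsets,
 * bit positions, "reg_base" and "clk_lock" below are hypothetical and not
 * taken from any real SoC). It registers a display clock branch with a
 * two-parent mux and an 8-bit divider sharing one register, a separate
 * gate bit, and a 600 MHz cap on the parent rate:
 *
 *	static const char *const dclk_vop_p[] = { "pll_vpll", "pll_gpll" };
 *
 *	clk = rockchip_clk_register_dclk_branch("dclk_vop", dclk_vop_p,
 *						ARRAY_SIZE(dclk_vop_p),
 *						reg_base,
 *						0x0110, 8, 1, 0,
 *						0, 0, 8, 0, NULL,
 *						0x0230, 3, 0,
 *						CLK_SET_RATE_PARENT,
 *						600000000UL, &clk_lock);
 */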