// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on clk/samsung/clk-cpu.c
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * A CPU clock is defined as a clock supplied to a CPU or a group of CPUs.
 * The CPU clock is typically derived from a hierarchy of clock
 * blocks which includes mux and divider blocks. There are a number of other
 * auxiliary clocks supplied to the CPU domain, such as the debug blocks and
 * the AXI clock for the CPU domain. The rates of these auxiliary clocks are
 * related to the CPU clock rate and this relation is usually specified in
 * the hardware manual of the SoC or supplied after SoC characterization.
 *
 * The CPU clock implementation below allows rate changes of the CPU clock
 * and the corresponding rate changes of the auxiliary clocks of the CPU
 * domain. The platform clock driver provides a clock register configuration
 * for each configurable rate, which is then used to program the clock
 * hardware registers to achieve a fast, coordinated rate change for all the
 * CPU domain clocks.
 *
 * On a rate change request for the CPU clock, the rate change is propagated
 * up to the PLL supplying the clock to the CPU domain clock blocks. While the
 * CPU domain PLL is reconfigured, the CPU domain clocks are driven by an
 * alternate clock source. If required, the alternate clock source is divided
 * down in order to keep the output clock rate within the previous OPP limits.
 */
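
/*
 * Sketch of the resulting rate-change sequence (implemented by the notifier
 * callbacks below):
 *
 *   PRE_RATE_CHANGE:  if the alternate parent runs faster than the old rate,
 *                     program a temporary core divider so the CPU never
 *                     overshoots the old OPP, then re-mux the core onto the
 *                     alternate parent.
 *   (clk framework):  the armclk PLL is reprogrammed to the new rate.
 *   POST_RATE_CHANGE: apply the auxiliary divider settings for the new rate
 *                     (before re-muxing when scaling up, after when scaling
 *                     down), re-mux back onto the PLL and drop the temporary
 *                     core divider.
 */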

#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clk.h"

/**
 * struct rockchip_cpuclk - information about the clock supplied to a CPU core.
 * @hw:		handle between ccf and the cpu clock.
 * @pll_hw:	handle of the PLL supplying the cpu clock, used by the
 *		boost support.
 * @cpu_mux:	mux covering the cpu clock parents.
 * @cpu_mux_ops: clock ops used for @cpu_mux.
 * @alt_parent:	alternate parent clock to use when switching the speed
 *		of the primary parent clock.
 * @reg_base:	base register for cpu-clock values.
 * @clk_nb:	clock notifier registered for changes in clock speed of the
 *		primary parent clock.
 * @rate_count:	number of rates in the rate_table
 * @rate_table:	pll-rates and their associated dividers
 * @reg_data:	cpu-specific register settings
 * @lock:	clock lock
 */
struct rockchip_cpuclk {
	struct clk_hw				hw;
	struct clk_hw				*pll_hw;

	struct clk_mux				cpu_mux;
	const struct clk_ops			*cpu_mux_ops;

	struct clk				*alt_parent;
	void __iomem				*reg_base;
	struct notifier_block			clk_nb;
	unsigned int				rate_count;
	struct rockchip_cpuclk_rate_table	*rate_table;
	const struct rockchip_cpuclk_reg_data	*reg_data;
	spinlock_t				*lock;
};

#define to_rockchip_cpuclk_hw(hw) container_of(hw, struct rockchip_cpuclk, hw)
#define to_rockchip_cpuclk_nb(nb) \
			container_of(nb, struct rockchip_cpuclk, clk_nb)

static const struct rockchip_cpuclk_rate_table *rockchip_get_cpuclk_settings(
			    struct rockchip_cpuclk *cpuclk, unsigned long rate)
{
	const struct rockchip_cpuclk_rate_table *rate_table =
							cpuclk->rate_table;
	int i;

	for (i = 0; i < cpuclk->rate_count; i++) {
		if (rate == rate_table[i].prate)
			return &rate_table[i];
	}

	return NULL;
}
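
/*
 * Illustrative only: the platform clock driver supplies one
 * rockchip_cpuclk_rate_table entry per supported PLL rate, for example
 * (register offset and divider value are made up, the field names are the
 * ones used in this file):
 *
 *	{
 *		.prate = 1200000000,
 *		.divs = {
 *			{ .reg = 0x0104, .val = HIWORD_UPDATE(1, 0x1f, 0) },
 *		},
 *	},
 *
 * rockchip_get_cpuclk_settings() matches a requested rate against .prate and
 * returns the whole entry.
 */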

static unsigned long rockchip_cpuclk_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_hw(hw);
	const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
	u32 clksel0 = readl_relaxed(cpuclk->reg_base + reg_data->core_reg[0]);

	clksel0 >>= reg_data->div_core_shift[0];
	clksel0 &= reg_data->div_core_mask[0];
	return parent_rate / (clksel0 + 1);
}
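
/*
 * Worked example: with the parent PLL at 1.8 GHz and a divider field value of
 * 2 read back from core_reg[0], the reported rate is
 * 1800000000 / (2 + 1) = 600 MHz.
 */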

static const struct clk_ops rockchip_cpuclk_ops = {
	.recalc_rate = rockchip_cpuclk_recalc_rate,
};

static void rockchip_cpuclk_set_dividers(struct rockchip_cpuclk *cpuclk,
				const struct rockchip_cpuclk_rate_table *rate)
{
	int i;

	/* alternate parent is active now. set the dividers */
	for (i = 0; i < ARRAY_SIZE(rate->divs); i++) {
		const struct rockchip_cpuclk_clksel *clksel = &rate->divs[i];

		if (!clksel->reg)
			continue;

		pr_debug("%s: setting reg 0x%x to 0x%x\n",
			 __func__, clksel->reg, clksel->val);
		writel(clksel->val, cpuclk->reg_base + clksel->reg);
	}
}
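
/*
 * Note: unlike the core-divider updates in the notifier callbacks, the
 * table-driven writes above and below use plain writel(), so for registers
 * that use the hi-word write-enable scheme the .val entries are expected to
 * already carry the HIWORD_UPDATE() encoding.
 */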

static void rockchip_cpuclk_set_pre_muxs(struct rockchip_cpuclk *cpuclk,
					 const struct rockchip_cpuclk_rate_table *rate)
{
	int i;

	/* alternate parent is active now. set the pre_muxs */
	for (i = 0; i < ARRAY_SIZE(rate->pre_muxs); i++) {
		const struct rockchip_cpuclk_clksel *clksel = &rate->pre_muxs[i];

		if (!clksel->reg)
			break;

		pr_debug("%s: setting reg 0x%x to 0x%x\n",
			 __func__, clksel->reg, clksel->val);
		writel(clksel->val, cpuclk->reg_base + clksel->reg);
	}
}

static void rockchip_cpuclk_set_post_muxs(struct rockchip_cpuclk *cpuclk,
					  const struct rockchip_cpuclk_rate_table *rate)
{
	int i;

	/* primary parent is active again. set the post_muxs */
	for (i = 0; i < ARRAY_SIZE(rate->post_muxs); i++) {
		const struct rockchip_cpuclk_clksel *clksel = &rate->post_muxs[i];

		if (!clksel->reg)
			break;

		pr_debug("%s: setting reg 0x%x to 0x%x\n",
			 __func__, clksel->reg, clksel->val);
		writel(clksel->val, cpuclk->reg_base + clksel->reg);
	}
}

static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
					   struct clk_notifier_data *ndata)
{
	const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
	const struct rockchip_cpuclk_rate_table *rate;
	unsigned long alt_prate, alt_div;
	unsigned long flags;
	int i = 0;

	/* check validity of the new rate */
	rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for cpuclk\n",
		       __func__, ndata->new_rate);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_ROCKCHIP_CLK_BOOST))
		rockchip_boost_enable_recovery_sw_low(cpuclk->pll_hw);

	alt_prate = clk_get_rate(cpuclk->alt_parent);

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * If the old parent clock speed is less than the clock speed
	 * of the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_rate until the dividers are
	 * set.
	 */
	if (alt_prate > ndata->old_rate) {
		/* calculate dividers */
		alt_div = DIV_ROUND_UP(alt_prate, ndata->old_rate) - 1;
		if (alt_div > reg_data->div_core_mask[0]) {
			pr_warn("%s: limiting alt-divider %lu to %d\n",
				__func__, alt_div, reg_data->div_core_mask[0]);
			alt_div = reg_data->div_core_mask[0];
		}

		/*
		 * Change parents and add dividers in a single transaction.
		 *
		 * NOTE: we do this in a single transaction so we're never
		 * dividing the primary parent by the extra dividers that were
		 * needed for the alt.
		 */
		pr_debug("%s: setting div %lu as alt-rate %lu > old-rate %lu\n",
			 __func__, alt_div, alt_prate, ndata->old_rate);

		for (i = 0; i < reg_data->num_cores; i++) {
			writel(HIWORD_UPDATE(alt_div, reg_data->div_core_mask[i],
					     reg_data->div_core_shift[i]),
			       cpuclk->reg_base + reg_data->core_reg[i]);
		}
	}

	if (IS_ENABLED(CONFIG_ROCKCHIP_CLK_BOOST))
		rockchip_boost_add_core_div(cpuclk->pll_hw, alt_prate);

	rockchip_cpuclk_set_pre_muxs(cpuclk, rate);

	/* select alternate parent */
	if (reg_data->mux_core_reg)
		writel(HIWORD_UPDATE(reg_data->mux_core_alt,
				     reg_data->mux_core_mask,
				     reg_data->mux_core_shift),
		       cpuclk->reg_base + reg_data->mux_core_reg);
	else
		writel(HIWORD_UPDATE(reg_data->mux_core_alt,
				     reg_data->mux_core_mask,
				     reg_data->mux_core_shift),
		       cpuclk->reg_base + reg_data->core_reg[0]);

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}
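
/*
 * HIWORD_UPDATE() (defined in clk.h) puts the value in the low half-word and
 * the mask in the high half-word, so the CRU register write only affects the
 * masked bits.  For example, HIWORD_UPDATE(3, 0x1f, 8) expands to
 * (3 << 8) | (0x1f << 24): bits [12:8] are write-enabled and set to 3.
 */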

static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
					    struct clk_notifier_data *ndata)
{
	const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
	const struct rockchip_cpuclk_rate_table *rate;
	unsigned long flags;
	int i = 0;

	rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for cpuclk\n",
		       __func__, ndata->new_rate);
		return -EINVAL;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	if (ndata->old_rate < ndata->new_rate)
		rockchip_cpuclk_set_dividers(cpuclk, rate);

	/*
	 * post-rate change event, re-mux to primary parent and remove dividers.
	 *
	 * NOTE: we do this in a single transaction so we're never dividing the
	 * primary parent by the extra dividers that were needed for the alt.
	 */

	if (reg_data->mux_core_reg)
		writel(HIWORD_UPDATE(reg_data->mux_core_main,
				     reg_data->mux_core_mask,
				     reg_data->mux_core_shift),
		       cpuclk->reg_base + reg_data->mux_core_reg);
	else
		writel(HIWORD_UPDATE(reg_data->mux_core_main,
				     reg_data->mux_core_mask,
				     reg_data->mux_core_shift),
		       cpuclk->reg_base + reg_data->core_reg[0]);

	rockchip_cpuclk_set_post_muxs(cpuclk, rate);

	/* remove dividers */
	for (i = 0; i < reg_data->num_cores; i++) {
		writel(HIWORD_UPDATE(0, reg_data->div_core_mask[i],
				     reg_data->div_core_shift[i]),
		       cpuclk->reg_base + reg_data->core_reg[i]);
	}

	if (ndata->old_rate > ndata->new_rate)
		rockchip_cpuclk_set_dividers(cpuclk, rate);

	if (IS_ENABLED(CONFIG_ROCKCHIP_CLK_BOOST))
		rockchip_boost_disable_recovery_sw(cpuclk->pll_hw);

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}
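
/*
 * Ordering note: when scaling up (old_rate < new_rate) the auxiliary dividers
 * from the rate table are tightened while still running from the alternate
 * parent, so the faster PLL never reaches the domain through too-small
 * dividers; when scaling down they are relaxed only after the slower PLL is
 * back in place.
 */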

/*
 * This clock notifier is called when the frequency of the parent clock of
 * cpuclk is about to change. It handles setting up the divider clocks,
 * re-muxing to the temporary parent and keeping the clock within safe
 * frequency limits while the temporary parent is in use.
 */
static int rockchip_cpuclk_notifier_cb(struct notifier_block *nb,
					unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_nb(nb);
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE)
		ret = rockchip_cpuclk_pre_rate_change(cpuclk, ndata);
	else if (event == POST_RATE_CHANGE)
		ret = rockchip_cpuclk_post_rate_change(cpuclk, ndata);

	return notifier_from_errno(ret);
}

struct clk *rockchip_clk_register_cpuclk(const char *name,
			u8 num_parents,
			struct clk *parent, struct clk *alt_parent,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates, void __iomem *reg_base, spinlock_t *lock)
{
	struct rockchip_cpuclk *cpuclk;
	struct clk_init_data init;
	struct clk *clk, *cclk, *pll_clk;
	const char *parent_name;
	int ret;

	if (num_parents < 2) {
		pr_err("%s: needs at least two parent clocks\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(parent) || IS_ERR(alt_parent)) {
		pr_err("%s: invalid parent clock(s)\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk)
		return ERR_PTR(-ENOMEM);

	parent_name = clk_hw_get_name(__clk_get_hw(parent));
	init.name = name;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.ops = &rockchip_cpuclk_ops;

	/* only allow rate changes when we have a rate table */
	init.flags = (nrates > 0) ? CLK_SET_RATE_PARENT : 0;

	/* disallow automatic parent changes by ccf */
	init.flags |= CLK_SET_RATE_NO_REPARENT;

	init.flags |= CLK_GET_RATE_NOCACHE;

	cpuclk->reg_base = reg_base;
	cpuclk->lock = lock;
	cpuclk->reg_data = reg_data;
	cpuclk->clk_nb.notifier_call = rockchip_cpuclk_notifier_cb;
	cpuclk->hw.init = &init;
	if (IS_ENABLED(CONFIG_ROCKCHIP_CLK_BOOST) && reg_data->pll_name) {
		pll_clk = clk_get_parent(parent);
		if (!pll_clk) {
			pr_err("%s: could not lookup pll clock: (%s)\n",
			       __func__, reg_data->pll_name);
			ret = -EINVAL;
			goto free_cpuclk;
		}
		cpuclk->pll_hw = __clk_get_hw(pll_clk);
		rockchip_boost_init(cpuclk->pll_hw);
	}

	cpuclk->alt_parent = alt_parent;
	if (!cpuclk->alt_parent) {
		pr_err("%s: could not lookup alternate parent: (%d)\n",
		       __func__, reg_data->mux_core_alt);
		ret = -EINVAL;
		goto free_cpuclk;
	}

	ret = clk_prepare_enable(cpuclk->alt_parent);
	if (ret) {
		pr_err("%s: could not enable alternate parent\n",
		       __func__);
		goto free_cpuclk;
	}

	clk = parent;
	if (!clk) {
		pr_err("%s: could not lookup parent clock: (%d) %s\n",
		       __func__, reg_data->mux_core_main,
		       parent_name);
		ret = -EINVAL;
		goto free_alt_parent;
	}

	ret = clk_notifier_register(clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
				__func__, name);
		goto free_alt_parent;
	}

	if (nrates > 0) {
		cpuclk->rate_count = nrates;
		cpuclk->rate_table = kmemdup(rates,
					     sizeof(*rates) * nrates,
					     GFP_KERNEL);
		if (!cpuclk->rate_table) {
			ret = -ENOMEM;
			goto unregister_notifier;
		}
	}

	cclk = clk_register(NULL, &cpuclk->hw);
	if (IS_ERR(cclk)) {
		pr_err("%s: could not register cpuclk %s\n", __func__, name);
		ret = PTR_ERR(cclk);
		goto free_rate_table;
	}

	return cclk;

free_rate_table:
	kfree(cpuclk->rate_table);
unregister_notifier:
	clk_notifier_unregister(clk, &cpuclk->clk_nb);
free_alt_parent:
	clk_disable_unprepare(cpuclk->alt_parent);
free_cpuclk:
	kfree(cpuclk);
	return ERR_PTR(ret);
}
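
/*
 * Illustrative sketch (not compiled): a SoC clock driver would typically
 * describe its core clock with a rockchip_cpuclk_reg_data like the one below
 * and register it once during provider setup.  The offsets and values are
 * made up; only the field names come from this file.
 *
 *	static const struct rockchip_cpuclk_reg_data hypothetical_cpuclk_data = {
 *		.core_reg[0] = 0x0000,
 *		.div_core_shift[0] = 0,
 *		.div_core_mask[0] = 0x1f,
 *		.num_cores = 1,
 *		.mux_core_alt = 1,
 *		.mux_core_main = 0,
 *		.mux_core_shift = 6,
 *		.mux_core_mask = 0x3,
 *	};
 *
 *	armclk = rockchip_clk_register_cpuclk("armclk", 2, apll, gpll,
 *					      &hypothetical_cpuclk_data, rates,
 *					      ARRAY_SIZE(rates), reg_base,
 *					      &clk_lock);
 */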

static int rockchip_cpuclk_v2_pre_rate_change(struct rockchip_cpuclk *cpuclk,
					      struct clk_notifier_data *ndata)
{
	unsigned long new_rate = roundup(ndata->new_rate, 1000);
	const struct rockchip_cpuclk_rate_table *rate;
	unsigned long flags;

	rate = rockchip_get_cpuclk_settings(cpuclk, new_rate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for cpuclk\n",
		       __func__, new_rate);
		return -EINVAL;
	}

	if (new_rate > ndata->old_rate) {
		spin_lock_irqsave(cpuclk->lock, flags);
		rockchip_cpuclk_set_dividers(cpuclk, rate);
		spin_unlock_irqrestore(cpuclk->lock, flags);
	}

	return 0;
}

static int rockchip_cpuclk_v2_post_rate_change(struct rockchip_cpuclk *cpuclk,
					       struct clk_notifier_data *ndata)
{
	unsigned long new_rate = roundup(ndata->new_rate, 1000);
	const struct rockchip_cpuclk_rate_table *rate;
	unsigned long flags;

	rate = rockchip_get_cpuclk_settings(cpuclk, new_rate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for cpuclk\n",
		       __func__, new_rate);
		return -EINVAL;
	}

	if (new_rate < ndata->old_rate) {
		spin_lock_irqsave(cpuclk->lock, flags);
		rockchip_cpuclk_set_dividers(cpuclk, rate);
		spin_unlock_irqrestore(cpuclk->lock, flags);
	}

	return 0;
}

static int rockchip_cpuclk_v2_notifier_cb(struct notifier_block *nb,
					  unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_nb(nb);
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE)
		ret = rockchip_cpuclk_v2_pre_rate_change(cpuclk, ndata);
	else if (event == POST_RATE_CHANGE)
		ret = rockchip_cpuclk_v2_post_rate_change(cpuclk, ndata);

	return notifier_from_errno(ret);
}

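/*
 * The "v2" variant below registers the core clock as a plain composite
 * mux + divider and does not re-parent to an alternate clock during rate
 * changes; its notifier only sequences the auxiliary dividers from the rate
 * table around the parent rate change (tightened in PRE_RATE_CHANGE when
 * speeding up, relaxed in POST_RATE_CHANGE when slowing down).
 */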
struct clk *rockchip_clk_register_cpuclk_v2(const char *name,
					    const char *const *parent_names,
					    u8 num_parents, void __iomem *base,
					    int muxdiv_offset, u8 mux_shift,
					    u8 mux_width, u8 mux_flags,
					    int div_offset, u8 div_shift,
					    u8 div_width, u8 div_flags,
					    unsigned long flags, spinlock_t *lock,
					    const struct rockchip_cpuclk_rate_table *rates,
					    int nrates)
{
	struct rockchip_cpuclk *cpuclk;
	struct clk_hw *hw;
	struct clk_mux *mux = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL;
	int ret;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							: &clk_mux_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto free_mux;
		}

		div->flags = div_flags;
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       mux ? &mux->hw : NULL, mux_ops,
				       div ? &div->hw : NULL, div_ops,
				       NULL, NULL, flags);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto free_div;
	}

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk) {
		ret = -ENOMEM;
		goto unregister_clk;
	}

	cpuclk->reg_base = base;
	cpuclk->lock = lock;
	cpuclk->clk_nb.notifier_call = rockchip_cpuclk_v2_notifier_cb;
	ret = clk_notifier_register(hw->clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
		       __func__, name);
		goto free_cpuclk;
	}

	if (nrates > 0) {
		cpuclk->rate_count = nrates;
		cpuclk->rate_table = kmemdup(rates,
					     sizeof(*rates) * nrates,
					     GFP_KERNEL);
		if (!cpuclk->rate_table) {
			ret = -ENOMEM;
			goto unregister_notifier;
		}
	}

	return hw->clk;

unregister_notifier:
	clk_notifier_unregister(hw->clk, &cpuclk->clk_nb);
free_cpuclk:
	kfree(cpuclk);
unregister_clk:
	clk_hw_unregister_composite(hw);
free_div:
	kfree(div);
free_mux:
	kfree(mux);

	return ERR_PTR(ret);
}
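
/*
 * Illustrative call (clock names, offsets and bit positions are hypothetical,
 * taken from no particular SoC):
 *
 *	clk = rockchip_clk_register_cpuclk_v2("armclk", parent_names,
 *					      ARRAY_SIZE(parent_names), reg_base,
 *					      0x0000, 6, 1, CLK_MUX_READ_ONLY,
 *					      0x0000, 0, 5, CLK_DIVIDER_READ_ONLY,
 *					      CLK_IGNORE_UNUSED, &clk_lock,
 *					      rates, ARRAY_SIZE(rates));
 *
 * Here the mux and divider are registered read-only (e.g. when firmware owns
 * them) and only the notifier adjusts the auxiliary dividers from the rate
 * table.
 */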