// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 *
 * This file contains the utility function to register CPU clock for Samsung
 * Exynos platforms. A CPU clock is defined as a clock supplied to a CPU or a
 * group of CPUs. The CPU clock is typically derived from a hierarchy of clock
 * blocks which includes mux and divider blocks. There are a number of other
 * auxiliary clocks supplied to the CPU domain such as the debug blocks and AXI
 * clock for CPU domain. The rates of these auxiliary clocks are related to the
 * CPU clock rate and this relation is usually specified in the hardware manual
 * of the SoC or supplied after the SoC characterization.
 *
 * The below implementation of the CPU clock allows the rate changes of the CPU
 * clock and the corresponding rate changes of the auxiliary clocks of the CPU
 * domain. The platform clock driver provides a clock register configuration
 * for each configurable rate which is then used to program the clock hardware
 * registers to achieve a fast coordinated rate change for all the CPU domain
 * clocks.
 *
 * On a rate change request for the CPU clock, the rate change is propagated
 * up to the PLL supplying the clock to the CPU domain clock blocks. While the
 * CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
 * alternate clock source. If required, the alternate clock source is divided
 * down in order to keep the output clock rate within the previous OPP limits.
 */
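
/*
 * Rough rate-change timeline (informal sketch of the mechanism described
 * above; the pre/post notifier handlers below implement the two hooks):
 *
 *   clk_set_rate(armclk, new_rate)
 *     -> PRE_RATE_CHANGE:  program 'safe' dividers and re-parent armclk to
 *                          the alternate source (e.g. sclk_mpll)
 *     -> the parent PLL is re-programmed to the new rate
 *     -> POST_RATE_CHANGE: re-parent armclk back to the PLL output and
 *                          restore the dividers
 */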

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clk-cpu.h"

#define E4210_SRC_CPU		0x0
#define E4210_STAT_CPU		0x200
#define E4210_DIV_CPU0		0x300
#define E4210_DIV_CPU1		0x304
#define E4210_DIV_STAT_CPU0	0x400
#define E4210_DIV_STAT_CPU1	0x404

#define E5433_MUX_SEL2		0x008
#define E5433_MUX_STAT2		0x208
#define E5433_DIV_CPU0		0x400
#define E5433_DIV_CPU1		0x404
#define E5433_DIV_STAT_CPU0	0x500
#define E5433_DIV_STAT_CPU1	0x504

#define E4210_DIV0_RATIO0_MASK	0x7
#define E4210_DIV1_HPM_MASK	(0x7 << 4)
#define E4210_DIV1_COPY_MASK	(0x7 << 0)
#define E4210_MUX_HPM_MASK	(1 << 20)
#define E4210_DIV0_ATB_SHIFT	16
#define E4210_DIV0_ATB_MASK	(DIV_MASK << E4210_DIV0_ATB_SHIFT)

#define MAX_DIV			8
#define DIV_MASK		7
#define DIV_MASK_ALL		0xffffffff
#define MUX_MASK		7
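
/*
 * The divider fields handled here are 3 bits wide: a field value in the
 * range 0..DIV_MASK selects a divide ratio of 1..MAX_DIV.
 */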

/*
 * Helper function to wait until divider(s) have stabilized after the divider
 * value has changed.
 */
static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (!(readl(div_reg) & mask))
			return;
	} while (time_before(jiffies, timeout));

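	/*
	 * Poll the status one final time after the timeout window: the loop
	 * above may have been scheduled out between its last register read
	 * and the time check, so re-reading here avoids reporting a spurious
	 * timeout.
	 */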
	if (!(readl(div_reg) & mask))
		return;

	pr_err("%s: timeout in divider stabilization\n", __func__);
}

/*
 * Helper function to wait until mux has stabilized after the mux selection
 * value was changed.
 */
static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
					unsigned long mux_value)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
			return;
	} while (time_before(jiffies, timeout));

	if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
		return;

	pr_err("%s: re-parenting mux timed-out\n", __func__);
}

/* common round rate callback usable for all types of CPU clocks */
static long exynos_cpuclk_round_rate(struct clk_hw *hw,
			unsigned long drate, unsigned long *prate)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	*prate = clk_hw_round_rate(parent, drate);
	return *prate;
}

/* common recalc rate callback usable for all types of CPU clocks */
static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	/*
	 * The CPU clock output (armclk) rate is the same as its parent
	 * rate. Although there exist certain dividers inside the CPU
	 * clock block that could be used to divide the parent clock,
	 * the driver does not make use of them currently, except during
	 * frequency transitions.
	 */
	return parent_rate;
}

static const struct clk_ops exynos_cpuclk_clk_ops = {
	.recalc_rate = exynos_cpuclk_recalc_rate,
	.round_rate = exynos_cpuclk_round_rate,
};

/*
 * Helper function to set the 'safe' dividers for the CPU clock. The parameters
 * div and mask contain the divider value and the register bit mask of the
 * dividers to be programmed.
 */
static void exynos_set_safe_div(void __iomem *base, unsigned long div,
					unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E4210_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
}

/* handler for pre-rate change notification from parent clock */
static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/*
	 * Find the divider values to use for the new rate. The zero-terminated
	 * configuration table stores prate in kHz, while the notifier rates
	 * are in Hz, hence the multiplication by 1000.
	 */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values. If the clock for sclk_hpm is not sourced from apll, then
	 * the values for DIV_COPY and DIV_HPM dividers need not be set.
	 */
	div0 = cfg_data->div0;
	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		div1 = cfg_data->div1;
		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
			div1 = readl(base + E4210_DIV_CPU1) &
				(E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
	}

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_prate until the dividers are
	 * set.  Also work around the issue of the dividers being set to lower
	 * values before the parent clock speed is set to the new lower speed
	 * (this can result in too high speed of armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

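		/*
		 * Example: with an 800 MHz alternate parent and tmp_rate of
		 * 300 MHz, DIV_ROUND_UP(800, 300) - 1 = 2, i.e. the alternate
		 * source is divided by 3 (to ~266 MHz) so armclk never
		 * exceeds tmp_rate while the PLL is re-programmed.
		 */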
		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
			/*
			 * In Exynos4210, ATB clock parent is also mout_core. So
			 * ATB clock also needs to be maintained at safe speed.
			 */
			alt_div |= E4210_DIV0_ATB_MASK;
			alt_div_mask |= E4210_DIV0_ATB_MASK;
		}
		exynos_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select sclk_mpll as the alternate parent */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);

	/* alternate parent is active now. set the dividers */
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);

	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		writel(div1, base + E4210_DIV_CPU1);
		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
				DIV_MASK_ALL);
	}

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/* handler for post-rate change notification from parent clock */
static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	/* find out the divider values to use for clock data */
	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		while ((cfg_data->prate * 1000) != ndata->new_rate) {
			if (cfg_data->prate == 0)
				return -EINVAL;
			cfg_data++;
		}
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/* select mout_apll as the alternate parent */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);

	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
		div_mask |= E4210_DIV0_ATB_MASK;
	}

	exynos_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/*
 * Helper function to set the 'safe' dividers for the CPU clock. The parameters
 * div and mask contain the divider value and the register bit mask of the
 * dividers to be programmed.
 */
static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
					unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E5433_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask);
}

/* handler for pre-rate change notification from parent clock */
static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/* find out the divider values to use for clock data */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values.
	 */
	div0 = cfg_data->div0;
	div1 = cfg_data->div1;

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_prate until the dividers are
	 * set.  Also work around the issue of the dividers being set to lower
	 * values before the parent clock speed is set to the new lower speed
	 * (this can result in too high speed of armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		exynos5433_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select the alternate parent */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg | 1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2);

	/* alternate parent is active now. set the dividers */
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL);

	writel(div1, base + E5433_DIV_CPU1);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL);

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/* handler for post-rate change notification from parent clock */
static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	spin_lock_irqsave(cpuclk->lock, flags);

	/* select apll as the alternate parent */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg & ~1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1);

	exynos5433_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */
static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */
static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

/* helper function to register a CPU clock */
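/*
 * Usage sketch (identifiers and numeric values below are purely illustrative,
 * not taken from any particular SoC driver): a platform CMU driver supplies a
 * zero-terminated table of { prate (kHz), div0, div1 } entries and registers
 * the CPU clock while setting up its clock provider, roughly as:
 *
 *	static const struct exynos_cpuclk_cfg_data armclk_cfg[] = {
 *		{ 1000000, 0x01117710, 0x00000044 },
 *		{  800000, 0x01117710, 0x00000044 },
 *		{ 0 },
 *	};
 *
 *	exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
 *			hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL], 0x14200,
 *			armclk_cfg, ARRAY_SIZE(armclk_cfg),
 *			CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
 */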
int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
		unsigned int lookup_id, const char *name,
		const struct clk_hw *parent, const struct clk_hw *alt_parent,
		unsigned long offset, const struct exynos_cpuclk_cfg_data *cfg,
		unsigned long num_cfgs, unsigned long flags)
{
	struct exynos_cpuclk *cpuclk;
	struct clk_init_data init;
	const char *parent_name;
	int ret = 0;

	if (IS_ERR(parent) || IS_ERR(alt_parent)) {
		pr_err("%s: invalid parent clock(s)\n", __func__);
		return -EINVAL;
	}

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk)
		return -ENOMEM;

	parent_name = clk_hw_get_name(parent);

	init.name = name;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.ops = &exynos_cpuclk_clk_ops;

	cpuclk->alt_parent = alt_parent;
	cpuclk->hw.init = &init;
	cpuclk->ctrl_base = ctx->reg_base + offset;
	cpuclk->lock = &ctx->lock;
	cpuclk->flags = flags;
	if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT)
		cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb;
	else
		cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;

	ret = clk_notifier_register(parent->clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
				__func__, name);
		goto free_cpuclk;
	}

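	/*
	 * Keep a private copy of the configuration table: the caller's table
	 * is typically init-only data, while the notifier handlers above need
	 * these values for every subsequent rate change.
	 */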
	cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
	if (!cpuclk->cfg) {
		ret = -ENOMEM;
		goto unregister_clk_nb;
	}

	ret = clk_hw_register(NULL, &cpuclk->hw);
	if (ret) {
		pr_err("%s: could not register cpuclk %s\n", __func__, name);
		goto free_cpuclk_data;
	}

	samsung_clk_add_lookup(ctx, &cpuclk->hw, lookup_id);
	return 0;

free_cpuclk_data:
	kfree(cpuclk->cfg);
unregister_clk_nb:
	clk_notifier_unregister(parent->clk, &cpuclk->clk_nb);
free_cpuclk:
	kfree(cpuclk);
	return ret;
}