xref: /OK3568_Linux_fs/kernel/drivers/clk/mvebu/common.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell EBU SoC common clock handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 * Andrew Lunn <andrew@lunn.ch>
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

#include "common.h"

/*
 * Core Clocks
 */

#define SSCG_CONF_MODE(reg)	(((reg) >> 16) & 0x3)
#define SSCG_SPREAD_DOWN	0x0
#define SSCG_SPREAD_UP		0x1
#define SSCG_SPREAD_CENTRAL	0x2
#define SSCG_CONF_LOW(reg)	(((reg) >> 8) & 0xFF)
#define SSCG_CONF_HIGH(reg)	((reg) & 0xFF)
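
/*
 * Field layout implied by the accessor macros above: the spread mode
 * sits in bits [17:16], the low boundary in bits [15:8] and the high
 * boundary in bits [7:0] of the SSCG configuration register.
 */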

static struct clk_onecell_data clk_data;

/*
 * This function can be used by the Kirkwood, Armada 370, Armada XP
 * and Armada 375 SoCs. Following the DT convention, the function is
 * named after the first known SoC that is compatible with it.
 */
u32 kirkwood_fix_sscg_deviation(u32 system_clk)
{
	struct device_node *sscg_np = NULL;
	void __iomem *sscg_map;
	u32 sscg_reg;
	s32 low_bound, high_bound;
	u64 freq_swing_half;

	sscg_np = of_find_node_by_name(NULL, "sscg");
	if (sscg_np == NULL) {
		pr_err("cannot get SSCG register node\n");
		return system_clk;
	}

	sscg_map = of_iomap(sscg_np, 0);
	if (sscg_map == NULL) {
		pr_err("cannot map SSCG register\n");
		goto out;
	}

	sscg_reg = readl(sscg_map);
	high_bound = SSCG_CONF_HIGH(sscg_reg);
	low_bound = SSCG_CONF_LOW(sscg_reg);

	if ((high_bound - low_bound) <= 0) {
		iounmap(sscg_map);
		goto out;
	}
	/*
	 * A Marvell engineer provided the following formula (the
	 * datasheet was erroneous when this code was written):
	 * Spread percentage = 1/96 * (H - L) / H
	 * H = SSCG_High_Boundary
	 * L = SSCG_Low_Boundary
	 *
	 * The deviation is half of the spread, which leads to the
	 * formula used below:
	 * freq_swing_half = (H - L) * system_clk / (2 * 96 * H)
	 *
	 * A 64 bit integer is used to avoid an overflow without
	 * losing any significant digit.
	 */

	freq_swing_half = (((u64)high_bound - (u64)low_bound)
			* (u64)system_clk);
	do_div(freq_swing_half, (2 * 96 * high_bound));
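
	/*
	 * Worked example with hypothetical register values: for
	 * H = 160, L = 158 and system_clk = 2000000000 Hz, this gives
	 * freq_swing_half = 2 * 2000000000 / (2 * 96 * 160)
	 * ~= 130208 Hz, i.e. roughly 0.0065% of the nominal rate.
	 */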

	switch (SSCG_CONF_MODE(sscg_reg)) {
	case SSCG_SPREAD_DOWN:
		system_clk -= freq_swing_half;
		break;
	case SSCG_SPREAD_UP:
		system_clk += freq_swing_half;
		break;
	case SSCG_SPREAD_CENTRAL:
	default:
		break;
	}

	iounmap(sscg_map);

out:
	of_node_put(sscg_np);

	return system_clk;
}

void __init mvebu_coreclk_setup(struct device_node *np,
				const struct coreclk_soc_desc *desc)
{
	const char *tclk_name = "tclk";
	const char *cpuclk_name = "cpuclk";
	void __iomem *base;
	unsigned long rate;
	int n;

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	/* Allocate struct for TCLK, cpu clk, and core ratio clocks */
	clk_data.clk_num = 2 + desc->num_ratios;

	/* One more clock for the optional refclk */
	if (desc->get_refclk_freq)
		clk_data.clk_num += 1;
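
	/*
	 * Resulting layout of clk_data.clks: [0] = TCLK, [1] = CPU
	 * clock, [2..2+num_ratios-1] = fixed-factor ratio clocks and,
	 * when a refclk getter is provided, [2+num_ratios] = refclk.
	 */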

	clk_data.clks = kcalloc(clk_data.clk_num, sizeof(*clk_data.clks),
				GFP_KERNEL);
	if (WARN_ON(!clk_data.clks)) {
		iounmap(base);
		return;
	}

	/* Register TCLK */
	of_property_read_string_index(np, "clock-output-names", 0,
				      &tclk_name);
	rate = desc->get_tclk_freq(base);
	clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL, 0,
						   rate);
	WARN_ON(IS_ERR(clk_data.clks[0]));

	/* Register CPU clock */
	of_property_read_string_index(np, "clock-output-names", 1,
				      &cpuclk_name);
	rate = desc->get_cpu_freq(base);

	if (desc->is_sscg_enabled && desc->fix_sscg_deviation
		&& desc->is_sscg_enabled(base))
		rate = desc->fix_sscg_deviation(rate);

	clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL, 0,
						   rate);
	WARN_ON(IS_ERR(clk_data.clks[1]));

	/* Register fixed-factor clocks derived from CPU clock */
	for (n = 0; n < desc->num_ratios; n++) {
		const char *rclk_name = desc->ratios[n].name;
		int mult, div;

		of_property_read_string_index(np, "clock-output-names",
					      2+n, &rclk_name);
		desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div);
		clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
				       cpuclk_name, 0, mult, div);
		WARN_ON(IS_ERR(clk_data.clks[2+n]));
	}

	/* Register optional refclk */
	if (desc->get_refclk_freq) {
		const char *name = "refclk";
		of_property_read_string_index(np, "clock-output-names",
					      2 + desc->num_ratios, &name);
		rate = desc->get_refclk_freq(base);
		clk_data.clks[2 + desc->num_ratios] =
			clk_register_fixed_rate(NULL, name, NULL, 0, rate);
		WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios]));
	}

	/* SAR register isn't needed anymore */
	iounmap(base);

	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

/*
 * Clock Gating Control
 */

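/*
 * A single spinlock serialises read-modify-write accesses to the
 * shared gating register; it is handed to every gate registered in
 * mvebu_clk_gating_setup() below.
 */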
DEFINE_SPINLOCK(ctrl_gating_lock);

struct clk_gating_ctrl {
	spinlock_t *lock;
	struct clk **gates;
	int num_gates;
	void __iomem *base;
	u32 saved_reg;
};

static struct clk_gating_ctrl *ctrl;

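/*
 * of_clk provider callback: the first cell of the clock specifier is
 * matched against the register bit index of each registered gate.
 */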
static struct clk *clk_gating_get_src(
	struct of_phandle_args *clkspec, void *data)
{
	int n;

	if (clkspec->args_count < 1)
		return ERR_PTR(-EINVAL);

	for (n = 0; n < ctrl->num_gates; n++) {
		struct clk_gate *gate =
			to_clk_gate(__clk_get_hw(ctrl->gates[n]));
		if (clkspec->args[0] == gate->bit_idx)
			return ctrl->gates[n];
	}
	return ERR_PTR(-ENODEV);
}

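/*
 * The whole gating register is saved on suspend and written back on
 * resume, so the gate states survive a system sleep cycle.
 */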
static int mvebu_clk_gating_suspend(void)
{
	ctrl->saved_reg = readl(ctrl->base);
	return 0;
}

static void mvebu_clk_gating_resume(void)
{
	writel(ctrl->saved_reg, ctrl->base);
}

static struct syscore_ops clk_gate_syscore_ops = {
	.suspend = mvebu_clk_gating_suspend,
	.resume = mvebu_clk_gating_resume,
};

void __init mvebu_clk_gating_setup(struct device_node *np,
				   const struct clk_gating_soc_desc *desc)
{
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	if (ctrl) {
		pr_err("mvebu-clk-gating: cannot instantiate more than one gateable clock device\n");
		return;
	}

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

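	/*
	 * When the node references a clock, its name becomes the
	 * default parent for gates whose SoC description does not
	 * name one.
	 */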
	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		goto ctrl_out;

	/* lock must already be initialized */
	ctrl->lock = &ctrl_gating_lock;

	ctrl->base = base;

	/* Count, allocate, and register clock gates */
	for (n = 0; desc[n].name;)
		n++;

	ctrl->num_gates = n;
	ctrl->gates = kcalloc(ctrl->num_gates, sizeof(*ctrl->gates),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates))
		goto gates_out;

	for (n = 0; n < ctrl->num_gates; n++) {
		const char *parent =
			(desc[n].parent) ? desc[n].parent : default_parent;
		ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent,
					desc[n].flags, base, desc[n].bit_idx,
					0, ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}

	of_clk_add_provider(np, clk_gating_get_src, ctrl);

	register_syscore_ops(&clk_gate_syscore_ops);

	return;
gates_out:
	kfree(ctrl);
ctrl_out:
	iounmap(base);
}