xref: /OK3568_Linux_fs/kernel/drivers/clk/mvebu/clk-cpu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Marvell MVEBU CPU clock handling.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2012 Marvell
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Gregory CLEMENT <gregory.clement@free-electrons.com>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/slab.h>
12*4882a593Smuzhiyun #include <linux/clk.h>
13*4882a593Smuzhiyun #include <linux/clk-provider.h>
14*4882a593Smuzhiyun #include <linux/of_address.h>
15*4882a593Smuzhiyun #include <linux/io.h>
16*4882a593Smuzhiyun #include <linux/of.h>
17*4882a593Smuzhiyun #include <linux/delay.h>
18*4882a593Smuzhiyun #include <linux/mvebu-pmsu.h>
19*4882a593Smuzhiyun #include <asm/smp_plat.h>
20*4882a593Smuzhiyun 
/* System-control clock-divider registers (offsets from reg_base) */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET               0x0
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL          0xff
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT        8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET              0x8
#define   SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET              0xC
#define SYS_CTRL_CLK_DIVIDER_MASK                      0x3F

/* Ratio field inside a per-CPU PMU DFS register */
#define PMU_DFS_RATIO_SHIFT 16
#define PMU_DFS_RATIO_MASK  0x3F

#define MAX_CPU	    4

/* One instance per CPU clock registered from the DT cpu nodes */
struct cpu_clk {
	struct clk_hw hw;
	int cpu;			/* CPU id, from the DT "reg" property */
	const char *clk_name;		/* "cpuN", kmalloc'ed, owned here */
	const char *parent_name;	/* first parent clock of the provider node */
	void __iomem *reg_base;		/* mapped clock-complex registers */
	void __iomem *pmu_dfs;		/* per-CPU PMU DFS reg, NULL if not mapped */
};

/* Array of registered CPU clks, indexed by CPU id (onecell provider) */
static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
47*4882a593Smuzhiyun 
clk_cpu_recalc_rate(struct clk_hw * hwclk,unsigned long parent_rate)48*4882a593Smuzhiyun static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
49*4882a593Smuzhiyun 					 unsigned long parent_rate)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun 	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
52*4882a593Smuzhiyun 	u32 reg, div;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
55*4882a593Smuzhiyun 	div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
56*4882a593Smuzhiyun 	return parent_rate / div;
57*4882a593Smuzhiyun }
58*4882a593Smuzhiyun 
clk_cpu_round_rate(struct clk_hw * hwclk,unsigned long rate,unsigned long * parent_rate)59*4882a593Smuzhiyun static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
60*4882a593Smuzhiyun 			       unsigned long *parent_rate)
61*4882a593Smuzhiyun {
62*4882a593Smuzhiyun 	/* Valid ratio are 1:1, 1:2 and 1:3 */
63*4882a593Smuzhiyun 	u32 div;
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 	div = *parent_rate / rate;
66*4882a593Smuzhiyun 	if (div == 0)
67*4882a593Smuzhiyun 		div = 1;
68*4882a593Smuzhiyun 	else if (div > 3)
69*4882a593Smuzhiyun 		div = 3;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	return *parent_rate / div;
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun 
/*
 * Program a new CPU divider while the clock is not running (dispatched
 * from clk_cpu_set_rate() when __clk_is_enabled() is false): write the
 * divider directly, then perform the "smooth reload" handshake via the
 * divider control register.
 *
 * NOTE(review): the write / trigger / settle / clear sequence below and
 * the udelay()s follow the hardware's reload protocol — do not reorder.
 */
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)

{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	/* Replace only this CPU's 8-bit divider field, keep the others */
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	/* Set clock divider reload smooth bit mask (one bit per CPU at 20+) */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update (global reload trigger, bit 24) */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
107*4882a593Smuzhiyun 
/*
 * Change the CPU frequency while the clock is running, via the PMU
 * dynamic frequency scaling machinery: compute the target ratio from
 * the fabric (NBCLK) divider, program it into this CPU's PMU DFS
 * register, request a divider reset, then let the PMSU perform the
 * actual switch.
 *
 * Returns -ENODEV when the pmu-dfs registers were not mapped from the
 * Device Tree, otherwise the result of mvebu_pmsu_dfs_request().
 *
 * NOTE(review): only an exact doubling (rate == 2 * cur_rate) is
 * treated as "going up"; any other request falls through to the
 * fabric divider — presumably only 1:1 <-> 1:2 transitions are used.
 */
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * PMU DFS registers are not mapped, Device Tree does not
	 * describes them. We cannot change the frequency dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = clk_hw_get_rate(hwclk);

	/* Current fabric-to-CPU ratio, from the CTRL2 NBCLK ratio field */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	/* Program the target ratio into this CPU's PMU DFS register */
	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	/* Request a reset of all clock dividers */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Hand off to the PMSU to complete the frequency change */
	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}
150*4882a593Smuzhiyun 
clk_cpu_set_rate(struct clk_hw * hwclk,unsigned long rate,unsigned long parent_rate)151*4882a593Smuzhiyun static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
152*4882a593Smuzhiyun 			    unsigned long parent_rate)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun 	if (__clk_is_enabled(hwclk->clk))
155*4882a593Smuzhiyun 		return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
156*4882a593Smuzhiyun 	else
157*4882a593Smuzhiyun 		return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun 
/* clk_ops shared by all per-CPU clocks registered below */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
165*4882a593Smuzhiyun 
of_cpu_clk_setup(struct device_node * node)166*4882a593Smuzhiyun static void __init of_cpu_clk_setup(struct device_node *node)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun 	struct cpu_clk *cpuclk;
169*4882a593Smuzhiyun 	void __iomem *clock_complex_base = of_iomap(node, 0);
170*4882a593Smuzhiyun 	void __iomem *pmu_dfs_base = of_iomap(node, 1);
171*4882a593Smuzhiyun 	int ncpus = 0;
172*4882a593Smuzhiyun 	struct device_node *dn;
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	if (clock_complex_base == NULL) {
175*4882a593Smuzhiyun 		pr_err("%s: clock-complex base register not set\n",
176*4882a593Smuzhiyun 			__func__);
177*4882a593Smuzhiyun 		return;
178*4882a593Smuzhiyun 	}
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	if (pmu_dfs_base == NULL)
181*4882a593Smuzhiyun 		pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
182*4882a593Smuzhiyun 			__func__);
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	for_each_of_cpu_node(dn)
185*4882a593Smuzhiyun 		ncpus++;
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
188*4882a593Smuzhiyun 	if (WARN_ON(!cpuclk))
189*4882a593Smuzhiyun 		goto cpuclk_out;
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 	clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
192*4882a593Smuzhiyun 	if (WARN_ON(!clks))
193*4882a593Smuzhiyun 		goto clks_out;
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	for_each_of_cpu_node(dn) {
196*4882a593Smuzhiyun 		struct clk_init_data init;
197*4882a593Smuzhiyun 		struct clk *clk;
198*4882a593Smuzhiyun 		char *clk_name = kzalloc(5, GFP_KERNEL);
199*4882a593Smuzhiyun 		int cpu, err;
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 		if (WARN_ON(!clk_name))
202*4882a593Smuzhiyun 			goto bail_out;
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 		err = of_property_read_u32(dn, "reg", &cpu);
205*4882a593Smuzhiyun 		if (WARN_ON(err))
206*4882a593Smuzhiyun 			goto bail_out;
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 		sprintf(clk_name, "cpu%d", cpu);
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 		cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
211*4882a593Smuzhiyun 		cpuclk[cpu].clk_name = clk_name;
212*4882a593Smuzhiyun 		cpuclk[cpu].cpu = cpu;
213*4882a593Smuzhiyun 		cpuclk[cpu].reg_base = clock_complex_base;
214*4882a593Smuzhiyun 		if (pmu_dfs_base)
215*4882a593Smuzhiyun 			cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
216*4882a593Smuzhiyun 		cpuclk[cpu].hw.init = &init;
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 		init.name = cpuclk[cpu].clk_name;
219*4882a593Smuzhiyun 		init.ops = &cpu_ops;
220*4882a593Smuzhiyun 		init.flags = 0;
221*4882a593Smuzhiyun 		init.parent_names = &cpuclk[cpu].parent_name;
222*4882a593Smuzhiyun 		init.num_parents = 1;
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 		clk = clk_register(NULL, &cpuclk[cpu].hw);
225*4882a593Smuzhiyun 		if (WARN_ON(IS_ERR(clk)))
226*4882a593Smuzhiyun 			goto bail_out;
227*4882a593Smuzhiyun 		clks[cpu] = clk;
228*4882a593Smuzhiyun 	}
229*4882a593Smuzhiyun 	clk_data.clk_num = MAX_CPU;
230*4882a593Smuzhiyun 	clk_data.clks = clks;
231*4882a593Smuzhiyun 	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	return;
234*4882a593Smuzhiyun bail_out:
235*4882a593Smuzhiyun 	kfree(clks);
236*4882a593Smuzhiyun 	while(ncpus--)
237*4882a593Smuzhiyun 		kfree(cpuclk[ncpus].clk_name);
238*4882a593Smuzhiyun clks_out:
239*4882a593Smuzhiyun 	kfree(cpuclk);
240*4882a593Smuzhiyun cpuclk_out:
241*4882a593Smuzhiyun 	iounmap(clock_complex_base);
242*4882a593Smuzhiyun }
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
245*4882a593Smuzhiyun 					 of_cpu_clk_setup);
246*4882a593Smuzhiyun 
/*
 * On MV98DX3236 the CPU clock is not scalable; just register a
 * pass-through provider so DT consumers can still reference the node.
 */
static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
{
	of_clk_add_provider(node, of_clk_src_simple_get, NULL);
}

CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock",
					 of_mv98dx3236_cpu_clk_setup);
254