xref: /OK3568_Linux_fs/kernel/drivers/clk/mmp/clk-pll.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MMP PLL clock rate calculation
 *
 * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>

#include "clk.h"

#define to_clk_mmp_pll(hw)	container_of(hw, struct mmp_clk_pll, hw)

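/*
 * Per-PLL state. Field roles, as inferred from the code below:
 * @default_rate: rate reported while the PLL is not software-enabled;
 *                zero means the PLL outputs nothing in that state
 * @enable_reg, @enable: register and bit mask indicating software control
 * @reg, @shift: location of the FBDIV (9-bit) and REFDIV (5-bit) fields;
 *               a NULL @reg means fixed fbdiv = 2, refdiv = 1
 * @input_rate, @postdiv_reg, @postdiv_shift: reference rate and post-divider
 *               field used by the MMP3-style calculation
 */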
struct mmp_clk_pll {
	struct clk_hw hw;
	unsigned long default_rate;
	void __iomem *enable_reg;
	u32 enable;
	void __iomem *reg;
	u8 shift;

	unsigned long input_rate;
	void __iomem *postdiv_reg;
	u8 postdiv_shift;
};

static int mmp_clk_pll_is_enabled(struct clk_hw *hw)
{
	struct mmp_clk_pll *pll = to_clk_mmp_pll(hw);
	u32 val;

	val = readl_relaxed(pll->enable_reg);
	if ((val & pll->enable) == pll->enable)
		return 1;

	/* Some PLLs, if not software controlled, output default clock. */
	if (pll->default_rate > 0)
		return 1;

	return 0;
}

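/*
 * Rate calculation, restated from the logic below:
 *
 * MMP3 (postdiv_reg set):
 *	rate = input_rate * 2 * fbdiv / refdiv / postdiv
 *	where postdiv is looked up in {2, 3, 4, 5, 6, 8, 10, 12, 16}
 *
 * MMP2 (no postdiv_reg):
 *	reference = 19.2 MHz (refdiv == 3) or 26 MHz (refdiv == 4)
 *	rate = reference * (fbdiv + 2) / (refdiv + 2)
 */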
static unsigned long mmp_clk_pll_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct mmp_clk_pll *pll = to_clk_mmp_pll(hw);
	u32 fbdiv, refdiv, postdiv;
	u64 rate;
	u32 val;

	val = readl_relaxed(pll->enable_reg);
	if ((val & pll->enable) != pll->enable)
		return pll->default_rate;

	if (pll->reg) {
		val = readl_relaxed(pll->reg);
		fbdiv = (val >> pll->shift) & 0x1ff;
		refdiv = (val >> (pll->shift + 9)) & 0x1f;
	} else {
		fbdiv = 2;
		refdiv = 1;
	}

	if (pll->postdiv_reg) {
		/* MMP3 clock rate calculation */
		static const u8 postdivs[] = {2, 3, 4, 5, 6, 8, 10, 12, 16};

		val = readl_relaxed(pll->postdiv_reg);
		postdiv = (val >> pll->postdiv_shift) & 0x7;

		rate = pll->input_rate;
		rate *= 2 * fbdiv;
		do_div(rate, refdiv);
		do_div(rate, postdivs[postdiv]);
	} else {
		/* MMP2 clock rate calculation */
		if (refdiv == 3) {
			rate = 19200000;
		} else if (refdiv == 4) {
			rate = 26000000;
		} else {
			pr_err("bad refdiv: %d (0x%08x)\n", refdiv, val);
			return 0;
		}

		rate *= fbdiv + 2;
		do_div(rate, refdiv + 2);
	}

	return (unsigned long)rate;
}

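/*
 * Only status and rate readback are implemented; there is no .enable or
 * .set_rate here, so these PLLs are exposed as read-only clock sources.
 */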
static const struct clk_ops mmp_clk_pll_ops = {
	.is_enabled = mmp_clk_pll_is_enabled,
	.recalc_rate = mmp_clk_pll_recalc_rate,
};

static struct clk *mmp_clk_register_pll(char *name,
			unsigned long default_rate,
			void __iomem *enable_reg, u32 enable,
			void __iomem *reg, u8 shift,
			unsigned long input_rate,
			void __iomem *postdiv_reg, u8 postdiv_shift)
{
	struct mmp_clk_pll *pll;
	struct clk *clk;
	struct clk_init_data init;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &mmp_clk_pll_ops;
	init.flags = 0;
	init.parent_names = NULL;
	init.num_parents = 0;

	pll->default_rate = default_rate;
	pll->enable_reg = enable_reg;
	pll->enable = enable;
	pll->reg = reg;
	pll->shift = shift;

	pll->input_rate = input_rate;
	pll->postdiv_reg = postdiv_reg;
	pll->postdiv_shift = postdiv_shift;

	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);

	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}

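/*
 * Registers a table of PLLs against a single register base.  A minimal,
 * hypothetical usage sketch (the offsets, masks, CLK_PLL2 index and
 * mpmu_base below are placeholders for illustration only; real tables live
 * in the SoC-specific drivers):
 *
 *	static struct mmp_param_pll_clk pll_clks[] = {
 *		{
 *			.id = CLK_PLL2,		// clk_table index, hypothetical
 *			.name = "pll2",
 *			.default_rate = 0,	// silent unless software-enabled
 *			.enable_offset = 0x34,	// hypothetical register offset
 *			.enable = 0x0300,	// hypothetical enable bits
 *			.offset = 0x0414,	// hypothetical FBDIV/REFDIV register
 *			.shift = 0,
 *		},
 *	};
 *
 *	mmp_register_pll_clks(unit, pll_clks, mpmu_base, ARRAY_SIZE(pll_clks));
 */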
void mmp_register_pll_clks(struct mmp_clk_unit *unit,
			struct mmp_param_pll_clk *clks,
			void __iomem *base, int size)
{
	struct clk *clk;
	int i;

	for (i = 0; i < size; i++) {
		void __iomem *reg = NULL;

		if (clks[i].offset)
			reg = base + clks[i].offset;

		clk = mmp_clk_register_pll(clks[i].name,
					clks[i].default_rate,
					base + clks[i].enable_offset,
					clks[i].enable,
					reg, clks[i].shift,
					clks[i].input_rate,
					base + clks[i].postdiv_offset,
					clks[i].postdiv_shift);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n",
			       __func__, clks[i].name);
			continue;
		}
		if (clks[i].id)
			unit->clk_table[clks[i].id] = clk;
	}
}