// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell PXA family clocks
 *
 * Copyright (C) 2014 Robert Jarzmik
 *
 * Common clock code for PXA clocks ("CKEN" type clocks + DT)
 */
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/clk-provider.h>
11*4882a593Smuzhiyun #include <linux/clkdev.h>
12*4882a593Smuzhiyun #include <linux/io.h>
13*4882a593Smuzhiyun #include <linux/of.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <dt-bindings/clock/pxa-clock.h>
16*4882a593Smuzhiyun #include "clk-pxa.h"
17*4882a593Smuzhiyun
#define KHz 1000
#define MHz (1000 * 1000)

/*
 * Bits of the MDREFR (SDRAM refresh control) register, used by
 * pxa2xx_cpll_change() when reprogramming the memory clock.
 */
#define MDREFR_K0DB4	(1 << 29)	/* SDCLK0 Divide by 4 Control/Status */
#define MDREFR_K2FREE	(1 << 25)	/* SDRAM Free-Running Control */
#define MDREFR_K1FREE	(1 << 24)	/* SDRAM Free-Running Control */
#define MDREFR_K0FREE	(1 << 23)	/* SDRAM Free-Running Control */
#define MDREFR_SLFRSH	(1 << 22)	/* SDRAM Self-Refresh Control/Status */
#define MDREFR_APD	(1 << 20)	/* SDRAM/SSRAM Auto-Power-Down Enable */
#define MDREFR_K2DB2	(1 << 19)	/* SDCLK2 Divide by 2 Control/Status */
#define MDREFR_K2RUN	(1 << 18)	/* SDCLK2 Run Control/Status */
#define MDREFR_K1DB2	(1 << 17)	/* SDCLK1 Divide by 2 Control/Status */
#define MDREFR_K1RUN	(1 << 16)	/* SDCLK1 Run Control/Status */
#define MDREFR_E1PIN	(1 << 15)	/* SDCKE1 Level Control/Status */
#define MDREFR_K0DB2	(1 << 14)	/* SDCLK0 Divide by 2 Control/Status */
#define MDREFR_K0RUN	(1 << 13)	/* SDCLK0 Run Control/Status */
#define MDREFR_E0PIN	(1 << 12)	/* SDCKE0 Level Control/Status */
#define MDREFR_DB2_MASK	(MDREFR_K2DB2 | MDREFR_K1DB2)
#define MDREFR_DRI_MASK	0xFFF
37*4882a593Smuzhiyun
/* Serializes CKEN gate register accesses (installed as gate.lock below). */
static DEFINE_SPINLOCK(pxa_clk_lock);

/*
 * Lookup table published to DT consumers through the one-cell provider;
 * indexed by the ids from dt-bindings/clock/pxa-clock.h.
 */
static struct clk *pxa_clocks[CLK_MAX];
static struct clk_onecell_data onecell_data = {
	.clks = pxa_clocks,
	.clk_num = CLK_MAX,
};

/*
 * A PXA "CKEN" clock: an enable gate plus two fixed-factor scalings of the
 * parent rate.  is_in_low_power() selects which factor applies at
 * recalc/get_parent time; when it is NULL the low-power factor is always
 * used.
 */
struct pxa_clk {
	struct clk_hw hw;
	struct clk_fixed_factor lp;	/* factor while in low-power mode */
	struct clk_fixed_factor hp;	/* factor while in high-power mode */
	struct clk_gate gate;		/* CKEN enable/disable bit */
	bool (*is_in_low_power)(void);	/* NULL => always low power */
};

#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)
55*4882a593Smuzhiyun
cken_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)56*4882a593Smuzhiyun static unsigned long cken_recalc_rate(struct clk_hw *hw,
57*4882a593Smuzhiyun unsigned long parent_rate)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun struct pxa_clk *pclk = to_pxa_clk(hw);
60*4882a593Smuzhiyun struct clk_fixed_factor *fix;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun if (!pclk->is_in_low_power || pclk->is_in_low_power())
63*4882a593Smuzhiyun fix = &pclk->lp;
64*4882a593Smuzhiyun else
65*4882a593Smuzhiyun fix = &pclk->hp;
66*4882a593Smuzhiyun __clk_hw_set_clk(&fix->hw, hw);
67*4882a593Smuzhiyun return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun
/* Rate portion of the composite clock: read-only, derived from the parent. */
static const struct clk_ops cken_rate_ops = {
	.recalc_rate = cken_recalc_rate,
};
73*4882a593Smuzhiyun
cken_get_parent(struct clk_hw * hw)74*4882a593Smuzhiyun static u8 cken_get_parent(struct clk_hw *hw)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun struct pxa_clk *pclk = to_pxa_clk(hw);
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun if (!pclk->is_in_low_power)
79*4882a593Smuzhiyun return 0;
80*4882a593Smuzhiyun return pclk->is_in_low_power() ? 0 : 1;
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun
/*
 * Mux portion of the composite clock: the parent follows the hardware power
 * mode; set_parent is a dummy so the framework accepts the mux ops.
 */
static const struct clk_ops cken_mux_ops = {
	.get_parent = cken_get_parent,
	.set_parent = dummy_clk_set_parent,
};
87*4882a593Smuzhiyun
clkdev_pxa_register(int ckid,const char * con_id,const char * dev_id,struct clk * clk)88*4882a593Smuzhiyun void __init clkdev_pxa_register(int ckid, const char *con_id,
89*4882a593Smuzhiyun const char *dev_id, struct clk *clk)
90*4882a593Smuzhiyun {
91*4882a593Smuzhiyun if (!IS_ERR(clk) && (ckid != CLK_NONE))
92*4882a593Smuzhiyun pxa_clocks[ckid] = clk;
93*4882a593Smuzhiyun if (!IS_ERR(clk))
94*4882a593Smuzhiyun clk_register_clkdev(clk, con_id, dev_id);
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun
clk_pxa_cken_init(const struct desc_clk_cken * clks,int nb_clks)97*4882a593Smuzhiyun int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun int i;
100*4882a593Smuzhiyun struct pxa_clk *pxa_clk;
101*4882a593Smuzhiyun struct clk *clk;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun for (i = 0; i < nb_clks; i++) {
104*4882a593Smuzhiyun pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
105*4882a593Smuzhiyun pxa_clk->is_in_low_power = clks[i].is_in_low_power;
106*4882a593Smuzhiyun pxa_clk->lp = clks[i].lp;
107*4882a593Smuzhiyun pxa_clk->hp = clks[i].hp;
108*4882a593Smuzhiyun pxa_clk->gate = clks[i].gate;
109*4882a593Smuzhiyun pxa_clk->gate.lock = &pxa_clk_lock;
110*4882a593Smuzhiyun clk = clk_register_composite(NULL, clks[i].name,
111*4882a593Smuzhiyun clks[i].parent_names, 2,
112*4882a593Smuzhiyun &pxa_clk->hw, &cken_mux_ops,
113*4882a593Smuzhiyun &pxa_clk->hw, &cken_rate_ops,
114*4882a593Smuzhiyun &pxa_clk->gate.hw, &clk_gate_ops,
115*4882a593Smuzhiyun clks[i].flags);
116*4882a593Smuzhiyun clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
117*4882a593Smuzhiyun clks[i].dev_id, clk);
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun return 0;
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun
/*
 * clk_pxa_dt_common_init - expose the PXA clocks to device tree consumers
 * @np: the clocks controller node
 *
 * Registers pxa_clocks[] as a one-cell provider so DT references resolve
 * via the indices in dt-bindings/clock/pxa-clock.h.
 */
void __init clk_pxa_dt_common_init(struct device_node *np)
{
	of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}
126*4882a593Smuzhiyun
/*
 * pxa2xx_core_turbo_switch - switch the core between run and turbo mode
 * @on: true to enter turbo mode, false to return to run mode
 *
 * Reads CLKCFG (coprocessor 14, register c6), clears TURBO and HALFTURBO,
 * sets TURBO when requested, and writes it back with FCS (frequency change
 * sequence) set, all with local interrupts disabled.
 *
 * The branch/".align 5" dance makes the mcr write-back execute from the
 * start of a freshly fetched 32-byte-aligned block -- NOTE(review):
 * presumably required by the PXA frequency-change sequence; confirm
 * against the PXA25x/27x developer's manual.
 */
void pxa2xx_core_turbo_switch(bool on)
{
	unsigned long flags;
	unsigned int unused, clkcfg;

	local_irq_save(flags);

	/* Read the current CLKCFG value. */
	asm("mrc p14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
	clkcfg &= ~CLKCFG_TURBO & ~CLKCFG_HALFTURBO;
	if (on)
		clkcfg |= CLKCFG_TURBO;
	clkcfg |= CLKCFG_FCS;

	asm volatile(
	" b 2f\n"
	" .align 5\n"
	"1: mcr p14, 0, %1, c6, c0, 0\n"
	" b 3f\n"
	"2: b 1b\n"
	"3: nop\n"
	: "=&r" (unused) : "r" (clkcfg));

	local_irq_restore(flags);
}
151*4882a593Smuzhiyun
/*
 * pxa2xx_cpll_change - change the core PLL and SDRAM timings atomically
 * @freq: target frequency entry (cccr/clkcfg values, memory bus rate, div2)
 * @mdrefr_dri: converts a memory bus rate in kHz to a DRI field value
 * @mdrefr: MMIO address of the MDREFR register
 * @cccr: MMIO address of the CCCR register
 *
 * Programs CCCR, then issues the frequency-change sequence (CLKCFG write)
 * bracketed by the MDREFR updates that must happen before and after the
 * change, with local interrupts disabled throughout.
 */
void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
			u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
			void __iomem *cccr)
{
	unsigned int clkcfg = freq->clkcfg;
	unsigned int unused, preset_mdrefr, postset_mdrefr;
	unsigned long flags;

	local_irq_save(flags);

	/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
	 * we need to preset the smaller DRI before the change. If we're
	 * speeding up we need to set the larger DRI value after the change.
	 */
	preset_mdrefr = postset_mdrefr = readl(mdrefr);
	if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(freq->membus_khz)) {
		preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
		preset_mdrefr |= mdrefr_dri(freq->membus_khz);
	}
	postset_mdrefr =
		(postset_mdrefr & ~MDREFR_DRI_MASK) |
		mdrefr_dri(freq->membus_khz);

	/* If we're dividing the memory clock by two for the SDRAM clock, this
	 * must be set prior to the change. Clearing the divide must be done
	 * after the change.
	 */
	if (freq->div2) {
		preset_mdrefr |= MDREFR_DB2_MASK;
		postset_mdrefr |= MDREFR_DB2_MASK;
	} else {
		postset_mdrefr &= ~MDREFR_DB2_MASK;
	}

	/* Set new the CCCR and prepare CLKCFG */
	writel(freq->cccr, cccr);

	/* Aligned, branch-entered sequence: preset MDREFR, trigger the
	 * frequency change via CLKCFG, then postset MDREFR -- the ordering
	 * and alignment of these three operations is what makes the change
	 * safe, so they are kept in a single asm block.
	 */
	asm volatile(
	" ldr r4, [%1]\n"
	" b 2f\n"
	" .align 5\n"
	"1: str %3, [%1] /* preset the MDREFR */\n"
	" mcr p14, 0, %2, c6, c0, 0 /* set CLKCFG[FCS] */\n"
	" str %4, [%1] /* postset the MDREFR */\n"
	" b 3f\n"
	"2: b 1b\n"
	"3: nop\n"
	: "=&r" (unused)
	: "r" (mdrefr), "r" (clkcfg), "r" (preset_mdrefr),
	  "r" (postset_mdrefr)
	: "r4", "r5");

	local_irq_restore(flags);
}
206*4882a593Smuzhiyun
pxa2xx_determine_rate(struct clk_rate_request * req,struct pxa2xx_freq * freqs,int nb_freqs)207*4882a593Smuzhiyun int pxa2xx_determine_rate(struct clk_rate_request *req,
208*4882a593Smuzhiyun struct pxa2xx_freq *freqs, int nb_freqs)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun int i, closest_below = -1, closest_above = -1;
211*4882a593Smuzhiyun unsigned long rate;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun for (i = 0; i < nb_freqs; i++) {
214*4882a593Smuzhiyun rate = freqs[i].cpll;
215*4882a593Smuzhiyun if (rate == req->rate)
216*4882a593Smuzhiyun break;
217*4882a593Smuzhiyun if (rate < req->min_rate)
218*4882a593Smuzhiyun continue;
219*4882a593Smuzhiyun if (rate > req->max_rate)
220*4882a593Smuzhiyun continue;
221*4882a593Smuzhiyun if (rate <= req->rate)
222*4882a593Smuzhiyun closest_below = i;
223*4882a593Smuzhiyun if ((rate >= req->rate) && (closest_above == -1))
224*4882a593Smuzhiyun closest_above = i;
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun req->best_parent_hw = NULL;
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun if (i < nb_freqs) {
230*4882a593Smuzhiyun rate = req->rate;
231*4882a593Smuzhiyun } else if (closest_below >= 0) {
232*4882a593Smuzhiyun rate = freqs[closest_below].cpll;
233*4882a593Smuzhiyun } else if (closest_above >= 0) {
234*4882a593Smuzhiyun rate = freqs[closest_above].cpll;
235*4882a593Smuzhiyun } else {
236*4882a593Smuzhiyun pr_debug("%s(rate=%lu) no match\n", __func__, req->rate);
237*4882a593Smuzhiyun return -EINVAL;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun pr_debug("%s(rate=%lu) rate=%lu\n", __func__, req->rate, rate);
241*4882a593Smuzhiyun req->rate = rate;
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun return 0;
244*4882a593Smuzhiyun }
245