xref: /OK3568_Linux_fs/kernel/drivers/clk/tegra/clk-tegra124-emc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * drivers/clk/tegra/clk-emc.c
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Author:
8*4882a593Smuzhiyun  *	Mikko Perttunen <mperttunen@nvidia.com>
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/clk-provider.h>
12*4882a593Smuzhiyun #include <linux/clk.h>
13*4882a593Smuzhiyun #include <linux/clkdev.h>
14*4882a593Smuzhiyun #include <linux/delay.h>
15*4882a593Smuzhiyun #include <linux/io.h>
16*4882a593Smuzhiyun #include <linux/module.h>
17*4882a593Smuzhiyun #include <linux/of_address.h>
18*4882a593Smuzhiyun #include <linux/of_platform.h>
19*4882a593Smuzhiyun #include <linux/platform_device.h>
20*4882a593Smuzhiyun #include <linux/sort.h>
21*4882a593Smuzhiyun #include <linux/string.h>
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #include <soc/tegra/fuse.h>
24*4882a593Smuzhiyun #include <soc/tegra/emc.h>
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #include "clk.h"
27*4882a593Smuzhiyun 
/* EMC clock source register offset within the CAR register block. */
#define CLK_SOURCE_EMC 0x19c

/* Divisor field of CLK_SOURCE_EMC (bits 7:0). */
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT 0
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK 0xff
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK) << \
					      CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT)

/* Parent mux selector field of CLK_SOURCE_EMC (bits 31:29). */
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT 29
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK 0x7
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK) << \
					  CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
39*4882a593Smuzhiyun 
/*
 * Possible EMC parents; the array order corresponds to the mux selector
 * values programmed into the CLK_SOURCE_EMC_EMC_2X_CLK_SRC field.
 */
static const char * const emc_parent_clk_names[] = {
	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud",
	"pll_c2", "pll_c3", "pll_c_ud"
};
44*4882a593Smuzhiyun 
/*
 * List of clock sources for various parents the EMC clock can have.
 * When we change the timing to a timing with a parent that has the same
 * clock source as the current parent, we must first change to a backup
 * timing that has a different clock source.
 */

#define EMC_SRC_PLL_M 0
#define EMC_SRC_PLL_C 1
#define EMC_SRC_PLL_P 2
#define EMC_SRC_CLK_M 3
#define EMC_SRC_PLL_C2 4
#define EMC_SRC_PLL_C3 5

/*
 * Maps each entry of emc_parent_clk_names[] (same indexing) to the
 * underlying clock source it is derived from; e.g. pll_m and pll_m_ud
 * both map to EMC_SRC_PLL_M.
 */
static const char emc_parent_clk_sources[] = {
	EMC_SRC_PLL_M, EMC_SRC_PLL_C, EMC_SRC_PLL_P, EMC_SRC_CLK_M,
	EMC_SRC_PLL_M, EMC_SRC_PLL_C2, EMC_SRC_PLL_C3, EMC_SRC_PLL_C
};
63*4882a593Smuzhiyun 
/* One DT-provided EMC operating point, valid for a single RAM code. */
struct emc_timing {
	/* Target EMC rate and the rate the parent clock must run at. */
	unsigned long rate, parent_rate;
	/* Index into emc_parent_clk_names[] of the parent to use. */
	u8 parent_index;
	/* Parent clock obtained from the timing's "emc-parent" DT entry. */
	struct clk *parent;
	/* RAM code (DT "nvidia,ram-code") this timing applies to. */
	u32 ram_code;
};
70*4882a593Smuzhiyun 
/* Driver state for the EMC clock. */
struct tegra_clk_emc {
	struct clk_hw hw;
	/* Mapped CAR registers; CLK_SOURCE_EMC is accessed through this. */
	void __iomem *clk_regs;
	/* Parent that currently holds our prepare/enable reference. */
	struct clk *prev_parent;
	/* Set while a timing change is in flight to suppress re-entry. */
	bool changing_timing;

	/* DT node of the EMC driver; dropped once tegra->emc is resolved. */
	struct device_node *emc_node;
	/* Cached EMC driver instance, resolved lazily on first use. */
	struct tegra_emc *emc;

	/* Timings from DT, sorted by rate within each ram-code group. */
	int num_timings;
	struct emc_timing *timings;
	/* Protects read-modify-write of the CLK_SOURCE_EMC register. */
	spinlock_t *lock;
};
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun /* Common clock framework callback implementations */
86*4882a593Smuzhiyun 
emc_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)87*4882a593Smuzhiyun static unsigned long emc_recalc_rate(struct clk_hw *hw,
88*4882a593Smuzhiyun 				     unsigned long parent_rate)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun 	struct tegra_clk_emc *tegra;
91*4882a593Smuzhiyun 	u32 val, div;
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	tegra = container_of(hw, struct tegra_clk_emc, hw);
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	/*
96*4882a593Smuzhiyun 	 * CCF wrongly assumes that the parent won't change during set_rate,
97*4882a593Smuzhiyun 	 * so get the parent rate explicitly.
98*4882a593Smuzhiyun 	 */
99*4882a593Smuzhiyun 	parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
102*4882a593Smuzhiyun 	div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun 	return parent_rate / (div + 2) * 2;
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /*
108*4882a593Smuzhiyun  * Rounds up unless no higher rate exists, in which case down. This way is
109*4882a593Smuzhiyun  * safer since things have EMC rate floors. Also don't touch parent_rate
110*4882a593Smuzhiyun  * since we don't want the CCF to play with our parent clocks.
111*4882a593Smuzhiyun  */
/*
 * Pick the timing closest to the requested rate, honouring req->min_rate
 * and req->max_rate, among the timings matching the current RAM code.
 */
static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct tegra_clk_emc *tegra;
	u8 ram_code = tegra_read_ram_code();
	struct emc_timing *timing = NULL;
	int i, k, t;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	/* k = first timing for this ram code (timings are grouped). */
	for (k = 0; k < tegra->num_timings; k++) {
		if (tegra->timings[k].ram_code == ram_code)
			break;
	}

	/* t = one past the last timing for this ram code. */
	for (t = k; t < tegra->num_timings; t++) {
		if (tegra->timings[t].ram_code != ram_code)
			break;
	}

	/* Timings within [k, t) are sorted by ascending rate. */
	for (i = k; i < t; i++) {
		timing = tegra->timings + i;

		/* Round up: skip lower rates unless this is the last one. */
		if (timing->rate < req->rate && i != t - 1)
			continue;

		if (timing->rate > req->max_rate) {
			/* Too high: fall back to the previous timing, but
			 * never below the first one in the group. */
			i = max(i, k + 1);
			req->rate = tegra->timings[i - 1].rate;
			return 0;
		}

		if (timing->rate < req->min_rate)
			continue;

		req->rate = timing->rate;
		return 0;
	}

	/* Nothing satisfied min_rate; use the last candidate examined. */
	if (timing) {
		req->rate = timing->rate;
		return 0;
	}

	/* No timings for this ram code at all: keep the current rate. */
	req->rate = clk_hw_get_rate(hw);
	return 0;
}
158*4882a593Smuzhiyun 
emc_get_parent(struct clk_hw * hw)159*4882a593Smuzhiyun static u8 emc_get_parent(struct clk_hw *hw)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun 	struct tegra_clk_emc *tegra;
162*4882a593Smuzhiyun 	u32 val;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	tegra = container_of(hw, struct tegra_clk_emc, hw);
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	return (val >> CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
169*4882a593Smuzhiyun 		& CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun 
/*
 * Resolve and cache the tegra_emc driver instance this clock depends on.
 *
 * On the first successful lookup the DT node reference is dropped and the
 * result cached in tegra->emc; subsequent calls return the cached pointer.
 * Returns NULL when the EMC platform device or its driver data is not
 * available.
 */
static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
{
	struct platform_device *pdev;

	/* Already resolved on a previous call. */
	if (tegra->emc)
		return tegra->emc;

	/* No "nvidia,external-memory-controller" node was found. */
	if (!tegra->emc_node)
		return NULL;

	pdev = of_find_device_by_node(tegra->emc_node);
	if (!pdev) {
		pr_err("%s: could not get external memory controller\n",
		       __func__);
		return NULL;
	}

	/* Drop the node reference; the lookup only needs to happen once. */
	of_node_put(tegra->emc_node);
	tegra->emc_node = NULL;

	tegra->emc = platform_get_drvdata(pdev);
	if (!tegra->emc) {
		/* Balance the reference taken by of_find_device_by_node(). */
		put_device(&pdev->dev);
		pr_err("%s: cannot find EMC driver\n", __func__);
		return NULL;
	}

	return tegra->emc;
}
201*4882a593Smuzhiyun 
emc_set_timing(struct tegra_clk_emc * tegra,struct emc_timing * timing)202*4882a593Smuzhiyun static int emc_set_timing(struct tegra_clk_emc *tegra,
203*4882a593Smuzhiyun 			  struct emc_timing *timing)
204*4882a593Smuzhiyun {
205*4882a593Smuzhiyun 	int err;
206*4882a593Smuzhiyun 	u8 div;
207*4882a593Smuzhiyun 	u32 car_value;
208*4882a593Smuzhiyun 	unsigned long flags = 0;
209*4882a593Smuzhiyun 	struct tegra_emc *emc = emc_ensure_emc_driver(tegra);
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	if (!emc)
212*4882a593Smuzhiyun 		return -ENOENT;
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	pr_debug("going to rate %ld prate %ld p %s\n", timing->rate,
215*4882a593Smuzhiyun 		 timing->parent_rate, __clk_get_name(timing->parent));
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	if (emc_get_parent(&tegra->hw) == timing->parent_index &&
218*4882a593Smuzhiyun 	    clk_get_rate(timing->parent) != timing->parent_rate) {
219*4882a593Smuzhiyun 		WARN_ONCE(1, "parent %s rate mismatch %lu %lu\n",
220*4882a593Smuzhiyun 			  __clk_get_name(timing->parent),
221*4882a593Smuzhiyun 			  clk_get_rate(timing->parent),
222*4882a593Smuzhiyun 			  timing->parent_rate);
223*4882a593Smuzhiyun 		return -EINVAL;
224*4882a593Smuzhiyun 	}
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	tegra->changing_timing = true;
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 	err = clk_set_rate(timing->parent, timing->parent_rate);
229*4882a593Smuzhiyun 	if (err) {
230*4882a593Smuzhiyun 		pr_err("cannot change parent %s rate to %ld: %d\n",
231*4882a593Smuzhiyun 		       __clk_get_name(timing->parent), timing->parent_rate,
232*4882a593Smuzhiyun 		       err);
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 		return err;
235*4882a593Smuzhiyun 	}
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	err = clk_prepare_enable(timing->parent);
238*4882a593Smuzhiyun 	if (err) {
239*4882a593Smuzhiyun 		pr_err("cannot enable parent clock: %d\n", err);
240*4882a593Smuzhiyun 		return err;
241*4882a593Smuzhiyun 	}
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	div = timing->parent_rate / (timing->rate / 2) - 2;
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	err = tegra_emc_prepare_timing_change(emc, timing->rate);
246*4882a593Smuzhiyun 	if (err)
247*4882a593Smuzhiyun 		return err;
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	spin_lock_irqsave(tegra->lock, flags);
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun 	car_value = readl(tegra->clk_regs + CLK_SOURCE_EMC);
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_SRC(~0);
254*4882a593Smuzhiyun 	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_SRC(timing->parent_index);
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(~0);
257*4882a593Smuzhiyun 	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(div);
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	writel(car_value, tegra->clk_regs + CLK_SOURCE_EMC);
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	spin_unlock_irqrestore(tegra->lock, flags);
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	tegra_emc_complete_timing_change(emc, timing->rate);
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
266*4882a593Smuzhiyun 	clk_disable_unprepare(tegra->prev_parent);
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	tegra->prev_parent = timing->parent;
269*4882a593Smuzhiyun 	tegra->changing_timing = false;
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	return 0;
272*4882a593Smuzhiyun }
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun /*
275*4882a593Smuzhiyun  * Get backup timing to use as an intermediate step when a change between
276*4882a593Smuzhiyun  * two timings with the same clock source has been requested. First try to
277*4882a593Smuzhiyun  * find a timing with a higher clock rate to avoid a rate below any set rate
278*4882a593Smuzhiyun  * floors. If that is not possible, find a lower rate.
279*4882a593Smuzhiyun  */
get_backup_timing(struct tegra_clk_emc * tegra,int timing_index)280*4882a593Smuzhiyun static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
281*4882a593Smuzhiyun 					    int timing_index)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun 	int i;
284*4882a593Smuzhiyun 	u32 ram_code = tegra_read_ram_code();
285*4882a593Smuzhiyun 	struct emc_timing *timing;
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	for (i = timing_index+1; i < tegra->num_timings; i++) {
288*4882a593Smuzhiyun 		timing = tegra->timings + i;
289*4882a593Smuzhiyun 		if (timing->ram_code != ram_code)
290*4882a593Smuzhiyun 			break;
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 		if (emc_parent_clk_sources[timing->parent_index] !=
293*4882a593Smuzhiyun 		    emc_parent_clk_sources[
294*4882a593Smuzhiyun 		      tegra->timings[timing_index].parent_index])
295*4882a593Smuzhiyun 			return timing;
296*4882a593Smuzhiyun 	}
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	for (i = timing_index-1; i >= 0; --i) {
299*4882a593Smuzhiyun 		timing = tegra->timings + i;
300*4882a593Smuzhiyun 		if (timing->ram_code != ram_code)
301*4882a593Smuzhiyun 			break;
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 		if (emc_parent_clk_sources[timing->parent_index] !=
304*4882a593Smuzhiyun 		    emc_parent_clk_sources[
305*4882a593Smuzhiyun 		      tegra->timings[timing_index].parent_index])
306*4882a593Smuzhiyun 			return timing;
307*4882a593Smuzhiyun 	}
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	return NULL;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun 
/*
 * Set the EMC rate by looking up the matching timing for the current RAM
 * code, going through a backup timing first when the target timing shares
 * its clock source with the current parent but needs a different parent
 * rate (the parent cannot be re-rated while it drives the EMC).
 */
static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	struct emc_timing *timing = NULL;
	int i, err;
	u32 ram_code = tegra_read_ram_code();

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	if (clk_hw_get_rate(hw) == rate)
		return 0;

	/*
	 * When emc_set_timing changes the parent rate, CCF will propagate
	 * that downward to us, so ignore any set_rate calls while a rate
	 * change is already going on.
	 */
	if (tegra->changing_timing)
		return 0;

	/* Find the exact-rate timing for the current RAM code. */
	for (i = 0; i < tegra->num_timings; i++) {
		if (tegra->timings[i].rate == rate &&
		    tegra->timings[i].ram_code == ram_code) {
			timing = tegra->timings + i;
			break;
		}
	}

	if (!timing) {
		pr_err("cannot switch to rate %ld without emc table\n", rate);
		return -EINVAL;
	}

	if (emc_parent_clk_sources[emc_get_parent(hw)] ==
	    emc_parent_clk_sources[timing->parent_index] &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		/*
		 * Parent clock source not changed but parent rate has changed,
		 * need to temporarily switch to another parent
		 */

		struct emc_timing *backup_timing;

		/* i still indexes the target timing found above. */
		backup_timing = get_backup_timing(tegra, i);
		if (!backup_timing) {
			pr_err("cannot find backup timing\n");
			return -EINVAL;
		}

		pr_debug("using %ld as backup rate when going to %ld\n",
			 backup_timing->rate, rate);

		err = emc_set_timing(tegra, backup_timing);
		if (err) {
			pr_err("cannot set backup timing: %d\n", err);
			return err;
		}
	}

	return emc_set_timing(tegra, timing);
}
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun /* Initialization and deinitialization */
376*4882a593Smuzhiyun 
load_one_timing_from_dt(struct tegra_clk_emc * tegra,struct emc_timing * timing,struct device_node * node)377*4882a593Smuzhiyun static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
378*4882a593Smuzhiyun 				   struct emc_timing *timing,
379*4882a593Smuzhiyun 				   struct device_node *node)
380*4882a593Smuzhiyun {
381*4882a593Smuzhiyun 	int err, i;
382*4882a593Smuzhiyun 	u32 tmp;
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	err = of_property_read_u32(node, "clock-frequency", &tmp);
385*4882a593Smuzhiyun 	if (err) {
386*4882a593Smuzhiyun 		pr_err("timing %pOF: failed to read rate\n", node);
387*4882a593Smuzhiyun 		return err;
388*4882a593Smuzhiyun 	}
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 	timing->rate = tmp;
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
393*4882a593Smuzhiyun 	if (err) {
394*4882a593Smuzhiyun 		pr_err("timing %pOF: failed to read parent rate\n", node);
395*4882a593Smuzhiyun 		return err;
396*4882a593Smuzhiyun 	}
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	timing->parent_rate = tmp;
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	timing->parent = of_clk_get_by_name(node, "emc-parent");
401*4882a593Smuzhiyun 	if (IS_ERR(timing->parent)) {
402*4882a593Smuzhiyun 		pr_err("timing %pOF: failed to get parent clock\n", node);
403*4882a593Smuzhiyun 		return PTR_ERR(timing->parent);
404*4882a593Smuzhiyun 	}
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	timing->parent_index = 0xff;
407*4882a593Smuzhiyun 	i = match_string(emc_parent_clk_names, ARRAY_SIZE(emc_parent_clk_names),
408*4882a593Smuzhiyun 			 __clk_get_name(timing->parent));
409*4882a593Smuzhiyun 	if (i < 0) {
410*4882a593Smuzhiyun 		pr_err("timing %pOF: %s is not a valid parent\n",
411*4882a593Smuzhiyun 		       node, __clk_get_name(timing->parent));
412*4882a593Smuzhiyun 		clk_put(timing->parent);
413*4882a593Smuzhiyun 		return -EINVAL;
414*4882a593Smuzhiyun 	}
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	timing->parent_index = i;
417*4882a593Smuzhiyun 	return 0;
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun 
cmp_timings(const void * _a,const void * _b)420*4882a593Smuzhiyun static int cmp_timings(const void *_a, const void *_b)
421*4882a593Smuzhiyun {
422*4882a593Smuzhiyun 	const struct emc_timing *a = _a;
423*4882a593Smuzhiyun 	const struct emc_timing *b = _b;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	if (a->rate < b->rate)
426*4882a593Smuzhiyun 		return -1;
427*4882a593Smuzhiyun 	else if (a->rate == b->rate)
428*4882a593Smuzhiyun 		return 0;
429*4882a593Smuzhiyun 	else
430*4882a593Smuzhiyun 		return 1;
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun 
/*
 * Append all timings found under @node (one child node per timing) to
 * tegra->timings, tagging each entry with @ram_code, and sort the newly
 * added range by rate.
 *
 * Fix over the previous version: the krealloc() result was assigned
 * directly to tegra->timings, leaking the existing array when the
 * reallocation failed. Use a temporary so the old array survives.
 *
 * Returns 0 on success or a negative errno.
 */
static int load_timings_from_dt(struct tegra_clk_emc *tegra,
				struct device_node *node,
				u32 ram_code)
{
	struct emc_timing *new_timings, *timings_ptr;
	struct device_node *child;
	int child_count = of_get_child_count(node);
	int i = 0, err;
	size_t size;

	size = (tegra->num_timings + child_count) * sizeof(struct emc_timing);

	new_timings = krealloc(tegra->timings, size, GFP_KERNEL);
	if (!new_timings)
		return -ENOMEM;

	tegra->timings = new_timings;
	timings_ptr = tegra->timings + tegra->num_timings;
	tegra->num_timings += child_count;

	for_each_child_of_node(node, child) {
		struct emc_timing *timing = timings_ptr + (i++);

		err = load_one_timing_from_dt(tegra, timing, child);
		if (err) {
			/* Drop the iterator's reference before bailing out. */
			of_node_put(child);
			return err;
		}

		timing->ram_code = ram_code;
	}

	/* Keep this ram code's timings ordered by ascending rate. */
	sort(timings_ptr, child_count, sizeof(struct emc_timing),
	     cmp_timings, NULL);

	return 0;
}
469*4882a593Smuzhiyun 
/* Common clock framework operations for the EMC clock. */
static const struct clk_ops tegra_clk_emc_ops = {
	.recalc_rate = emc_recalc_rate,
	.determine_rate = emc_determine_rate,
	.set_rate = emc_set_rate,
	.get_parent = emc_get_parent,
};
476*4882a593Smuzhiyun 
/*
 * Register the "emc" clock, loading all memory timings from DT.
 *
 * @base: mapped CAR registers; @np: CAR DT node whose children carry
 * "nvidia,ram-code" timing tables; @lock: spinlock protecting accesses
 * to CLK_SOURCE_EMC.
 *
 * Fixes over the previous version: kzalloc() instead of kcalloc(1, ...);
 * tegra, tegra->timings and the emc_node reference are released on the
 * error paths instead of being leaked; stray semicolon after the function
 * body removed.
 *
 * Returns the registered clock or an ERR_PTR() on failure.
 */
struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
				   spinlock_t *lock)
{
	struct tegra_clk_emc *tegra;
	struct clk_init_data init;
	struct device_node *node;
	u32 node_ram_code;
	struct clk *clk;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return ERR_PTR(-ENOMEM);

	tegra->clk_regs = base;
	tegra->lock = lock;

	tegra->num_timings = 0;

	for_each_child_of_node(np, node) {
		/* Children without a ram code are not timing tables. */
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err)
			continue;

		/*
		 * Store timings for all ram codes as we cannot read the
		 * fuses until the apbmisc driver is loaded.
		 */
		err = load_timings_from_dt(tegra, node, node_ram_code);
		if (err) {
			of_node_put(node);
			goto err_free;
		}
	}

	if (tegra->num_timings == 0)
		pr_warn("%s: no memory timings registered\n", __func__);

	tegra->emc_node = of_parse_phandle(np,
			"nvidia,external-memory-controller", 0);
	if (!tegra->emc_node)
		pr_warn("%s: couldn't find node for EMC driver\n", __func__);

	init.name = "emc";
	init.ops = &tegra_clk_emc_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = emc_parent_clk_names;
	init.num_parents = ARRAY_SIZE(emc_parent_clk_names);

	tegra->hw.init = &init;

	clk = clk_register(NULL, &tegra->hw);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		of_node_put(tegra->emc_node);
		goto err_free;
	}

	tegra->prev_parent = clk_hw_get_parent_by_index(
		&tegra->hw, emc_get_parent(&tegra->hw))->clk;
	tegra->changing_timing = false;

	/* Allow debugging tools to see the EMC clock */
	clk_register_clkdev(clk, "emc", "tegra-clk-debug");

	return clk;

err_free:
	/*
	 * NOTE(review): parent clk references held by individual timings
	 * are not put here — matches the original behavior; consider a
	 * clk_put() per loaded timing.
	 */
	kfree(tegra->timings);
	kfree(tegra);
	return ERR_PTR(err);
}
542