xref: /OK3568_Linux_fs/kernel/drivers/clk/tegra/clk.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
#include <linux/reset-controller.h>

#include <soc/tegra/fuse.h>

#include "clk.h"

/* Global data of Tegra CPU CAR ops */
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;

int *periph_clk_enb_refcnt;
static int periph_banks;
static u32 *periph_state_ctx;
static struct clk **clks;
static int clk_num;
static struct clk_onecell_data clk_data;

/* Handlers for SoC-specific reset lines */
static int (*special_reset_assert)(unsigned long);
static int (*special_reset_deassert)(unsigned long);
static unsigned int num_special_reset;

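/*
 * Enable/reset register offsets for each 32-clock peripheral bank
 * (banks L, H, U, V, W, X and Y).
 */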
static const struct tegra_clk_periph_regs periph_regs[] = {
	[0] = {
		.enb_reg = CLK_OUT_ENB_L,
		.enb_set_reg = CLK_OUT_ENB_SET_L,
		.enb_clr_reg = CLK_OUT_ENB_CLR_L,
		.rst_reg = RST_DEVICES_L,
		.rst_set_reg = RST_DEVICES_SET_L,
		.rst_clr_reg = RST_DEVICES_CLR_L,
	},
	[1] = {
		.enb_reg = CLK_OUT_ENB_H,
		.enb_set_reg = CLK_OUT_ENB_SET_H,
		.enb_clr_reg = CLK_OUT_ENB_CLR_H,
		.rst_reg = RST_DEVICES_H,
		.rst_set_reg = RST_DEVICES_SET_H,
		.rst_clr_reg = RST_DEVICES_CLR_H,
	},
	[2] = {
		.enb_reg = CLK_OUT_ENB_U,
		.enb_set_reg = CLK_OUT_ENB_SET_U,
		.enb_clr_reg = CLK_OUT_ENB_CLR_U,
		.rst_reg = RST_DEVICES_U,
		.rst_set_reg = RST_DEVICES_SET_U,
		.rst_clr_reg = RST_DEVICES_CLR_U,
	},
	[3] = {
		.enb_reg = CLK_OUT_ENB_V,
		.enb_set_reg = CLK_OUT_ENB_SET_V,
		.enb_clr_reg = CLK_OUT_ENB_CLR_V,
		.rst_reg = RST_DEVICES_V,
		.rst_set_reg = RST_DEVICES_SET_V,
		.rst_clr_reg = RST_DEVICES_CLR_V,
	},
	[4] = {
		.enb_reg = CLK_OUT_ENB_W,
		.enb_set_reg = CLK_OUT_ENB_SET_W,
		.enb_clr_reg = CLK_OUT_ENB_CLR_W,
		.rst_reg = RST_DEVICES_W,
		.rst_set_reg = RST_DEVICES_SET_W,
		.rst_clr_reg = RST_DEVICES_CLR_W,
	},
	[5] = {
		.enb_reg = CLK_OUT_ENB_X,
		.enb_set_reg = CLK_OUT_ENB_SET_X,
		.enb_clr_reg = CLK_OUT_ENB_CLR_X,
		.rst_reg = RST_DEVICES_X,
		.rst_set_reg = RST_DEVICES_SET_X,
		.rst_clr_reg = RST_DEVICES_CLR_X,
	},
	[6] = {
		.enb_reg = CLK_OUT_ENB_Y,
		.enb_set_reg = CLK_OUT_ENB_SET_Y,
		.enb_clr_reg = CLK_OUT_ENB_CLR_Y,
		.rst_reg = RST_DEVICES_Y,
		.rst_set_reg = RST_DEVICES_SET_Y,
		.rst_clr_reg = RST_DEVICES_CLR_Y,
	},
};

static void __iomem *clk_base;

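/*
 * Reset controller callbacks: IDs below periph_banks * 32 map directly
 * onto the per-bank RST_DEVICES set/clear registers; IDs above that
 * range are forwarded to the SoC-specific "special" reset handlers.
 */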
static int tegra_clk_rst_assert(struct reset_controller_dev *rcdev,
		unsigned long id)
{
	/*
	 * If the peripheral is on the APB bus, we must read the APB bus to
	 * flush the write operation on the APB bus. This avoids peripheral
	 * access after disabling the clock. Since the reset driver has no
	 * knowledge of which reset IDs represent which devices, simply do
	 * this all the time.
	 */
	tegra_read_chipid();

	if (id < periph_banks * 32) {
		writel_relaxed(BIT(id % 32),
			       clk_base + periph_regs[id / 32].rst_set_reg);
		return 0;
	} else if (id < periph_banks * 32 + num_special_reset) {
		return special_reset_assert(id);
	}

	return -EINVAL;
}

static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev,
		unsigned long id)
{
	if (id < periph_banks * 32) {
		writel_relaxed(BIT(id % 32),
			       clk_base + periph_regs[id / 32].rst_clr_reg);
		return 0;
	} else if (id < periph_banks * 32 + num_special_reset) {
		return special_reset_deassert(id);
	}

	return -EINVAL;
}

static int tegra_clk_rst_reset(struct reset_controller_dev *rcdev,
		unsigned long id)
{
	int err;

	err = tegra_clk_rst_assert(rcdev, id);
	if (err)
		return err;

	udelay(1);

	return tegra_clk_rst_deassert(rcdev, id);
}

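/* Look up the register bank for a peripheral clock ID; WARN and return NULL if out of range. */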
const struct tegra_clk_periph_regs *get_reg_bank(int clkid)
{
	int reg_bank = clkid / 32;

	if (reg_bank < periph_banks)
		return &periph_regs[reg_bank];
	else {
		WARN_ON(1);
		return NULL;
	}
}

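/* Gate or ungate the PLLP_OUT_CPU branch via the CLK_OUT_ENB_Y register. */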
void tegra_clk_set_pllp_out_cpu(bool enable)
{
	u32 val;

	val = readl_relaxed(clk_base + CLK_OUT_ENB_Y);
	if (enable)
		val |= CLK_ENB_PLLP_OUT_CPU;
	else
		val &= ~CLK_ENB_PLLP_OUT_CPU;

	writel_relaxed(val, clk_base + CLK_OUT_ENB_Y);
}

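/* Save the enable and reset state of every peripheral bank before suspend. */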
void tegra_clk_periph_suspend(void)
{
	unsigned int i, idx;

	idx = 0;
	for (i = 0; i < periph_banks; i++, idx++)
		periph_state_ctx[idx] =
			readl_relaxed(clk_base + periph_regs[i].enb_reg);

	for (i = 0; i < periph_banks; i++, idx++)
		periph_state_ctx[idx] =
			readl_relaxed(clk_base + periph_regs[i].rst_reg);
}

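/* Restore the peripheral clock enables first, then de-assert resets from the saved context. */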
void tegra_clk_periph_resume(void)
{
	unsigned int i, idx;

	idx = 0;
	for (i = 0; i < periph_banks; i++, idx++)
		writel_relaxed(periph_state_ctx[idx],
			       clk_base + periph_regs[i].enb_reg);
	/*
	 * All non-boot peripherals will be in reset state on resume.
	 * Wait for 5us of reset propagation delay before de-asserting
	 * the peripherals based on the saved context.
	 */
	fence_udelay(5, clk_base);

	for (i = 0; i < periph_banks; i++, idx++)
		writel_relaxed(periph_state_ctx[idx],
			       clk_base + periph_regs[i].rst_reg);

	fence_udelay(2, clk_base);
}

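/* Allocate suspend context storage: one enable word and one reset word per bank. */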
static int tegra_clk_periph_ctx_init(int banks)
{
	periph_state_ctx = kcalloc(2 * banks, sizeof(*periph_state_ctx),
				   GFP_KERNEL);
	if (!periph_state_ctx)
		return -ENOMEM;

	return 0;
}

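/*
 * Allocate the peripheral enable refcounts, the clks lookup array and,
 * when PM_SLEEP is enabled, the suspend context for the given number of
 * clocks and register banks.
 */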
struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
{
	clk_base = regs;

	if (WARN_ON(banks > ARRAY_SIZE(periph_regs)))
		return NULL;

	periph_clk_enb_refcnt = kcalloc(32 * banks,
					sizeof(*periph_clk_enb_refcnt),
					GFP_KERNEL);
	if (!periph_clk_enb_refcnt)
		return NULL;

	periph_banks = banks;

	clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
	if (!clks) {
		kfree(periph_clk_enb_refcnt);
		return NULL;
	}

	clk_num = num;

	if (IS_ENABLED(CONFIG_PM_SLEEP)) {
		if (tegra_clk_periph_ctx_init(banks)) {
			kfree(periph_clk_enb_refcnt);
			kfree(clks);
			return NULL;
		}
	}

	return clks;
}

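/* Register extra clkdev lookups for clocks that are exposed under more than one name. */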
void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
				struct clk *clks[], int clk_max)
{
	struct clk *clk;

	for (; dup_list->clk_id < clk_max; dup_list++) {
		clk = clks[dup_list->clk_id];
		dup_list->lookup.clk = clk;
		clkdev_add(&dup_list->lookup);
	}
}

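/*
 * Apply a SoC init table: for each entry, optionally reparent the clock,
 * set its rate and enable it, warning on any failure.
 */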
void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
				  struct clk *clks[], int clk_max)
{
	struct clk *clk;

	for (; tbl->clk_id < clk_max; tbl++) {
		clk = clks[tbl->clk_id];
		if (IS_ERR_OR_NULL(clk)) {
			pr_err("%s: invalid entry %ld in clks array for id %d\n",
			       __func__, PTR_ERR(clk), tbl->clk_id);
			WARN_ON(1);

			continue;
		}

		if (tbl->parent_id < clk_max) {
			struct clk *parent = clks[tbl->parent_id];

			if (clk_set_parent(clk, parent)) {
				pr_err("%s: Failed to set parent %s of %s\n",
				       __func__, __clk_get_name(parent),
				       __clk_get_name(clk));
				WARN_ON(1);
			}
		}

		if (tbl->rate)
			if (clk_set_rate(clk, tbl->rate)) {
				pr_err("%s: Failed to set rate %lu of %s\n",
				       __func__, tbl->rate,
				       __clk_get_name(clk));
				WARN_ON(1);
			}

		if (tbl->state)
			if (clk_prepare_enable(clk)) {
				pr_err("%s: Failed to enable %s\n", __func__,
				       __clk_get_name(clk));
				WARN_ON(1);
			}
	}
}

static const struct reset_control_ops rst_ops = {
	.assert = tegra_clk_rst_assert,
	.deassert = tegra_clk_rst_deassert,
	.reset = tegra_clk_rst_reset,
};

static struct reset_controller_dev rst_ctlr = {
	.ops = &rst_ops,
	.owner = THIS_MODULE,
	.of_reset_n_cells = 1,
};

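/* Expose the clocks as a device tree clock provider and register the reset controller. */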
void __init tegra_add_of_provider(struct device_node *np,
				  void *clk_src_onecell_get)
{
	int i;

	for (i = 0; i < clk_num; i++) {
		if (IS_ERR(clks[i])) {
			pr_err("Tegra clk %d: register failed with %ld\n",
			       i, PTR_ERR(clks[i]));
		}
		if (!clks[i])
			clks[i] = ERR_PTR(-EINVAL);
	}

	clk_data.clks = clks;
	clk_data.clk_num = clk_num;
	of_clk_add_provider(np, clk_src_onecell_get, &clk_data);

	rst_ctlr.of_node = np;
	rst_ctlr.nr_resets = periph_banks * 32 + num_special_reset;
	reset_controller_register(&rst_ctlr);
}

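/* Install SoC-specific handlers for reset lines beyond the standard peripheral banks. */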
void __init tegra_init_special_resets(unsigned int num,
				      int (*assert)(unsigned long),
				      int (*deassert)(unsigned long))
{
	num_special_reset = num;
	special_reset_assert = assert;
	special_reset_deassert = deassert;
}

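/*
 * Register clkdev lookups for the given device clocks, and additionally
 * register every valid clock under the "tegra-clk-debug" device name.
 */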
void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
{
	int i;

	for (i = 0; i < num; i++, dev_clks++)
		clk_register_clkdev(clks[dev_clks->dt_id], dev_clks->con_id,
				dev_clks->dev_id);

	for (i = 0; i < clk_num; i++) {
		if (!IS_ERR_OR_NULL(clks[i]))
			clk_register_clkdev(clks[i], __clk_get_name(clks[i]),
				"tegra-clk-debug");
	}
}

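/* Return the clks[] slot for a clock that is present on this SoC, or NULL otherwise. */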
struct clk ** __init tegra_lookup_dt_id(int clk_id,
					struct tegra_clk *tegra_clk)
{
	if (tegra_clk[clk_id].present)
		return &clks[tegra_clk[clk_id].dt_id];
	else
		return NULL;
}

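/* Hook set elsewhere by the SoC-specific clock driver to apply its init table. */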
tegra_clk_apply_init_table_func tegra_clk_apply_init_table;

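/* Run the init table hook, if one was installed, at arch_initcall time. */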
static int __init tegra_clocks_apply_init_table(void)
{
	if (!tegra_clk_apply_init_table)
		return 0;

	tegra_clk_apply_init_table();

	return 0;
}
arch_initcall(tegra_clocks_apply_init_table);