/* xref: /OK3568_Linux_fs/kernel/drivers/clk/x86/clk-cgu-pll.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define to_lgm_clk_pll(_hw)	container_of(_hw, struct lgm_clk_pll, hw)
#define PLL_REF_DIV(x)		((x) + 0x08)

19*4882a593Smuzhiyun /*
20*4882a593Smuzhiyun  * Calculate formula:
21*4882a593Smuzhiyun  * rate = (prate * mult + (prate * frac) / frac_div) / div
22*4882a593Smuzhiyun  */
23*4882a593Smuzhiyun static unsigned long
lgm_pll_calc_rate(unsigned long prate,unsigned int mult,unsigned int div,unsigned int frac,unsigned int frac_div)24*4882a593Smuzhiyun lgm_pll_calc_rate(unsigned long prate, unsigned int mult,
25*4882a593Smuzhiyun 		  unsigned int div, unsigned int frac, unsigned int frac_div)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun 	u64 crate, frate, rate64;
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun 	rate64 = prate;
30*4882a593Smuzhiyun 	crate = rate64 * mult;
31*4882a593Smuzhiyun 	frate = rate64 * frac;
32*4882a593Smuzhiyun 	do_div(frate, frac_div);
33*4882a593Smuzhiyun 	crate += frate;
34*4882a593Smuzhiyun 	do_div(crate, div);
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	return crate;
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun 
lgm_pll_recalc_rate(struct clk_hw * hw,unsigned long prate)39*4882a593Smuzhiyun static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
42*4882a593Smuzhiyun 	unsigned int div, mult, frac;
43*4882a593Smuzhiyun 	unsigned long flags;
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	spin_lock_irqsave(&pll->lock, flags);
46*4882a593Smuzhiyun 	mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
47*4882a593Smuzhiyun 	div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
48*4882a593Smuzhiyun 	frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
49*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pll->lock, flags);
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	if (pll->type == TYPE_LJPLL)
52*4882a593Smuzhiyun 		div *= 4;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24));
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
lgm_pll_is_enabled(struct clk_hw * hw)57*4882a593Smuzhiyun static int lgm_pll_is_enabled(struct clk_hw *hw)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
60*4882a593Smuzhiyun 	unsigned long flags;
61*4882a593Smuzhiyun 	unsigned int ret;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	spin_lock_irqsave(&pll->lock, flags);
64*4882a593Smuzhiyun 	ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
65*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pll->lock, flags);
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	return ret;
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun 
lgm_pll_enable(struct clk_hw * hw)70*4882a593Smuzhiyun static int lgm_pll_enable(struct clk_hw *hw)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
73*4882a593Smuzhiyun 	unsigned long flags;
74*4882a593Smuzhiyun 	u32 val;
75*4882a593Smuzhiyun 	int ret;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	spin_lock_irqsave(&pll->lock, flags);
78*4882a593Smuzhiyun 	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
79*4882a593Smuzhiyun 	ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
80*4882a593Smuzhiyun 					val, (val & 0x1), 1, 100);
81*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pll->lock, flags);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	return ret;
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
lgm_pll_disable(struct clk_hw * hw)86*4882a593Smuzhiyun static void lgm_pll_disable(struct clk_hw *hw)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
89*4882a593Smuzhiyun 	unsigned long flags;
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	spin_lock_irqsave(&pll->lock, flags);
92*4882a593Smuzhiyun 	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
93*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pll->lock, flags);
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun static const struct clk_ops lgm_pll_ops = {
97*4882a593Smuzhiyun 	.recalc_rate = lgm_pll_recalc_rate,
98*4882a593Smuzhiyun 	.is_enabled = lgm_pll_is_enabled,
99*4882a593Smuzhiyun 	.enable = lgm_pll_enable,
100*4882a593Smuzhiyun 	.disable = lgm_pll_disable,
101*4882a593Smuzhiyun };
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun static struct clk_hw *
lgm_clk_register_pll(struct lgm_clk_provider * ctx,const struct lgm_pll_clk_data * list)104*4882a593Smuzhiyun lgm_clk_register_pll(struct lgm_clk_provider *ctx,
105*4882a593Smuzhiyun 		     const struct lgm_pll_clk_data *list)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun 	struct clk_init_data init = {};
108*4882a593Smuzhiyun 	struct lgm_clk_pll *pll;
109*4882a593Smuzhiyun 	struct device *dev = ctx->dev;
110*4882a593Smuzhiyun 	struct clk_hw *hw;
111*4882a593Smuzhiyun 	int ret;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	init.ops = &lgm_pll_ops;
114*4882a593Smuzhiyun 	init.name = list->name;
115*4882a593Smuzhiyun 	init.flags = list->flags;
116*4882a593Smuzhiyun 	init.parent_data = list->parent_data;
117*4882a593Smuzhiyun 	init.num_parents = list->num_parents;
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
120*4882a593Smuzhiyun 	if (!pll)
121*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	pll->membase = ctx->membase;
124*4882a593Smuzhiyun 	pll->lock = ctx->lock;
125*4882a593Smuzhiyun 	pll->reg = list->reg;
126*4882a593Smuzhiyun 	pll->flags = list->flags;
127*4882a593Smuzhiyun 	pll->type = list->type;
128*4882a593Smuzhiyun 	pll->hw.init = &init;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	hw = &pll->hw;
131*4882a593Smuzhiyun 	ret = devm_clk_hw_register(dev, hw);
132*4882a593Smuzhiyun 	if (ret)
133*4882a593Smuzhiyun 		return ERR_PTR(ret);
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	return hw;
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun 
lgm_clk_register_plls(struct lgm_clk_provider * ctx,const struct lgm_pll_clk_data * list,unsigned int nr_clk)138*4882a593Smuzhiyun int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
139*4882a593Smuzhiyun 			  const struct lgm_pll_clk_data *list,
140*4882a593Smuzhiyun 			  unsigned int nr_clk)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	struct clk_hw *hw;
143*4882a593Smuzhiyun 	int i;
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 	for (i = 0; i < nr_clk; i++, list++) {
146*4882a593Smuzhiyun 		hw = lgm_clk_register_pll(ctx, list);
147*4882a593Smuzhiyun 		if (IS_ERR(hw)) {
148*4882a593Smuzhiyun 			dev_err(ctx->dev, "failed to register pll: %s\n",
149*4882a593Smuzhiyun 				list->name);
150*4882a593Smuzhiyun 			return PTR_ERR(hw);
151*4882a593Smuzhiyun 		}
152*4882a593Smuzhiyun 		ctx->clk_data.hws[list->id] = hw;
153*4882a593Smuzhiyun 	}
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	return 0;
156*4882a593Smuzhiyun }