xref: /OK3568_Linux_fs/kernel/drivers/clk/clk-mux.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Simple multiplexer clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent switching.  No clk_set_rate support
 * parent - parent is adjustable through clk_set_parent
 */

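/*
 * Usage sketch: a minimal consumer-side example of the traits listed
 * above.  Because this clock has no .set_rate, its rate only changes as a
 * side effect of re-parenting.  The clock and parent names here are
 * hypothetical, not taken from this file.
 *
 *	struct clk *mux = clk_get(dev, "uart_mux");	// hypothetical lookup
 *	struct clk *parent = clk_get(dev, "pll_24m");	// hypothetical parent
 *
 *	clk_set_parent(mux, parent);	// route the mux to the new parent
 *	rate = clk_get_rate(mux);	// now follows the selected parent
 */
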
static inline u32 clk_mux_readl(struct clk_mux *mux)
{
	if (mux->flags & CLK_MUX_BIG_ENDIAN)
		return ioread32be(mux->reg);

	return readl(mux->reg);
}

static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
{
	if (mux->flags & CLK_MUX_BIG_ENDIAN)
		iowrite32be(val, mux->reg);
	else
		writel(val, mux->reg);
}

int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
			 unsigned int val)
{
	int num_parents = clk_hw_get_num_parents(hw);

	if (table) {
		int i;

		for (i = 0; i < num_parents; i++)
			if (table[i] == val)
				return i;
		return -EINVAL;
	}

	if (val && (flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;

	if (val && (flags & CLK_MUX_INDEX_ONE))
		val--;

	if (val >= num_parents)
		return -EINVAL;

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_val_to_index);

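/*
 * Worked example for clk_mux_val_to_index() (hypothetical values, assuming
 * at least three parents): with CLK_MUX_INDEX_BIT the field is one-hot, so
 * a raw value of 0x4 maps to index ffs(0x4) - 1 = 2; with CLK_MUX_INDEX_ONE
 * the field is one-based, so a raw value of 3 maps to index 2.  Anything
 * landing outside 0..num_parents-1 returns -EINVAL.
 *
 *	clk_mux_val_to_index(hw, NULL, CLK_MUX_INDEX_BIT, 0x4);	// -> 2
 *	clk_mux_val_to_index(hw, NULL, CLK_MUX_INDEX_ONE, 3);	// -> 2
 */
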
unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
{
	unsigned int val = index;

	if (table) {
		val = table[index];
	} else {
		if (flags & CLK_MUX_INDEX_BIT)
			val = 1 << index;

		if (flags & CLK_MUX_INDEX_ONE)
			val++;
	}

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_index_to_val);

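/*
 * Worked example for the inverse mapping (hypothetical values): a lookup
 * table takes precedence over the flags, otherwise the flags transform
 * the index.
 *
 *	static u32 tbl[] = { 1, 2, 4, 8 };			// hypothetical table
 *
 *	clk_mux_index_to_val(tbl, 0, 2);			// -> tbl[2] == 4
 *	clk_mux_index_to_val(NULL, CLK_MUX_INDEX_BIT, 2);	// -> 1 << 2 == 0x4
 *	clk_mux_index_to_val(NULL, CLK_MUX_INDEX_ONE, 2);	// -> 2 + 1 == 3
 */
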
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;

	val = clk_mux_readl(mux) >> mux->shift;
	val &= mux->mask;

	return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
}

static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		reg = mux->mask << (mux->shift + 16);
	} else {
		reg = clk_mux_readl(mux);
		reg &= ~(mux->mask << mux->shift);
	}
	val = val << mux->shift;
	reg |= val;
	clk_mux_writel(mux, reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

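/*
 * Worked example for the CLK_MUX_HIWORD_MASK branch above (hypothetical
 * field: mask = 0x3, shift = 8, selected value = 2).  The write-enable
 * bits go in the upper half-word and the value in the lower half-word:
 *
 *	reg = (0x3 << (8 + 16)) | (2 << 8) = 0x03000200
 *
 * so the hardware updates only the two masked bits and no read-modify-write
 * is needed; without HIWORD_MASK the register is read, the field cleared
 * and the new value OR'd in, all under mux->lock when one is provided.
 */
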
static int clk_mux_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);

const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);

struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
		const char *name, u8 num_parents,
		const char * const *parent_names,
		const struct clk_hw **parent_hws,
		const struct clk_parent_data *parent_data,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	u8 width = 0;
	int ret = -EINVAL;

	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		width = fls(mask) - ffs(mask) + 1;
		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the mux */
	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.parent_data = parent_data;
	init.parent_hws = parent_hws;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	hw = &mux->hw;
	if (dev || !np)
		ret = clk_hw_register(dev, hw);
	else if (np)
		ret = of_clk_hw_register(np, hw);
	if (ret) {
		kfree(mux);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_mux);

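/*
 * Provider-side sketch, assuming the clk_hw_register_mux() wrapper from
 * <linux/clk-provider.h> (it supplies the NULL table/parent_data arguments
 * and converts a field width into the mask passed to the function above).
 * The register offset, shift, width and parent names are hypothetical.
 *
 *	static const char * const uart_parents[] = { "pll_24m", "xin_osc" };
 *	static DEFINE_SPINLOCK(uart_mux_lock);
 *
 *	hw = clk_hw_register_mux(dev, "uart_mux", uart_parents,
 *				 ARRAY_SIZE(uart_parents), CLK_SET_RATE_PARENT,
 *				 base + 0x40, 8, 2, 0, &uart_mux_lock);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 */
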
struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_mux_table(dev, name, parent_names,
				       num_parents, flags, reg, shift, mask,
				       clk_mux_flags, table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);

void clk_unregister_mux(struct clk *clk)
{
	struct clk_mux *mux;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	mux = to_clk_mux(hw);

	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);

void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux;

	mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);
251