xref: /OK3568_Linux_fs/kernel/drivers/clk/mmp/clk-gate.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * mmp gate clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>

#include "clk.h"

/*
 * Some clocks will have multiple bits to enable the clocks, and
 * the bits to disable the clock are not the same as the enabling bits.
 */

#define to_clk_mmp_gate(hw)	container_of(hw, struct mmp_clk_gate, hw)
mmp_clk_gate_enable(struct clk_hw * hw)27*4882a593Smuzhiyun static int mmp_clk_gate_enable(struct clk_hw *hw)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
30*4882a593Smuzhiyun 	unsigned long flags = 0;
31*4882a593Smuzhiyun 	unsigned long rate;
32*4882a593Smuzhiyun 	u32 tmp;
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 	if (gate->lock)
35*4882a593Smuzhiyun 		spin_lock_irqsave(gate->lock, flags);
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	tmp = readl(gate->reg);
38*4882a593Smuzhiyun 	tmp &= ~gate->mask;
39*4882a593Smuzhiyun 	tmp |= gate->val_enable;
40*4882a593Smuzhiyun 	writel(tmp, gate->reg);
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	if (gate->lock)
43*4882a593Smuzhiyun 		spin_unlock_irqrestore(gate->lock, flags);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
46*4882a593Smuzhiyun 		rate = clk_hw_get_rate(hw);
47*4882a593Smuzhiyun 		/* Need delay 2 cycles. */
48*4882a593Smuzhiyun 		udelay(2000000/rate);
49*4882a593Smuzhiyun 	}
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	return 0;
52*4882a593Smuzhiyun }
53*4882a593Smuzhiyun 
mmp_clk_gate_disable(struct clk_hw * hw)54*4882a593Smuzhiyun static void mmp_clk_gate_disable(struct clk_hw *hw)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun 	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
57*4882a593Smuzhiyun 	unsigned long flags = 0;
58*4882a593Smuzhiyun 	u32 tmp;
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	if (gate->lock)
61*4882a593Smuzhiyun 		spin_lock_irqsave(gate->lock, flags);
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	tmp = readl(gate->reg);
64*4882a593Smuzhiyun 	tmp &= ~gate->mask;
65*4882a593Smuzhiyun 	tmp |= gate->val_disable;
66*4882a593Smuzhiyun 	writel(tmp, gate->reg);
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	if (gate->lock)
69*4882a593Smuzhiyun 		spin_unlock_irqrestore(gate->lock, flags);
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun 
mmp_clk_gate_is_enabled(struct clk_hw * hw)72*4882a593Smuzhiyun static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
73*4882a593Smuzhiyun {
74*4882a593Smuzhiyun 	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
75*4882a593Smuzhiyun 	unsigned long flags = 0;
76*4882a593Smuzhiyun 	u32 tmp;
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 	if (gate->lock)
79*4882a593Smuzhiyun 		spin_lock_irqsave(gate->lock, flags);
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	tmp = readl(gate->reg);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	if (gate->lock)
84*4882a593Smuzhiyun 		spin_unlock_irqrestore(gate->lock, flags);
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 	return (tmp & gate->mask) == gate->val_enable;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun 
/* clk_ops for gates whose enable/disable bit patterns differ arbitrarily. */
const struct clk_ops mmp_clk_gate_ops = {
	.enable = mmp_clk_gate_enable,
	.disable = mmp_clk_gate_disable,
	.is_enabled = mmp_clk_gate_is_enabled,
};
94*4882a593Smuzhiyun 
/**
 * mmp_clk_register_gate - register an MMP gate clock with the clk framework
 * @dev: device registering this clock (may be NULL)
 * @name: name of this clock
 * @parent_name: name of the parent clock, or NULL for a root clock
 * @flags: framework-specific CLK_* flags
 * @reg: register controlling the gate
 * @mask: bits of @reg that belong to this gate
 * @val_enable: value of the masked field that enables the clock
 * @val_disable: value of the masked field that disables the clock
 * @gate_flags: MMP-specific flags (e.g. MMP_CLK_GATE_NEED_DELAY)
 * @lock: spinlock serializing access to @reg, or NULL if not needed
 *
 * Return: a valid struct clk pointer, or an ERR_PTR() on failure.
 */
struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
		unsigned int gate_flags, spinlock_t *lock)
{
	struct mmp_clk_gate *gate;
	struct clk *clk;
	/*
	 * Zero-initialize: the clk core may also look at members we do
	 * not set here (parent_data, parent_hws); they must be NULL, not
	 * stack garbage.
	 */
	struct clk_init_data init = {};

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &mmp_clk_gate_ops;
	init.flags = flags;
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);

	/* struct mmp_clk_gate assignments */
	gate->reg = reg;
	gate->mask = mask;
	gate->val_enable = val_enable;
	gate->val_disable = val_disable;
	gate->flags = gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	clk = clk_register(dev, &gate->hw);

	/* On failure the clk core keeps no reference to @gate; free it. */
	if (IS_ERR(clk))
		kfree(gate);

	return clk;
}
131