xref: /OK3568_Linux_fs/kernel/drivers/soc/dove/pmu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell Dove PMU support
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/dove/pmu.h>
#include <linux/spinlock.h>

#define NR_PMU_IRQS		7

#define PMC_SW_RST		0x30
#define PMC_IRQ_CAUSE		0x50
#define PMC_IRQ_MASK		0x54

#define PMU_PWR			0x10
#define PMU_ISO			0x58

struct pmu_data {
	spinlock_t lock;
	struct device_node *of_node;
	void __iomem *pmc_base;
	void __iomem *pmu_base;
	struct irq_chip_generic *irq_gc;
	struct irq_domain *irq_domain;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev reset;
#endif
};

/*
 * The PMU contains a register to reset various subsystems within the
 * SoC.  Export this as a reset controller.
 */
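/*
 * Illustrative consumer usage (a sketch, not part of this driver): a
 * peripheral driver whose device node carries "resets = <&pmu N>;",
 * with N being the bit index in PMC_SW_RST, could pulse its reset line
 * through the common reset API, e.g.:
 *
 *	struct reset_control *rst;
 *
 *	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 *	if (IS_ERR(rst))
 *		return PTR_ERR(rst);
 *	reset_control_reset(rst);
 */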
#ifdef CONFIG_RESET_CONTROLLER
#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)

static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&pmu->lock, flags);
	val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = ~BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static const struct reset_control_ops pmu_reset_ops = {
	.reset = pmu_reset_reset,
	.assert = pmu_reset_assert,
	.deassert = pmu_reset_deassert,
};

static struct reset_controller_dev pmu_reset __initdata = {
	.ops = &pmu_reset_ops,
	.owner = THIS_MODULE,
	.nr_resets = 32,
};

static void __init pmu_reset_init(struct pmu_data *pmu)
{
	int ret;

	pmu->reset = pmu_reset;
	pmu->reset.of_node = pmu->of_node;

	ret = reset_controller_register(&pmu->reset);
	if (ret)
		pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
}
#else
static void __init pmu_reset_init(struct pmu_data *pmu)
{
}
#endif

struct pmu_domain {
	struct pmu_data *pmu;
	u32 pwr_mask;
	u32 rst_mask;
	u32 iso_mask;
	struct generic_pm_domain base;
};

#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)

/*
 * This deals with the "old" Marvell sequence of bringing a power domain
 * down/up, which is: apply power, release reset, disable isolators.
 *
 * Later devices apparently use a different sequence: power up, disable
 * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
 * enable module clock, deassert reset.
 *
 * Note: reading the assembly, it seems that the IO accessors have an
 * unfortunate side-effect - they cause memory already read into registers
 * for the if () to be re-read for the bit-set or bit-clear operation.
 * The code is written to avoid this.
 */
static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Enable isolators */
	if (pmu_dom->iso_mask) {
		val = ~pmu_dom->iso_mask;
		val &= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	/* Reset unit */
	if (pmu_dom->rst_mask) {
		val = ~pmu_dom->rst_mask;
		val &= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Power down */
	val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
	writel_relaxed(val, pmu_base + PMU_PWR);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Power on */
	val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
	writel_relaxed(val, pmu_base + PMU_PWR);

	/* Release reset */
	if (pmu_dom->rst_mask) {
		val = pmu_dom->rst_mask;
		val |= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Disable isolators */
	if (pmu_dom->iso_mask) {
		val = pmu_dom->iso_mask;
		val |= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static void __pmu_domain_register(struct pmu_domain *domain,
	struct device_node *np)
{
	unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);

	domain->base.power_off = pmu_domain_power_off;
	domain->base.power_on = pmu_domain_power_on;

	pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));

	if (np)
		of_genpd_add_provider_simple(np, &domain->base);
}

/* PMU IRQ controller */
static void pmu_irq_handler(struct irq_desc *desc)
{
	struct pmu_data *pmu = irq_desc_get_handler_data(desc);
	struct irq_chip_generic *gc = pmu->irq_gc;
	struct irq_domain *domain = pmu->irq_domain;
	void __iomem *base = gc->reg_base;
	u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
	u32 done = ~0;

	if (stat == 0) {
		handle_bad_irq(desc);
		return;
	}

	while (stat) {
		u32 hwirq = fls(stat) - 1;

		stat &= ~(1 << hwirq);
		done &= ~(1 << hwirq);

		generic_handle_irq(irq_find_mapping(domain, hwirq));
	}

	/*
	 * The PMU mask register is not RW0C: it is RW.  This means that
	 * the bits take whatever value is written to them; if you write
	 * a '1', you will set the interrupt.
	 *
	 * Unfortunately this means there is NO race free way to clear
	 * these interrupts.
	 *
	 * So, let's structure the code so that the window is as small as
	 * possible.
	 */
	irq_gc_lock(gc);
	done &= readl_relaxed(base + PMC_IRQ_CAUSE);
	writel_relaxed(done, base + PMC_IRQ_CAUSE);
	irq_gc_unlock(gc);
}

static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
{
	const char *name = "pmu_irq";
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	/* mask and clear all interrupts */
	writel(0, pmu->pmc_base + PMC_IRQ_MASK);
	writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);

	domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
		irq_domain_remove(domain);
		return ret;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = pmu->pmc_base;
	gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	pmu->irq_domain = domain;
	pmu->irq_gc = gc;

	irq_set_handler_data(irq, pmu);
	irq_set_chained_handler(irq, pmu_irq_handler);

	return 0;
}

int __init dove_init_pmu_legacy(const struct dove_pmu_initdata *initdata)
{
	const struct dove_pmu_domain_initdata *domain_initdata;
	struct pmu_data *pmu;
	int ret;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->pmc_base = initdata->pmc_base;
	pmu->pmu_base = initdata->pmu_base;

	pmu_reset_init(pmu);
	for (domain_initdata = initdata->domains; domain_initdata->name;
	     domain_initdata++) {
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (domain) {
			domain->pmu = pmu;
			domain->pwr_mask = domain_initdata->pwr_mask;
			domain->rst_mask = domain_initdata->rst_mask;
			domain->iso_mask = domain_initdata->iso_mask;
			domain->base.name = domain_initdata->name;

			__pmu_domain_register(domain, NULL);
		}
	}

	ret = dove_init_pmu_irq(pmu, initdata->irq);
	if (ret)
		pr_err("dove_init_pmu_irq() failed: %d\n", ret);

	if (pmu->irq_domain)
		irq_domain_associate_many(pmu->irq_domain,
					  initdata->irq_domain_start,
					  0, NR_PMU_IRQS);

	return 0;
}

/*
 * pmu: power-manager@d0000 {
 *	compatible = "marvell,dove-pmu";
 *	reg = <0xd0000 0x8000>, <0xd8000 0x8000>;
 *	interrupts = <33>;
 *	interrupt-controller;
 *	#reset-cells = <1>;
 *	domains {
 *		vpu_domain: vpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000008>;
 *			marvell,pmu_iso_mask = <0x00000001>;
 *			resets = <&pmu 16>;
 *		};
 *		gpu_domain: gpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000004>;
 *			marvell,pmu_iso_mask = <0x00000002>;
 *			resets = <&pmu 18>;
 *		};
 *	};
 * };
 */
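/*
 * Illustrative consumer usage (a sketch, not taken from this file): a
 * peripheral node would reference one of the domains above with
 *
 *	power-domains = <&vpu_domain>;
 *
 * which works because each provider declares #power-domain-cells = <0>,
 * so no argument cells are needed.
 */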
int __init dove_init_pmu(void)
{
	struct device_node *np_pmu, *domains_node, *np;
	struct pmu_data *pmu;
	int ret, parent_irq;

	/* Lookup the PMU node */
	np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
	if (!np_pmu)
		return 0;

	domains_node = of_get_child_by_name(np_pmu, "domains");
	if (!domains_node) {
		pr_err("%pOFn: failed to find domains sub-node\n", np_pmu);
		return 0;
	}

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->of_node = np_pmu;
	pmu->pmc_base = of_iomap(pmu->of_node, 0);
	pmu->pmu_base = of_iomap(pmu->of_node, 1);
	if (!pmu->pmc_base || !pmu->pmu_base) {
		pr_err("%pOFn: failed to map PMU\n", np_pmu);
		iounmap(pmu->pmu_base);
		iounmap(pmu->pmc_base);
		kfree(pmu);
		return -ENOMEM;
	}

	pmu_reset_init(pmu);

	for_each_available_child_of_node(domains_node, np) {
		struct of_phandle_args args;
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (!domain)
			break;

		domain->pmu = pmu;
		domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
		if (!domain->base.name) {
			kfree(domain);
			break;
		}

		of_property_read_u32(np, "marvell,pmu_pwr_mask",
				     &domain->pwr_mask);
		of_property_read_u32(np, "marvell,pmu_iso_mask",
				     &domain->iso_mask);

		/*
		 * We parse the reset controller property directly here
		 * to ensure that we can operate when the reset controller
		 * support is not configured into the kernel.
		 */
		ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
						 0, &args);
		if (ret == 0) {
			if (args.np == pmu->of_node)
				domain->rst_mask = BIT(args.args[0]);
			of_node_put(args.np);
		}

		__pmu_domain_register(domain, np);
	}

	/* Loss of the interrupt controller is not a fatal error. */
	parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
	if (!parent_irq) {
		pr_err("%pOFn: no interrupt specified\n", np_pmu);
	} else {
		ret = dove_init_pmu_irq(pmu, parent_irq);
		if (ret)
			pr_err("dove_init_pmu_irq() failed: %d\n", ret);
	}

	return 0;
}