xref: /OK3568_Linux_fs/kernel/drivers/irqchip/irq-mvebu-icu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2017 Marvell
 *
 * Hanna Hawa <hannah@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <dt-bindings/interrupt-controller/mvebu-icu.h>

/* ICU registers */
#define ICU_SETSPI_NSR_AL	0x10
#define ICU_SETSPI_NSR_AH	0x14
#define ICU_CLRSPI_NSR_AL	0x18
#define ICU_CLRSPI_NSR_AH	0x1c
#define ICU_SET_SEI_AL		0x50
#define ICU_SET_SEI_AH		0x54
#define ICU_CLR_SEI_AL		0x58
#define ICU_CLR_SEI_AH		0x5C
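/*
 * Per-interrupt configuration register: the low bits carry the MSI
 * message data programmed by mvebu_icu_write_msg(), plus the enable,
 * edge and group fields defined below.
 */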
#define ICU_INT_CFG(x)          (0x100 + 4 * (x))
#define   ICU_INT_ENABLE	BIT(24)
#define   ICU_IS_EDGE		BIT(28)
#define   ICU_GROUP_SHIFT	29

/* ICU definitions */
#define ICU_MAX_IRQS		207
#define ICU_SATA0_ICU_ID	109
#define ICU_SATA1_ICU_ID	107

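/*
 * Register offsets for one ICU group (NSR or SEI): where the 'SET' and,
 * for groups that support level signalling, the 'CLEAR' doorbell
 * addresses are programmed.
 */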
struct mvebu_icu_subset_data {
	unsigned int icu_group;
	unsigned int offset_set_ah;
	unsigned int offset_set_al;
	unsigned int offset_clr_ah;
	unsigned int offset_clr_al;
};

struct mvebu_icu {
	void __iomem *base;
	struct device *dev;
};

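/*
 * Per-group (NSR/SEI) MSI domain state: back-pointer to the ICU, a
 * one-time initialization flag and the group's register offsets.
 */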
struct mvebu_icu_msi_data {
	struct mvebu_icu *icu;
	atomic_t initialized;
	const struct mvebu_icu_subset_data *subset_data;
};

struct mvebu_icu_irq_data {
	struct mvebu_icu *icu;
	unsigned int icu_group;
	unsigned int type;
};

static DEFINE_STATIC_KEY_FALSE(legacy_bindings);

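/*
 * One-off per MSI domain: latch the doorbell address from the parent's
 * MSI message into the ICU 'SET' registers. Only the NSR group, which
 * carries level interrupts, also gets a 'CLEAR' address.
 */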
static void mvebu_icu_init(struct mvebu_icu *icu,
			   struct mvebu_icu_msi_data *msi_data,
			   struct msi_msg *msg)
{
	const struct mvebu_icu_subset_data *subset = msi_data->subset_data;

	if (atomic_cmpxchg(&msi_data->initialized, false, true))
		return;

	/* Set 'SET' ICU SPI message address in AP */
	writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
	writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);

	if (subset->icu_group != ICU_GRP_NSR)
		return;

	/* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
	writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
	writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}

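/*
 * platform-MSI write_msg callback: program (or clear, when the message
 * address is zero) the ICU_INT_CFG entry for this wired interrupt with
 * the message data, trigger type and ICU group.
 */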
static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct irq_data *d = irq_get_irq_data(desc->irq);
	struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
	struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
	struct mvebu_icu *icu = icu_irqd->icu;
	unsigned int icu_int;

	if (msg->address_lo || msg->address_hi) {
		/* One off initialization per domain */
		mvebu_icu_init(icu, msi_data, msg);
		/* Configure the ICU with irq number & type */
		icu_int = msg->data | ICU_INT_ENABLE;
		if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
			icu_int |= ICU_IS_EDGE;
		icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
	} else {
		/* De-configure the ICU */
		icu_int = 0;
	}

	writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));

	/*
	 * The SATA unit has 2 ports, and a dedicated ICU entry per
	 * port. The ahci sata driver supports only one interrupt per
	 * SATA unit. To solve this conflict, we configure the 2 SATA
	 * wired interrupts in the south bridge into 1 GIC interrupt
	 * in the north bridge. Even if only a single port is enabled,
	 * if the sata node is enabled, both interrupts are configured
	 * (regardless of which port is actually in use).
	 */
	if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
		writel_relaxed(icu_int,
			       icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
		writel_relaxed(icu_int,
			       icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
	}
}

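/*
 * Both irq_chips forward every operation to the parent domain; the SEI
 * variant needs an ack callback because SEI interrupts are edge
 * signalled, while NSR interrupts are level and use EOI.
 */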
static struct irq_chip mvebu_icu_nsr_chip = {
	.name			= "ICU-NSR",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};

static struct irq_chip mvebu_icu_sei_chip = {
	.name			= "ICU-SEI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};

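/*
 * Decode the DT interrupt specifier: the legacy bindings use three cells
 * (group, index, type), the current bindings two cells (index, type)
 * with the group implied by the NSR/SEI sub-node.
 */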
static int
mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
			       unsigned long *hwirq, unsigned int *type)
{
	struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
	struct mvebu_icu *icu = msi_data->icu;
	unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;

	/* Check the count of the parameters in dt */
	if (WARN_ON(fwspec->param_count != param_count)) {
		dev_err(icu->dev, "wrong ICU parameter count %d\n",
			fwspec->param_count);
		return -EINVAL;
	}

	if (static_branch_unlikely(&legacy_bindings)) {
		*hwirq = fwspec->param[1];
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		if (fwspec->param[0] != ICU_GRP_NSR) {
			dev_err(icu->dev, "wrong ICU group type %x\n",
				fwspec->param[0]);
			return -EINVAL;
		}
	} else {
		*hwirq = fwspec->param[0];
		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;

		/*
		 * The ICU receives level interrupts. While the NSR are also
		 * level interrupts, SEI are edge interrupts. Force the type
		 * here in this case. Please note that this makes the interrupt
		 * handling unreliable.
		 */
		if (msi_data->subset_data->icu_group == ICU_GRP_SEI)
			*type = IRQ_TYPE_EDGE_RISING;
	}

	if (*hwirq >= ICU_MAX_IRQS) {
		dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
		return -EINVAL;
	}

	return 0;
}

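/*
 * Allocate one wired interrupt: translate the specifier, allocate the
 * backing interrupt in the parent platform-MSI domain, clear any state
 * left pending by firmware, then attach the NSR or SEI irq_chip and the
 * per-interrupt data.
 */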
static int
mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs, void *args)
{
	int err;
	unsigned long hwirq;
	struct irq_fwspec *fwspec = args;
	struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
	struct mvebu_icu *icu = msi_data->icu;
	struct mvebu_icu_irq_data *icu_irqd;
	struct irq_chip *chip = &mvebu_icu_nsr_chip;

	icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
	if (!icu_irqd)
		return -ENOMEM;

	err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
					     &icu_irqd->type);
	if (err) {
		dev_err(icu->dev, "failed to translate ICU parameters\n");
		goto free_irqd;
	}

	if (static_branch_unlikely(&legacy_bindings))
		icu_irqd->icu_group = fwspec->param[0];
	else
		icu_irqd->icu_group = msi_data->subset_data->icu_group;
	icu_irqd->icu = icu;

	err = platform_msi_domain_alloc(domain, virq, nr_irqs);
	if (err) {
		dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
		goto free_irqd;
	}

	/* Make sure there is no interrupt left pending by the firmware */
	err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
	if (err)
		goto free_msi;

	if (icu_irqd->icu_group == ICU_GRP_SEI)
		chip = &mvebu_icu_sei_chip;

	err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					    chip, icu_irqd);
	if (err) {
		dev_err(icu->dev, "failed to set the data to IRQ domain\n");
		goto free_msi;
	}

	return 0;

free_msi:
	platform_msi_domain_free(domain, virq, nr_irqs);
free_irqd:
	kfree(icu_irqd);
	return err;
}

static void
mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
			  unsigned int nr_irqs)
{
	struct irq_data *d = irq_get_irq_data(virq);
	struct mvebu_icu_irq_data *icu_irqd = d->chip_data;

	kfree(icu_irqd);

	platform_msi_domain_free(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mvebu_icu_domain_ops = {
	.translate = mvebu_icu_irq_domain_translate,
	.alloc     = mvebu_icu_irq_domain_alloc,
	.free      = mvebu_icu_irq_domain_free,
};

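/*
 * Per-group register layout: NSR has both 'SET' and 'CLEAR' doorbell
 * addresses (level MSIs), SEI only has a 'SET' address.
 */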
static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
	.icu_group = ICU_GRP_NSR,
	.offset_set_ah = ICU_SETSPI_NSR_AH,
	.offset_set_al = ICU_SETSPI_NSR_AL,
	.offset_clr_ah = ICU_CLRSPI_NSR_AH,
	.offset_clr_al = ICU_CLRSPI_NSR_AL,
};

static const struct mvebu_icu_subset_data mvebu_icu_sei_subset_data = {
	.icu_group = ICU_GRP_SEI,
	.offset_set_ah = ICU_SET_SEI_AH,
	.offset_set_al = ICU_SET_SEI_AL,
};

static const struct of_device_id mvebu_icu_subset_of_match[] = {
	{
		.compatible = "marvell,cp110-icu-nsr",
		.data = &mvebu_icu_nsr_subset_data,
	},
	{
		.compatible = "marvell,cp110-icu-sei",
		.data = &mvebu_icu_sei_subset_data,
	},
	{},
};

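/*
 * Probe one ICU group (NSR or SEI): resolve its MSI parent and create a
 * platform-MSI device domain covering the ICU_MAX_IRQS wired inputs.
 */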
static int mvebu_icu_subset_probe(struct platform_device *pdev)
{
	struct mvebu_icu_msi_data *msi_data;
	struct device_node *msi_parent_dn;
	struct device *dev = &pdev->dev;
	struct irq_domain *irq_domain;

	msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	if (static_branch_unlikely(&legacy_bindings)) {
		msi_data->icu = dev_get_drvdata(dev);
		msi_data->subset_data = &mvebu_icu_nsr_subset_data;
	} else {
		msi_data->icu = dev_get_drvdata(dev->parent);
		msi_data->subset_data = of_device_get_match_data(dev);
	}

	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_PLATFORM_MSI);
	if (!dev->msi_domain)
		return -EPROBE_DEFER;

	msi_parent_dn = irq_domain_get_of_node(dev->msi_domain);
	if (!msi_parent_dn)
		return -ENODEV;

	irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
							    mvebu_icu_write_msg,
							    &mvebu_icu_domain_ops,
							    msi_data);
	if (!irq_domain) {
		dev_err(dev, "Failed to create ICU MSI domain\n");
		return -ENOMEM;
	}

	return 0;
}

static struct platform_driver mvebu_icu_subset_driver = {
	.probe  = mvebu_icu_subset_probe,
	.driver = {
		.name = "mvebu-icu-subset",
		.of_match_table = mvebu_icu_subset_of_match,
	},
};
builtin_platform_driver(mvebu_icu_subset_driver);

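/*
 * Top-level ICU probe: map the registers, detect legacy vs. current
 * bindings from the presence of child nodes, wipe the entries left
 * configured by firmware, then either probe the NSR group directly
 * (legacy) or populate the per-group child devices.
 */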
static int mvebu_icu_probe(struct platform_device *pdev)
{
	struct mvebu_icu *icu;
	struct resource *res;
	int i;

	icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
			   GFP_KERNEL);
	if (!icu)
		return -ENOMEM;

	icu->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	icu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(icu->base)) {
		dev_err(&pdev->dev, "Failed to map icu base address.\n");
		return PTR_ERR(icu->base);
	}

	/*
	 * Legacy bindings: ICU is one node with one MSI parent: manually
	 *                  force the probe of the NSR interrupt side.
	 * New bindings: ICU node has children, one per interrupt controller
	 *               having its own MSI parent: call platform_populate().
	 * All ICU instances should use the same bindings.
	 */
	if (!of_get_child_count(pdev->dev.of_node))
		static_branch_enable(&legacy_bindings);

	/*
	 * Clean all ICU interrupts of type NSR and SEI, required to
	 * avoid unpredictable SPI assignments done by firmware.
	 */
	for (i = 0 ; i < ICU_MAX_IRQS ; i++) {
		u32 icu_int, icu_grp;

		icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
		icu_grp = icu_int >> ICU_GROUP_SHIFT;

		if (icu_grp == ICU_GRP_NSR ||
		    (icu_grp == ICU_GRP_SEI &&
		     !static_branch_unlikely(&legacy_bindings)))
			writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
	}

	platform_set_drvdata(pdev, icu);

	if (static_branch_unlikely(&legacy_bindings))
		return mvebu_icu_subset_probe(pdev);
	else
		return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id mvebu_icu_of_match[] = {
	{ .compatible = "marvell,cp110-icu", },
	{},
};

static struct platform_driver mvebu_icu_driver = {
	.probe  = mvebu_icu_probe,
	.driver = {
		.name = "mvebu-icu",
		.of_match_table = mvebu_icu_of_match,
	},
};
builtin_platform_driver(mvebu_icu_driver);