xref: /OK3568_Linux_fs/kernel/drivers/irqchip/irq-mvebu-sei.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun 
3*4882a593Smuzhiyun #define pr_fmt(fmt) "mvebu-sei: " fmt
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/interrupt.h>
6*4882a593Smuzhiyun #include <linux/irq.h>
7*4882a593Smuzhiyun #include <linux/irqchip.h>
8*4882a593Smuzhiyun #include <linux/irqchip/chained_irq.h>
9*4882a593Smuzhiyun #include <linux/irqdomain.h>
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/msi.h>
12*4882a593Smuzhiyun #include <linux/platform_device.h>
13*4882a593Smuzhiyun #include <linux/of_address.h>
14*4882a593Smuzhiyun #include <linux/of_irq.h>
15*4882a593Smuzhiyun #include <linux/of_platform.h>
16*4882a593Smuzhiyun 
/* Cause register: a set bit reports a pending SEI; write 1 to ack/clear */
#define GICP_SECR(idx)		(0x0  + ((idx) * 0x4))
/* Mask register: a set bit disables the corresponding SEI */
#define GICP_SEMR(idx)		(0x20 + ((idx) * 0x4))
/* MSI doorbell: writing an interrupt number here triggers that SEI */
#define GICP_SET_SEI_OFFSET	0x30

#define SEI_IRQ_COUNT_PER_REG	32
#define SEI_IRQ_REG_COUNT	2
/* Total of 64 SEIs spread over two 32-bit cause/mask register pairs */
#define SEI_IRQ_COUNT		(SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
#define SEI_IRQ_REG_IDX(irq_id)	((irq_id) / SEI_IRQ_COUNT_PER_REG)
#define SEI_IRQ_REG_BIT(irq_id)	((irq_id) % SEI_IRQ_COUNT_PER_REG)
28*4882a593Smuzhiyun 
/* A contiguous range of SEI numbers: [first, first + size) */
struct mvebu_sei_interrupt_range {
	u32 first;
	u32 size;
};

/*
 * Per-SoC capabilities: how the 64 SEIs are split between interrupts
 * wired inside the AP and MSI-like interrupts raised by the CPs.
 */
struct mvebu_sei_caps {
	struct mvebu_sei_interrupt_range ap_range;
	struct mvebu_sei_interrupt_range cp_range;
};

struct mvebu_sei {
	struct device *dev;
	/* Mapped controller registers */
	void __iomem *base;
	/* MEM resource, kept so its start can be used as the MSI doorbell */
	struct resource *res;
	/* Root domain covering all SEIs */
	struct irq_domain *sei_domain;
	/* Child domain for wired AP interrupts */
	struct irq_domain *ap_domain;
	/* Child domain for CP MSI interrupts */
	struct irq_domain *cp_domain;
	const struct mvebu_sei_caps *caps;

	/* Lock on MSI allocations/releases */
	struct mutex cp_msi_lock;
	DECLARE_BITMAP(cp_msi_bitmap, SEI_IRQ_COUNT);

	/* Lock on IRQ masking register */
	raw_spinlock_t mask_lock;
};
55*4882a593Smuzhiyun 
mvebu_sei_ack_irq(struct irq_data * d)56*4882a593Smuzhiyun static void mvebu_sei_ack_irq(struct irq_data *d)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun 	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
59*4882a593Smuzhiyun 	u32 reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	writel_relaxed(BIT(SEI_IRQ_REG_BIT(d->hwirq)),
62*4882a593Smuzhiyun 		       sei->base + GICP_SECR(reg_idx));
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
mvebu_sei_mask_irq(struct irq_data * d)65*4882a593Smuzhiyun static void mvebu_sei_mask_irq(struct irq_data *d)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun 	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
68*4882a593Smuzhiyun 	u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
69*4882a593Smuzhiyun 	unsigned long flags;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	/* 1 disables the interrupt */
72*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&sei->mask_lock, flags);
73*4882a593Smuzhiyun 	reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
74*4882a593Smuzhiyun 	reg |= BIT(SEI_IRQ_REG_BIT(d->hwirq));
75*4882a593Smuzhiyun 	writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
76*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun 
mvebu_sei_unmask_irq(struct irq_data * d)79*4882a593Smuzhiyun static void mvebu_sei_unmask_irq(struct irq_data *d)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun 	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
82*4882a593Smuzhiyun 	u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
83*4882a593Smuzhiyun 	unsigned long flags;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	/* 0 enables the interrupt */
86*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&sei->mask_lock, flags);
87*4882a593Smuzhiyun 	reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
88*4882a593Smuzhiyun 	reg &= ~BIT(SEI_IRQ_REG_BIT(d->hwirq));
89*4882a593Smuzhiyun 	writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
90*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun 
/*
 * SEIs are funneled through a single parent SPI, so per-interrupt
 * affinity cannot be set at this level; always reject the request.
 */
static int mvebu_sei_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	return -EINVAL;
}
99*4882a593Smuzhiyun 
static int mvebu_sei_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which,
				       bool state)
{
	/* We can only clear the pending state by acking the interrupt */
	if (which == IRQCHIP_STATE_PENDING && !state) {
		mvebu_sei_ack_irq(d);
		return 0;
	}

	return -EINVAL;
}
111*4882a593Smuzhiyun 
/* irq_chip for the root SEI domain, driving the SECR/SEMR registers */
static struct irq_chip mvebu_sei_irq_chip = {
	.name			= "SEI",
	.irq_ack		= mvebu_sei_ack_irq,
	.irq_mask		= mvebu_sei_mask_irq,
	.irq_unmask		= mvebu_sei_unmask_irq,
	.irq_set_affinity       = mvebu_sei_set_affinity,
	.irq_set_irqchip_state	= mvebu_sei_set_irqchip_state,
};
120*4882a593Smuzhiyun 
mvebu_sei_ap_set_type(struct irq_data * data,unsigned int type)121*4882a593Smuzhiyun static int mvebu_sei_ap_set_type(struct irq_data *data, unsigned int type)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun 	if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
124*4882a593Smuzhiyun 		return -EINVAL;
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	return 0;
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun 
/* irq_chip for wired AP interrupts; everything but set_type is delegated
 * to the parent (root SEI) domain. */
static struct irq_chip mvebu_sei_ap_irq_chip = {
	.name			= "AP SEI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_affinity       = irq_chip_set_affinity_parent,
	.irq_set_type		= mvebu_sei_ap_set_type,
};
137*4882a593Smuzhiyun 
/*
 * Build the MSI message for a CP interrupt: the doorbell is the
 * GICP_SET_SEI register (physical address), and the payload is the
 * absolute SEI number (local hwirq offset into the CP range).
 */
static void mvebu_sei_cp_compose_msi_msg(struct irq_data *data,
					 struct msi_msg *msg)
{
	struct mvebu_sei *sei = data->chip_data;
	phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET;

	msg->data = data->hwirq + sei->caps->cp_range.first;
	msg->address_lo = lower_32_bits(set);
	msg->address_hi = upper_32_bits(set);
}
148*4882a593Smuzhiyun 
mvebu_sei_cp_set_type(struct irq_data * data,unsigned int type)149*4882a593Smuzhiyun static int mvebu_sei_cp_set_type(struct irq_data *data, unsigned int type)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
152*4882a593Smuzhiyun 		return -EINVAL;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	return 0;
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun 
/* irq_chip for CP (MSI) interrupts; mask/ack/affinity are delegated to
 * the parent (root SEI) domain, plus MSI message composition. */
static struct irq_chip mvebu_sei_cp_irq_chip = {
	.name			= "CP SEI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_affinity       = irq_chip_set_affinity_parent,
	.irq_set_type		= mvebu_sei_cp_set_type,
	.irq_compose_msi_msg	= mvebu_sei_cp_compose_msi_msg,
};
166*4882a593Smuzhiyun 
/*
 * Allocator for the root SEI domain. The hwirq is taken verbatim from
 * the fwspec (children pass the absolute SEI number); only the irqdata
 * needs to be set up. NOTE(review): only the first of nr_irqs entries
 * is initialized — callers appear to allocate one IRQ at a time.
 */
static int mvebu_sei_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_fwspec *fwspec = arg;

	/* Not much to do, just setup the irqdata */
	irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
				      &mvebu_sei_irq_chip, sei);

	return 0;
}
179*4882a593Smuzhiyun 
mvebu_sei_domain_free(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)180*4882a593Smuzhiyun static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
181*4882a593Smuzhiyun 				  unsigned int nr_irqs)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun 	int i;
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 	for (i = 0; i < nr_irqs; i++) {
186*4882a593Smuzhiyun 		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
187*4882a593Smuzhiyun 		irq_set_handler(virq + i, NULL);
188*4882a593Smuzhiyun 		irq_domain_reset_irq_data(d);
189*4882a593Smuzhiyun 	}
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun 
/* Ops for the root SEI domain (no .translate: children allocate directly) */
static const struct irq_domain_ops mvebu_sei_domain_ops = {
	.alloc	= mvebu_sei_domain_alloc,
	.free	= mvebu_sei_domain_free,
};
196*4882a593Smuzhiyun 
/*
 * DT translation for wired AP interrupts: a single cell holding the
 * AP-relative SEI number; the trigger is always level-high.
 */
static int mvebu_sei_ap_translate(struct irq_domain *domain,
				  struct irq_fwspec *fwspec,
				  unsigned long *hwirq,
				  unsigned int *type)
{
	*hwirq = fwspec->param[0];
	*type  = IRQ_TYPE_LEVEL_HIGH;

	return 0;
}
207*4882a593Smuzhiyun 
/*
 * Allocate a wired AP interrupt: translate the DT spec to an
 * AP-relative hwirq, then allocate the corresponding absolute SEI
 * (offset by ap_range.first) in the parent root domain.
 */
static int mvebu_sei_ap_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_fwspec fwspec;
	unsigned long hwirq;
	unsigned int type;
	int err;

	/* Cannot fail: it only writes hwirq/type (returns 0 above) */
	mvebu_sei_ap_translate(domain, arg, &hwirq, &type);

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 1;
	fwspec.param[0] = hwirq + sei->caps->ap_range.first;

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	irq_domain_set_info(domain, virq, hwirq,
			    &mvebu_sei_ap_irq_chip, sei,
			    handle_level_irq, NULL, NULL);
	irq_set_probe(virq);

	return 0;
}
234*4882a593Smuzhiyun 
/* Ops for the wired AP child domain */
static const struct irq_domain_ops mvebu_sei_ap_domain_ops = {
	.translate	= mvebu_sei_ap_translate,
	.alloc		= mvebu_sei_ap_alloc,
	.free		= irq_domain_free_irqs_parent,
};
240*4882a593Smuzhiyun 
/* Return a CP MSI hwirq to the allocation bitmap (under cp_msi_lock). */
static void mvebu_sei_cp_release_irq(struct mvebu_sei *sei, unsigned long hwirq)
{
	mutex_lock(&sei->cp_msi_lock);
	clear_bit(hwirq, sei->cp_msi_bitmap);
	mutex_unlock(&sei->cp_msi_lock);
}
247*4882a593Smuzhiyun 
/*
 * Allocate a CP (MSI) interrupt: grab a free CP-relative hwirq from the
 * bitmap, then allocate the matching absolute SEI (offset by
 * cp_range.first) in the parent root domain. On parent failure the
 * bitmap bit is rolled back.
 */
static int mvebu_sei_cp_domain_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *args)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_fwspec fwspec;
	unsigned long hwirq;
	int ret;

	/* The software only supports single allocations for now */
	if (nr_irqs != 1)
		return -ENOTSUPP;

	mutex_lock(&sei->cp_msi_lock);
	hwirq = find_first_zero_bit(sei->cp_msi_bitmap,
				    sei->caps->cp_range.size);
	if (hwirq < sei->caps->cp_range.size)
		set_bit(hwirq, sei->cp_msi_bitmap);
	mutex_unlock(&sei->cp_msi_lock);

	/* find_first_zero_bit() returns the size when the bitmap is full */
	if (hwirq == sei->caps->cp_range.size)
		return -ENOSPC;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 1;
	fwspec.param[0] = hwirq + sei->caps->cp_range.first;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		goto free_irq;

	irq_domain_set_info(domain, virq, hwirq,
			    &mvebu_sei_cp_irq_chip, sei,
			    handle_edge_irq, NULL, NULL);

	return 0;

free_irq:
	mvebu_sei_cp_release_irq(sei, hwirq);
	return ret;
}
289*4882a593Smuzhiyun 
/*
 * Free a CP (MSI) interrupt: sanity-check the hwirq, give it back to
 * the bitmap and release the parent-domain allocation.
 */
static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	/* Mirrors the single-allocation restriction of the alloc path */
	if (nr_irqs != 1 || d->hwirq >= sei->caps->cp_range.size) {
		dev_err(sei->dev, "Invalid hwirq %lu\n", d->hwirq);
		return;
	}

	mvebu_sei_cp_release_irq(sei, d->hwirq);
	irq_domain_free_irqs_parent(domain, virq, 1);
}
304*4882a593Smuzhiyun 
/* Ops for the CP (MSI) child domain */
static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
	.alloc	= mvebu_sei_cp_domain_alloc,
	.free	= mvebu_sei_cp_domain_free,
};
309*4882a593Smuzhiyun 
/* Top-level chip seen by platform-MSI clients; delegates to the CP chip */
static struct irq_chip mvebu_sei_msi_irq_chip = {
	.name		= "SEI pMSI",
	.irq_ack	= irq_chip_ack_parent,
	.irq_set_type	= irq_chip_set_type_parent,
};

/* No custom MSI ops: rely entirely on the default domain/chip ops */
static struct msi_domain_ops mvebu_sei_msi_ops = {
};

static struct msi_domain_info mvebu_sei_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.ops	= &mvebu_sei_msi_ops,
	.chip	= &mvebu_sei_msi_irq_chip,
};
324*4882a593Smuzhiyun 
/*
 * Chained handler for the single parent SPI: scan both cause registers
 * and dispatch every pending SEI through the root domain mapping.
 */
static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
{
	struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 idx;

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < SEI_IRQ_REG_COUNT; idx++) {
		unsigned long irqmap;
		int bit;

		irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
		for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
			unsigned long hwirq;
			unsigned int virq;

			/* Absolute SEI number from register index + bit */
			hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
			virq = irq_find_mapping(sei->sei_domain, hwirq);
			if (likely(virq)) {
				generic_handle_irq(virq);
				continue;
			}

			/* Pending bit with no mapping: nothing acks it here */
			dev_warn(sei->dev,
				 "Spurious IRQ detected (hwirq %lu)\n", hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}
356*4882a593Smuzhiyun 
mvebu_sei_reset(struct mvebu_sei * sei)357*4882a593Smuzhiyun static void mvebu_sei_reset(struct mvebu_sei *sei)
358*4882a593Smuzhiyun {
359*4882a593Smuzhiyun 	u32 reg_idx;
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	/* Clear IRQ cause registers, mask all interrupts */
362*4882a593Smuzhiyun 	for (reg_idx = 0; reg_idx < SEI_IRQ_REG_COUNT; reg_idx++) {
363*4882a593Smuzhiyun 		writel_relaxed(0xFFFFFFFF, sei->base + GICP_SECR(reg_idx));
364*4882a593Smuzhiyun 		writel_relaxed(0xFFFFFFFF, sei->base + GICP_SEMR(reg_idx));
365*4882a593Smuzhiyun 	}
366*4882a593Smuzhiyun }
367*4882a593Smuzhiyun 
mvebu_sei_probe(struct platform_device * pdev)368*4882a593Smuzhiyun static int mvebu_sei_probe(struct platform_device *pdev)
369*4882a593Smuzhiyun {
370*4882a593Smuzhiyun 	struct device_node *node = pdev->dev.of_node;
371*4882a593Smuzhiyun 	struct irq_domain *plat_domain;
372*4882a593Smuzhiyun 	struct mvebu_sei *sei;
373*4882a593Smuzhiyun 	u32 parent_irq;
374*4882a593Smuzhiyun 	int ret;
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	sei = devm_kzalloc(&pdev->dev, sizeof(*sei), GFP_KERNEL);
377*4882a593Smuzhiyun 	if (!sei)
378*4882a593Smuzhiyun 		return -ENOMEM;
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun 	sei->dev = &pdev->dev;
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	mutex_init(&sei->cp_msi_lock);
383*4882a593Smuzhiyun 	raw_spin_lock_init(&sei->mask_lock);
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
386*4882a593Smuzhiyun 	sei->base = devm_ioremap_resource(sei->dev, sei->res);
387*4882a593Smuzhiyun 	if (IS_ERR(sei->base)) {
388*4882a593Smuzhiyun 		dev_err(sei->dev, "Failed to remap SEI resource\n");
389*4882a593Smuzhiyun 		return PTR_ERR(sei->base);
390*4882a593Smuzhiyun 	}
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	/* Retrieve the SEI capabilities with the interrupt ranges */
393*4882a593Smuzhiyun 	sei->caps = of_device_get_match_data(&pdev->dev);
394*4882a593Smuzhiyun 	if (!sei->caps) {
395*4882a593Smuzhiyun 		dev_err(sei->dev,
396*4882a593Smuzhiyun 			"Could not retrieve controller capabilities\n");
397*4882a593Smuzhiyun 		return -EINVAL;
398*4882a593Smuzhiyun 	}
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	/*
401*4882a593Smuzhiyun 	 * Reserve the single (top-level) parent SPI IRQ from which all the
402*4882a593Smuzhiyun 	 * interrupts handled by this driver will be signaled.
403*4882a593Smuzhiyun 	 */
404*4882a593Smuzhiyun 	parent_irq = irq_of_parse_and_map(node, 0);
405*4882a593Smuzhiyun 	if (parent_irq <= 0) {
406*4882a593Smuzhiyun 		dev_err(sei->dev, "Failed to retrieve top-level SPI IRQ\n");
407*4882a593Smuzhiyun 		return -ENODEV;
408*4882a593Smuzhiyun 	}
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	/* Create the root SEI domain */
411*4882a593Smuzhiyun 	sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
412*4882a593Smuzhiyun 						   (sei->caps->ap_range.size +
413*4882a593Smuzhiyun 						    sei->caps->cp_range.size),
414*4882a593Smuzhiyun 						   &mvebu_sei_domain_ops,
415*4882a593Smuzhiyun 						   sei);
416*4882a593Smuzhiyun 	if (!sei->sei_domain) {
417*4882a593Smuzhiyun 		dev_err(sei->dev, "Failed to create SEI IRQ domain\n");
418*4882a593Smuzhiyun 		ret = -ENOMEM;
419*4882a593Smuzhiyun 		goto dispose_irq;
420*4882a593Smuzhiyun 	}
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	irq_domain_update_bus_token(sei->sei_domain, DOMAIN_BUS_NEXUS);
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 	/* Create the 'wired' domain */
425*4882a593Smuzhiyun 	sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
426*4882a593Smuzhiyun 						     sei->caps->ap_range.size,
427*4882a593Smuzhiyun 						     of_node_to_fwnode(node),
428*4882a593Smuzhiyun 						     &mvebu_sei_ap_domain_ops,
429*4882a593Smuzhiyun 						     sei);
430*4882a593Smuzhiyun 	if (!sei->ap_domain) {
431*4882a593Smuzhiyun 		dev_err(sei->dev, "Failed to create AP IRQ domain\n");
432*4882a593Smuzhiyun 		ret = -ENOMEM;
433*4882a593Smuzhiyun 		goto remove_sei_domain;
434*4882a593Smuzhiyun 	}
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 	/* Create the 'MSI' domain */
439*4882a593Smuzhiyun 	sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
440*4882a593Smuzhiyun 						     sei->caps->cp_range.size,
441*4882a593Smuzhiyun 						     of_node_to_fwnode(node),
442*4882a593Smuzhiyun 						     &mvebu_sei_cp_domain_ops,
443*4882a593Smuzhiyun 						     sei);
444*4882a593Smuzhiyun 	if (!sei->cp_domain) {
445*4882a593Smuzhiyun 		pr_err("Failed to create CPs IRQ domain\n");
446*4882a593Smuzhiyun 		ret = -ENOMEM;
447*4882a593Smuzhiyun 		goto remove_ap_domain;
448*4882a593Smuzhiyun 	}
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
453*4882a593Smuzhiyun 						     &mvebu_sei_msi_domain_info,
454*4882a593Smuzhiyun 						     sei->cp_domain);
455*4882a593Smuzhiyun 	if (!plat_domain) {
456*4882a593Smuzhiyun 		pr_err("Failed to create CPs MSI domain\n");
457*4882a593Smuzhiyun 		ret = -ENOMEM;
458*4882a593Smuzhiyun 		goto remove_cp_domain;
459*4882a593Smuzhiyun 	}
460*4882a593Smuzhiyun 
461*4882a593Smuzhiyun 	mvebu_sei_reset(sei);
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	irq_set_chained_handler_and_data(parent_irq,
464*4882a593Smuzhiyun 					 mvebu_sei_handle_cascade_irq,
465*4882a593Smuzhiyun 					 sei);
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	return 0;
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun remove_cp_domain:
470*4882a593Smuzhiyun 	irq_domain_remove(sei->cp_domain);
471*4882a593Smuzhiyun remove_ap_domain:
472*4882a593Smuzhiyun 	irq_domain_remove(sei->ap_domain);
473*4882a593Smuzhiyun remove_sei_domain:
474*4882a593Smuzhiyun 	irq_domain_remove(sei->sei_domain);
475*4882a593Smuzhiyun dispose_irq:
476*4882a593Smuzhiyun 	irq_dispose_mapping(parent_irq);
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 	return ret;
479*4882a593Smuzhiyun }
480*4882a593Smuzhiyun 
/* AP806: SEIs 0-20 are wired AP interrupts, 21-63 are CP MSIs */
static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
	.ap_range = {
		.first = 0,
		.size = 21,
	},
	.cp_range = {
		.first = 21,
		.size = 43,
	},
};
491*4882a593Smuzhiyun 
/* DT match table; .data carries the per-SoC range capabilities */
static const struct of_device_id mvebu_sei_of_match[] = {
	{
		.compatible = "marvell,ap806-sei",
		.data = &mvebu_sei_ap806_caps,
	},
	{},
};
499*4882a593Smuzhiyun 
/* Built-in only (no .remove): the controller lives for the system's life */
static struct platform_driver mvebu_sei_driver = {
	.probe  = mvebu_sei_probe,
	.driver = {
		.name = "mvebu-sei",
		.of_match_table = mvebu_sei_of_match,
	},
};
builtin_platform_driver(mvebu_sei_driver);
508