xref: /OK3568_Linux_fs/kernel/drivers/irqchip/irq-mips-gic.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) "irq-mips-gic: " fmt

#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* An interrupt mapped to pin X causes the GIC to generate vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

void __iomem *mips_gic_base;

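/*
 * Per-CPU bitmap of shared interrupts that are both unmasked and routed to
 * the given CPU. gic_handle_shared_int() ANDs the hardware pending bitmap
 * with this mask so each CPU only dispatches the interrupts targeted at it.
 */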
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

#ifdef CONFIG_GENERIC_IRQ_IPI
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
#endif /* CONFIG_GENERIC_IRQ_IPI */

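/*
 * Cached map/mask state for the local interrupts handled by the "all VPEs"
 * chip (timer, perfctr, FDC). gic_all_vpes_irq_cpu_online() replays this
 * state on each CPU as it comes online, since the underlying registers are
 * per-VP.
 */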
static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

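/*
 * Send an IPI by raising the shared interrupt backing it: writing the
 * interrupt number to the GIC WEDGE register with GIC_WEDGE_RW set marks it
 * pending (the same register is written without GIC_WEDGE_RW to clear it,
 * see gic_ack_irq()).
 */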
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

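/*
 * Dispatch every shared interrupt that is pending in hardware and enabled in
 * this CPU's pcpu_mask. Runs either from the chained handler installed on
 * the parent CPU IRQ (chained == true) or directly from the CPU vector in
 * EIC mode (chained == false).
 */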
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

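/*
 * Ack an edge-triggered shared interrupt: writing the interrupt number to
 * WEDGE without GIC_WEDGE_RW clears its pending (edge) state.
 */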
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}

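/*
 * Program polarity, trigger and dual-edge configuration for a shared
 * interrupt, then switch between the edge and level irq_chips and flow
 * handlers to match the requested type.
 */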
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	change_gic_pol(irq, pol);
	change_gic_trig(irq, trig);
	change_gic_dual(irq, dual);

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
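/*
 * Route a shared interrupt to a single online CPU. Shared interrupts are
 * kept single-target in this driver (see irqd_set_single_target() in
 * gic_irq_domain_map()), so only the first CPU of the requested mask is
 * used and recorded as the effective affinity.
 */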
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

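/*
 * The *_all_vpes variants mask/unmask a local interrupt on every online VP
 * through the GIC's other-VP register window, and record the state in
 * gic_all_vpes_chip_data so gic_all_vpes_irq_cpu_online() can replay it for
 * CPUs that come online later.
 */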
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned int intr;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);

	write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
	if (cd->mask)
		write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
	.irq_cpu_online		= gic_all_vpes_irq_cpu_online,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

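/*
 * Translate a three-cell devicetree interrupt specifier of the form
 * <GIC_SHARED|GIC_LOCAL, number-within-that-space, IRQ_TYPE_*> into a
 * linear hwirq. An illustrative consumer node (not taken from any
 * particular board DTS) might look like:
 *
 *	interrupt-parent = <&gic>;
 *	interrupts = <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>;
 *
 * which resolves to hwirq GIC_SHARED_TO_HWIRQ(29).
 */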
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

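/*
 * Map a virq to either a shared or a local hwirq. Shared interrupts get the
 * level chip by default (gic_set_type() may later switch them to the edge
 * chip) and are initially routed to CPU 0. Local interrupts are percpu, with
 * the timer/perfctr/FDC cases routed through the "all VPEs" chip because the
 * rest of arch/mips does not use the percpu IRQ API for them.
 */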
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
#ifdef CONFIG_GENERIC_IRQ_IPI
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;
#endif /* CONFIG_GENERIC_IRQ_IPI */

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
		fallthrough;
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

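/*
 * Allocate consecutive shared interrupts from the reserved IPI pool, one per
 * CPU in ipimask, and map each one to its target CPU using the edge chip so
 * the generic IPI code can raise it via gic_send_ipi().
 */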
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
				enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
		break;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

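/*
 * Create the IPI domain as a child of the main GIC domain and reserve the
 * shared interrupts used for IPIs: either the range given by the
 * "mti,reserved-ipi-vectors" DT property or, by default, the top
 * 2 * num_possible_cpus() shared interrupts.
 */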
static int gic_register_ipi_domain(struct device_node *node)
{
	struct irq_domain *gic_ipi_domain;
	unsigned int v[2], num_ipis;

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add IPI domain\n");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * meeting the requirements of arch/mips SMP.
		 */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	return 0;
}

#else /* !CONFIG_GENERIC_IRQ_IPI */

static inline int gic_register_ipi_domain(struct device_node *node)
{
	return 0;
}

#endif /* !CONFIG_GENERIC_IRQ_IPI */

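/*
 * CPU hotplug "starting" callback, run on each CPU as it comes online:
 * select EIC mode if the CPU supports it, mask all local interrupts, then
 * let irq_cpu_online() re-enable whatever the "all VPEs" chips have
 * recorded.
 */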
static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (i.e. disable all local interrupts) */
	write_gic_vl_rmask(~0);

	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
	irq_cpu_online();

	return 0;
}

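/*
 * Probe path: pick a free CPU vector, locate and map the GIC register block
 * (from DT, or from the CM GCR base if DT gives no "reg"), read the number
 * of shared interrupts from GIC_CONFIG, register the IRQ and IPI domains and
 * hook up the per-CPU startup callback.
 */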
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;
	int ret;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
			pr_warn("Using inherited base address %pa\n",
				&gic_base);
		} else {
			pr_err("Failed to get memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap(gic_base, gic_len);
	if (!mips_gic_base) {
		pr_err("Failed to ioremap gic_base\n");
		return -ENOMEM;
	}

	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPUs' local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add IRQ domain\n");
		return -ENXIO;
	}

	ret = gic_register_ipi_domain(node);
	if (ret)
		return ret;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}

	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);