// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2010 John Crispin <john@phrozen.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>
/* register definitions - internal irqs */
#define LTQ_ICU_ISR		0x0000
#define LTQ_ICU_IER		0x0008
#define LTQ_ICU_IOSR		0x0010
#define LTQ_ICU_IRSR		0x0018
#define LTQ_ICU_IMR		0x0020

#define LTQ_ICU_IM_SIZE		0x28

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INC	0x0008
#define LTQ_EIU_EXIN_INEN	0x000C

/* number of external interrupts */
#define MAX_EIU			6

/* the performance counter */
#define LTQ_PERF_IRQ		(INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22
#define ltq_icu_w32(vpe, m, x, y)	\
	ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))

#define ltq_icu_r32(vpe, m, x)		\
	ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE		8

static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[NR_CPUS];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static DEFINE_SPINLOCK(ltq_eiu_lock);
static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
static int ltq_perfcount_irq;
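
/* map an external irq line (EXIN) to the ICU hwirq it is wired to, or -1 */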
int ltq_eiu_get_irq(int exin)
{
	if (exin < exin_avail)
		return ltq_eiu_irq[exin];
	return -1;
}
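
/* mask an irq: clear its enable bit in the IER of every present VPE */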
void ltq_disable_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im,
			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
			    LTQ_ICU_IER);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}
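
/* mask the irq on all present VPEs and clear its pending bit in the ISR */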
void ltq_mask_and_ack_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im,
			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
			    LTQ_ICU_IER);
		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}
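
/* ack an irq: clear its pending bit in the ISR of every present VPE */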
static void ltq_ack_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}
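
/* unmask an irq on the first VPE of its effective affinity mask only */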
void ltq_enable_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));

	/* This shouldn't be even possible, maybe during CPU hotplug spam */
	if (unlikely(vpe >= nr_cpu_ids))
		vpe = smp_processor_id();

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);

	ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
		    LTQ_ICU_IER);

	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}
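
/*
 * program the trigger mode field of an external irq line in EXIN_C
 * (one field per EXIN line, 4 bits apart); edge triggered modes also
 * switch the flow handler to handle_edge_irq
 */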
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
	int i;
	unsigned long flags;

	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			int val = 0;
			int edge = 0;

			switch (type) {
			case IRQF_TRIGGER_NONE:
				break;
			case IRQF_TRIGGER_RISING:
				val = 1;
				edge = 1;
				break;
			case IRQF_TRIGGER_FALLING:
				val = 2;
				edge = 1;
				break;
			case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
				val = 3;
				edge = 1;
				break;
			case IRQF_TRIGGER_HIGH:
				val = 5;
				break;
			case IRQF_TRIGGER_LOW:
				val = 6;
				break;
			default:
				pr_err("invalid type %d for irq %ld\n",
					type, d->hwirq);
				return -EINVAL;
			}

			if (edge)
				irq_set_handler(d->hwirq, handle_edge_irq);

			spin_lock_irqsave(&ltq_eiu_lock, flags);
			ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
				    (~(7 << (i * 4)))) | (val << (i * 4)),
				    LTQ_EIU_EXIN_C);
			spin_unlock_irqrestore(&ltq_eiu_lock, flags);
		}
	}

	return 0;
}
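
/*
 * startup an EIU irq: default to low level trigger, clear the pending
 * state in EXIN_INC and set the enable bit in EXIN_INEN
 */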
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_enable_irq(d);
	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			/* by default we are low level triggered */
			ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
				LTQ_EIU_EXIN_INC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}
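
/* shutdown an EIU irq: mask it in the ICU and clear its EXIN_INEN enable bit */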
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_disable_irq(d);
	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}

#if defined(CONFIG_SMP)
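/*
 * only record the new effective affinity here; the enable bit moves to
 * the first CPU of that mask the next time the irq is unmasked
 */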
static int ltq_icu_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	struct cpumask tmask;

	if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
		return -EINVAL;

	irq_data_update_effective_affinity(d, &tmask);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip ltq_irq_type = {
	.name = "icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
#if defined(CONFIG_SMP)
	.irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

static struct irq_chip ltq_eiu_type = {
	.name = "eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
	.irq_set_type = ltq_eiu_settype,
#if defined(CONFIG_SMP)
	.irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};
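
/*
 * chained handler for one cascaded interrupt module (IM): read the
 * ordered status register of the local VPE and dispatch the highest
 * pending bit to its mapped virq
 */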
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
	unsigned int module = irq_desc_get_irq(desc) - 2;
	u32 irq;
	irq_hw_number_t hwirq;
	int vpe = smp_processor_id();

	irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
	if (irq == 0)
		return;

	/*
	 * silicon bug causes only the msb set to 1 to be valid. all
	 * other bits might be bogus
	 */
	irq = __fls(irq);
	hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
	generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));

	/* if this is an EBU irq, we need to ack it or get a deadlock */
	if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}
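
/*
 * domain map callback: EXIN lines get the eiu chip, everything else
 * the plain icu chip with level flow handling
 */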
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	struct irq_chip *chip = &ltq_irq_type;
	struct irq_data *data;
	int i;

	if (hw < MIPS_CPU_IRQ_CASCADE)
		return 0;

	for (i = 0; i < exin_avail; i++)
		if (hw == ltq_eiu_irq[i])
			chip = &ltq_eiu_type;

	data = irq_get_irq_data(irq);

	irq_data_update_effective_affinity(data, cpumask_of(0));

	irq_set_chip_and_handler(irq, chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
	.xlate = irq_domain_xlate_onetwocell,
	.map = icu_map,
};
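
/*
 * map and reset the per-VPE ICU register blocks, install the chained
 * handlers, register the irq domain and optionally set up the EIU
 */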
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
	struct device_node *eiu_node;
	struct resource res;
	int i, ret, vpe;

	/* load register regions of available ICUs */
	for_each_possible_cpu(vpe) {
		if (of_address_to_resource(node, vpe, &res))
			panic("Failed to get icu%i memory range", vpe);

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request icu%i memory\n", vpe);

		ltq_icu_membase[vpe] = ioremap(res.start,
					resource_size(&res));

		if (!ltq_icu_membase[vpe])
			panic("Failed to remap icu%i memory", vpe);
	}

	/* turn off all irqs by default */
	for_each_possible_cpu(vpe) {
		for (i = 0; i < MAX_IM; i++) {
			/* make sure all irqs are turned off by default */
			ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);

			/* clear all possibly pending interrupts */
			ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
			ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);

			/* clear resend */
			ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
		}
	}

	mips_cpu_irq_init();

	for (i = 0; i < MAX_IM; i++)
		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

	ltq_domain = irq_domain_add_linear(node,
		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
		&irq_domain_ops, 0);

	/* tell oprofile which irq to use */
	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

	/* the external interrupts are optional and xway only */
	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
	if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
		/* find out how many external irq sources we have */
		exin_avail = of_property_count_u32_elems(eiu_node,
							 "lantiq,eiu-irqs");

		if (exin_avail > MAX_EIU)
			exin_avail = MAX_EIU;

		ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
						ltq_eiu_irq, exin_avail);
		if (ret)
			panic("failed to load external irq resources");

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request eiu memory\n");

		ltq_eiu_membase = ioremap(res.start,
					resource_size(&res));
		if (!ltq_eiu_membase)
			panic("Failed to remap eiu memory");
	}

	return 0;
}
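
/* report the irq wired to the CP0 performance counters (set up above) */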
int get_c0_perfcount_int(void)
{
	return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{
	return CP0_LEGACY_COMPARE_IRQ;
}

static const struct of_device_id of_irq_ids[] __initconst = {
	{ .compatible = "lantiq,icu", .data = icu_of_init },
	{},
};

void __init arch_init_irq(void)
{
	of_irq_init(of_irq_ids);
}