/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>


static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);

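/*
 * The dispatch/mask/unmask helpers above are selected at runtime by
 * bcm63xx_init_irq(): SoCs with 64 interrupt sources get the _64
 * variants, the others the _32 variants.
 */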
static inline u32 get_ext_irq_perf_reg(int irq)
{
	if (irq < 4)
		return ext_irq_cfg_reg1;
	return ext_irq_cfg_reg2;
}

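/*
 * When external IRQs are cascaded through the internal controller,
 * remap the internal line back into the external IRQ range before
 * dispatching it.
 */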
static inline void handle_internal(int intbit)
{
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}

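/*
 * Decide whether an IRQ may be delivered to @cpu: the CPU must be
 * online, and on SMP it must also be part of the requested (or
 * previously set) affinity mask.
 */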
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}


/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). no
 * interrupt is prioritized over another: a per-cpu static counter
 * resumes the scan where it left off on the previous call.
 */

#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* read registers in reverse order */				\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	/* words are stored highest word first, flip the index to	\
	 * match the reversed read in the dispatcher above */		\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
static void __internal_irq_unmask_##width(struct irq_data *d,		\
					  const struct cpumask *m)	\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		if (enable_irq_for_cpu(cpu, d, m))			\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);

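/*
 * Top-level MIPS IRQ dispatch: IP2 carries the internal PIC for CPU 0;
 * IP3 carries either CPU 1's internal PIC (when external IRQs are
 * cascaded) or external IRQ 0; IP4-IP6 carry external IRQs 1-3 in the
 * non-cascaded case.
 */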
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}


/*
 * internal IRQ operations: only mask/unmask via the PERF irq mask
 * register.
 */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}

/*
 * external IRQ operations: mask/unmask and clear via the PERF
 * external irq control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg &= ~EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
				    NULL);
}

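/*
 * irq_ack callback: set the CLEAR bit in the PERF config register,
 * which presumably acknowledges the latched external interrupt (this
 * chip is used with handle_edge_irq below).
 */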
static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_CLEAR(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);
}

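/*
 * Trigger encoding programmed below (levelsense/sense/bothedge):
 *   falling edge  0/0/0
 *   rising edge   0/1/0
 *   both edges    0/0/1
 *   level low     1/0/0
 *   level high    1/1/0
 */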
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		pr_err("bogus flow type combination given!\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}

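/*
 * Affinity changes are applied by rewriting the per-CPU mask bits: if
 * the IRQ is currently enabled, redo the unmask with the new mask;
 * otherwise the new affinity is picked up by enable_irq_for_cpu() on
 * the next unmask.
 */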
#ifdef CONFIG_SMP
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (!irqd_irq_disabled(data))
		internal_irq_unmask(data, dest);

	return 0;
}
#endif

static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

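/*
 * Per-SoC setup: compute the PERF-relative stat/mask register
 * addresses, register width (32 or 64 sources), number of external
 * IRQs and, where applicable, the cascaded external IRQ range. SoCs
 * with a single register set leave irq_{stat,mask}_addr[1] at zero,
 * which the per-CPU loops above use as a stop condition.
 */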
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}

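/*
 * Register the irq chips and claim the cascade lines: IP2 always
 * cascades the internal PIC; in the non-cascaded case IP3-IP6 carry
 * the external IRQs directly, and on SMP IP3 cascades CPU 1's PIC.
 */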
void __init arch_init_irq(void)
{
	int i, irq;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i) {
			irq = MIPS_CPU_IRQ_BASE + i;
			if (request_irq(irq, no_action, IRQF_NO_THREAD,
					"cascade_extirq", NULL)) {
				pr_err("Failed to request irq %d (cascade_extirq)\n",
				       irq);
			}
		}
	}

	irq = MIPS_CPU_IRQ_BASE + 2;
	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip2", NULL))
		pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		irq = MIPS_CPU_IRQ_BASE + 3;
		if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip3",
				NULL))
			pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}