// SPDX-License-Identifier: GPL-2.0
/*
 * ip27-irq.c: High-level interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/irq_cpu.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/irq_alloc.h>

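/*
 * Per-interrupt chip data: the CPU the interrupt is currently routed to
 * and pointers to the two 64-bit PI_INT_MASK registers of that CPU's
 * hub slice.
 */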
struct hub_irq_data {
	u64	*irq_mask[2];
	cpuid_t	cpu;
};

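/* Hub interrupt levels already allocated or reserved */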
static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);

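/* Per-CPU software copy of the two hub interrupt enable mask registers */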
static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);

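/*
 * Claim a free hub interrupt level.  find_first_zero_bit() and
 * test_and_set_bit() are not atomic as a pair, so retry if another
 * caller grabbed the level in between.
 */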
static inline int alloc_level(void)
{
	int level;

again:
	level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
	if (level >= IP27_HUB_IRQ_COUNT)
		return -ENOSPC;

	if (test_and_set_bit(level, hub_irq_map))
		goto again;

	return level;
}

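/*
 * Set the interrupt's bit in the owning CPU's software mask and write
 * both 64-bit halves out to the hub's interrupt mask registers.
 */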
static void enable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}

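/* Counterpart of enable_hub_irq(): clear the bit, then update the hub */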
static void disable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}

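/*
 * Route an interrupt to the first online CPU in @mask (or to any online
 * CPU if @mask contains none) and select the A or B register set of that
 * CPU's hub according to the slice the CPU occupies.
 */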
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);

	nasid = cpu_to_node(cpu);
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}
}

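/*
 * Changing affinity changes which hub mask registers the interrupt lives
 * in, so a started interrupt is masked at the old location first and
 * unmasked at the new one afterwards.
 */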
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		disable_hub_irq(d);

	setup_hub_mask(hd, mask);

	if (irqd_is_started(d))
		enable_hub_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}

static struct irq_chip hub_irq_type = {
	.name		  = "HUB",
	.irq_mask	  = disable_hub_irq,
	.irq_unmask	  = enable_hub_irq,
	.irq_set_affinity = set_affinity_hub_irq,
};

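/*
 * Allocate one hub interrupt level, bind it to hub_irq_type as a level
 * interrupt and route it to a CPU attached to the hub named in the
 * passed irq_alloc_info.
 */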
static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct hub_irq_data *hd;
	struct hub_data *hub;
	struct irq_desc *desc;
	int swlevel;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	swlevel = alloc_level();
	if (unlikely(swlevel < 0)) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
			    handle_level_irq, NULL, NULL);

	/* use CPU connected to nearest hub */
	hub = hub_data(info->nasid);
	setup_hub_mask(hd, &hub->h_cpus);
	info->nasid = cpu_to_node(hd->cpu);

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(info->nasid, swlevel);

	desc = irq_to_desc(virq);
	desc->irq_common_data.node = info->nasid;
	cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);

	return 0;
}

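/*
 * Free the chip data allocated in hub_domain_alloc().  Note that the
 * level claimed from hub_irq_map is not returned here.
 */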
static void hub_domain_free(struct irq_domain *domain,
			    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd && irqd->chip_data)
		kfree(irqd->chip_data);
}

static const struct irq_domain_ops hub_domain_ops = {
	.alloc = hub_domain_alloc,
	.free  = hub_domain_free,
};

/*
 * This code is unnecessarily complex, because we do interrupt enabling.
 * Basically, once we grab the set of interrupts we need to service, we
 * must mask _all_ of them: first, to make sure the same interrupt does
 * not fire again before it has been serviced, causing recursion that
 * can lead to stack overflow; second, because we cannot just mask the
 * one interrupt being serviced, as the unmasked interrupts in the set
 * might fire again and be serviced multiple times.  This effect is
 * mostly seen for intercpu interrupts.
 * Kanoj 05.13.00
 */

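/*
 * Chained handler for the first hub pending register (PI_INT_PEND0):
 * handle scheduler and function-call IPIs inline, dispatch everything
 * else through the HUB irq domain.  The trailing read of PI_INT_PEND0
 * is presumably there to push the interrupt clears out to the hub
 * before returning.
 */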
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend0;
	int irq;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);

	pend0 &= mask[0];		/* Pick intrs we should look at */
	if (!pend0)
		return;

#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		generic_smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		domain = irq_desc_get_handler_data(desc);
		irq = irq_linear_revmap(domain, __ffs(pend0));
		if (irq)
			generic_handle_irq(irq);
		else
			spurious_interrupt();
	}

	LOCAL_HUB_L(PI_INT_PEND0);
}

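/*
 * Same as ip27_do_irq_mask0(), but for PI_INT_PEND1, whose bits map to
 * hardware irq numbers 64..127; hence the "+ 64" in the revmap lookup.
 */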
static void ip27_do_irq_mask1(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend1;
	int irq;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);

	pend1 &= mask[1];		/* Pick intrs we should look at */
	if (!pend1)
		return;

	domain = irq_desc_get_handler_data(desc);
	irq = irq_linear_revmap(domain, __ffs(pend1) + 64);
	if (irq)
		generic_handle_irq(irq);
	else
		spurious_interrupt();

	LOCAL_HUB_L(PI_INT_PEND1);
}

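/*
 * Called on each CPU during bringup: enable this slice's resched and
 * call IPI levels in the local software mask and load both mask words
 * into the slice's PI_INT_MASK register set.
 */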
void install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	int resched, call;

	resched = CPU_RESCHED_A_IRQ + slice;
	set_bit(resched, mask);
	LOCAL_HUB_CLR_INTR(resched);

	call = CPU_CALL_A_IRQ + slice;
	set_bit(call, mask);
	LOCAL_HUB_CLR_INTR(call);

	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
	}
}

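/*
 * Set up the CPU interrupt controller, reserve the levels fixed by
 * hardware or software convention, create the HUB irq domain and chain
 * the two pending registers into it.
 */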
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	int i;

	mips_cpu_irq_init();

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	for (i = 0; i <= CPU_CALL_B_IRQ; i++)
		set_bit(i, hub_irq_map);

	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
		set_bit(i, hub_irq_map);

	fn = irq_domain_alloc_named_fwnode("HUB");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
					  &hub_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_host(domain);

	irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
					 domain);
	irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
					 domain);
}