1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Cell Internal Interrupt Controller
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
6*4882a593Smuzhiyun * IBM, Corp.
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * Author: Arnd Bergmann <arndb@de.ibm.com>
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * TODO:
13*4882a593Smuzhiyun * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
14*4882a593Smuzhiyun * vs node numbers in the setup code
15*4882a593Smuzhiyun * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
16*4882a593Smuzhiyun * a non-active node to the active node)
17*4882a593Smuzhiyun */
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include <linux/interrupt.h>
20*4882a593Smuzhiyun #include <linux/irq.h>
21*4882a593Smuzhiyun #include <linux/export.h>
22*4882a593Smuzhiyun #include <linux/percpu.h>
23*4882a593Smuzhiyun #include <linux/types.h>
24*4882a593Smuzhiyun #include <linux/ioport.h>
25*4882a593Smuzhiyun #include <linux/kernel_stat.h>
26*4882a593Smuzhiyun #include <linux/pgtable.h>
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #include <asm/io.h>
29*4882a593Smuzhiyun #include <asm/prom.h>
30*4882a593Smuzhiyun #include <asm/ptrace.h>
31*4882a593Smuzhiyun #include <asm/machdep.h>
32*4882a593Smuzhiyun #include <asm/cell-regs.h>
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #include "interrupt.h"
35*4882a593Smuzhiyun
/*
 * Per-CPU (hardware thread) state for the Cell Internal Interrupt
 * Controller.
 */
struct iic {
	struct cbe_iic_thread_regs __iomem *regs;	/* mapped thread regs (see init_one_iic) */
	u8 target_id;		/* HW destination id, computed in init_one_iic() */
	u8 eoi_stack[16];	/* priorities saved by iic_get_irq(), popped by iic_eoi() */
	int eoi_ptr;		/* top-of-stack index into eoi_stack */
	struct device_node *node;	/* OF node of the owning controller (refcounted) */
};
43*4882a593Smuzhiyun
/* One iic instance per CPU (hardware thread) */
static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT	2
/* Linear irq domain covering all IIC sources; also the default host */
static struct irq_domain *iic_host;
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun /* Convert between "pending" bits and hw irq number */
iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)49*4882a593Smuzhiyun static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun unsigned char unit = bits.source & 0xf;
52*4882a593Smuzhiyun unsigned char node = bits.source >> 4;
53*4882a593Smuzhiyun unsigned char class = bits.class & 3;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /* Decode IPIs */
56*4882a593Smuzhiyun if (bits.flags & CBE_IIC_IRQ_IPI)
57*4882a593Smuzhiyun return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
58*4882a593Smuzhiyun else
59*4882a593Smuzhiyun return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun
/*
 * No-op: this driver never masks individual sources; interrupt
 * delivery is managed through the thread priority register instead
 * (see iic_eoi() and iic_setup_cpu()).
 */
static void iic_mask(struct irq_data *d)
{
}
65*4882a593Smuzhiyun
/* No-op, see iic_mask() */
static void iic_unmask(struct irq_data *d)
{
}
69*4882a593Smuzhiyun
iic_eoi(struct irq_data * d)70*4882a593Smuzhiyun static void iic_eoi(struct irq_data *d)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun struct iic *iic = this_cpu_ptr(&cpu_iic);
73*4882a593Smuzhiyun out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
74*4882a593Smuzhiyun BUG_ON(iic->eoi_ptr < 0);
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun
/* irq_chip for normal and IPI sources; mask/unmask are no-ops */
static struct irq_chip iic_chip = {
	.name = "CELL-IIC",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_eoi,
};
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun
/*
 * No-op EOI: IO exception sources are acked directly in
 * iic_ioexc_cascade() via writes to the iic_is register.
 */
static void iic_ioexc_eoi(struct irq_data *d)
{
}
88*4882a593Smuzhiyun
/*
 * Chained handler for IO exception interrupts cascaded through a
 * node's IIC.  Loops until the node's interrupt status register
 * (iic_is) reads zero; edge sources are acked before dispatch so a
 * re-trigger during handling is not lost, level sources are acked
 * after so they re-fire if still asserted.
 */
static void iic_ioexc_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cbe_iic_regs __iomem *node_iic =
		(void __iomem *)irq_desc_get_handler_data(desc);
	unsigned int irq = irq_desc_get_irq(desc);
	/* NOTE(review): base is built from the *virtual* irq number of the
	 * cascade, not its hw number -- confirm this is intentional. */
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;

	for (;;) {
		bits = in_be64(&node_iic->iic_is);
		if (bits == 0)
			break;
		/* pre-ack edge interrupts */
		ack = bits & IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
		/* handle them: MSB of iic_is corresponds to cascade bit 0 */
		for (cascade = 63; cascade >= 0; cascade--)
			if (bits & (0x8000000000000000UL >> cascade)) {
				unsigned int cirq =
					irq_linear_revmap(iic_host,
							  base | cascade);
				if (cirq)
					generic_handle_irq(cirq);
			}
		/* post-ack level interrupts */
		ack = bits & ~IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
	}
	/* EOI the cascade interrupt itself on the parent chip */
	chip->irq_eoi(&desc->irq_data);
}
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun
/* irq_chip for cascaded IO exception sources; all ops are no-ops
 * because acking happens in the cascade handler */
static struct irq_chip iic_ioexc_chip = {
	.name = "CELL-IOEX",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_ioexc_eoi,
};
131*4882a593Smuzhiyun
/* Get an IRQ number from the pending state register of the IIC.
 * Returns 0 when nothing (mappable) is pending. */
static unsigned int iic_get_irq(void)
{
	struct cbe_iic_pending_bits pending;
	struct iic *iic;
	unsigned int virq;

	iic = this_cpu_ptr(&cpu_iic);
	/* Read the whole pending register with a single 64-bit access
	 * and type-pun it onto the bitfield layout */
	*(unsigned long *) &pending =
		in_be64((u64 __iomem *) &iic->regs->pending_destr);
	if (!(pending.flags & CBE_IIC_IRQ_VALID))
		return 0;	/* nothing actually pending */
	virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
	if (!virq)
		return 0;	/* no linux mapping for this source */
	/* Save the interrupted priority so iic_eoi() can restore it */
	iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
	BUG_ON(iic->eoi_ptr > 15);
	return virq;
}
151*4882a593Smuzhiyun
iic_setup_cpu(void)152*4882a593Smuzhiyun void iic_setup_cpu(void)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
iic_get_target_id(int cpu)157*4882a593Smuzhiyun u8 iic_get_target_id(int cpu)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun return per_cpu(cpu_iic, cpu).target_id;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iic_get_target_id);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun #ifdef CONFIG_SMP
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun /* Use the highest interrupt priorities for IPI */
iic_msg_to_irq(int msg)167*4882a593Smuzhiyun static inline int iic_msg_to_irq(int msg)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun return IIC_IRQ_TYPE_IPI + 0xf - msg;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun
iic_message_pass(int cpu,int msg)172*4882a593Smuzhiyun void iic_message_pass(int cpu, int msg)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
iic_request_ipi(int msg)177*4882a593Smuzhiyun static void iic_request_ipi(int msg)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun int virq;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
182*4882a593Smuzhiyun if (!virq) {
183*4882a593Smuzhiyun printk(KERN_ERR
184*4882a593Smuzhiyun "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
185*4882a593Smuzhiyun return;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun /*
189*4882a593Smuzhiyun * If smp_request_message_ipi encounters an error it will notify
190*4882a593Smuzhiyun * the error. If a message is not needed it will return non-zero.
191*4882a593Smuzhiyun */
192*4882a593Smuzhiyun if (smp_request_message_ipi(virq, msg))
193*4882a593Smuzhiyun irq_dispose_mapping(virq);
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun
/* Create mappings and handlers for every SMP message type we use */
void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION);
	iic_request_ipi(PPC_MSG_RESCHEDULE);
	iic_request_ipi(PPC_MSG_TICK_BROADCAST);
	iic_request_ipi(PPC_MSG_NMI_IPI);
}
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun #endif /* CONFIG_SMP */
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun
/* Claim only device-tree nodes of the CBEA internal controller */
static int iic_host_match(struct irq_domain *h, struct device_node *node,
			  enum irq_domain_bus_token bus_token)
{
	return of_device_is_compatible(node,
				    "IBM,CBEA-Internal-Interrupt-Controller");
}
213*4882a593Smuzhiyun
/*
 * Domain .map callback: pick chip and flow handler from the type bits
 * of the hw irq number.
 */
static int iic_host_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	unsigned int type = hw & IIC_IRQ_TYPE_MASK;

	if (type == IIC_IRQ_TYPE_IPI)
		irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
	else if (type == IIC_IRQ_TYPE_IOEXC)
		irq_set_chip_and_handler(virq, &iic_ioexc_chip,
					 handle_edge_eoi_irq);
	else
		irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);

	return 0;
}
230*4882a593Smuzhiyun
/*
 * Domain .xlate callback: translate a 1-cell device-tree interrupt
 * specifier into a hw irq number.  The cell packs, MSB to LSB, one
 * byte each: node, external status bit (IO exceptions), class, unit.
 */
static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
			  const u32 *intspec, unsigned int intsize,
			  irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	unsigned int node, ext, unit, class;
	const u32 *val;

	if (!of_device_is_compatible(ct,
				    "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;

	/* Unpack the four byte-wide fields of the specifier */
	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;

	/* Check if node is in supported range */
	if (node > 1)
		return -EINVAL;

	/* Build up interrupt number, special case for IO exceptions */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;

	/* Dummy flags, ignored by iic code */
	*out_flags = IRQ_TYPE_EDGE_RISING;

	return 0;
}
270*4882a593Smuzhiyun
/* Domain callbacks: linear revmap with 1-cell DT specifiers */
static const struct irq_domain_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
276*4882a593Smuzhiyun
/*
 * Map and initialize the per-thread IIC register block for one HW cpu.
 */
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	/* XXX FIXME: should locate the linux CPU number from the HW cpu
	 * number properly. We are lucky for now
	 */
	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);

	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);

	/* node bit in the upper nibble, 0xe/0xf selects the thread */
	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	/* bottom of the EOI stack; matches the 0xff iic_setup_cpu() writes */
	iic->eoi_stack[0] = 0xff;
	iic->node = of_node_get(node);
	out_be64(&iic->regs->prio, 0);

	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n",
	       hw_cpu, iic->target_id, node);
}
296*4882a593Smuzhiyun
/*
 * Locate every IIC node in the device-tree, map the two per-thread
 * register blocks it serves and wire up the IO-exception cascade.
 * Returns 0 if at least one controller was found, -ENODEV otherwise.
 */
static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for_each_node_by_name(dn, "interrupt-controller") {
		if (!of_device_is_compatible(dn,
				"IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		/* Which HW cpus (threads) this controller serves */
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);
			return -ENODEV;
		}
		/* One register resource per thread */
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);

		/* Setup cascade for IO exceptions. XXX cleanup tricks to get
		 * node vs CPU etc...
		 * Note that we configure the IIC_IRR here with a hard coded
		 * priority of 1. We might want to improve that later.
		 */
		node = np[0] >> 1;
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		/* hw number of the cascade: this node, class 1, IIC unit */
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (!cascade)
			continue;
		/*
		 * irq_data is a generic pointer that gets passed back
		 * to us later, so the forced cast is fine.
		 */
		irq_set_handler_data(cascade, (void __force *)node_iic);
		irq_set_chained_handler(cascade, iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12)		/* priority */ |
			 (node << 4)		/* dest node */ |
			 IIC_UNIT_THREAD_0	/* route them to thread 0 */);
		/* Flush pending (make sure it triggers if there is
		 * anything pending
		 */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}
359*4882a593Smuzhiyun
/*
 * Main entry point: create the irq domain, probe the controllers from
 * the device-tree and install the IIC as the platform irq source.
 * Panics on failure since there is no interrupt controller fallback.
 */
void __init iic_init_IRQ(void)
{
	/* Setup an irq host data structure */
	iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
					 NULL);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);

	/* Discover and initialize iics */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize !\n");

	/* Set master interrupt handling function */
	ppc_md.get_irq = iic_get_irq;

	/* Enable on current CPU */
	iic_setup_cpu();
}
378*4882a593Smuzhiyun
iic_set_interrupt_routing(int cpu,int thread,int priority)379*4882a593Smuzhiyun void iic_set_interrupt_routing(int cpu, int thread, int priority)
380*4882a593Smuzhiyun {
381*4882a593Smuzhiyun struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
382*4882a593Smuzhiyun u64 iic_ir = 0;
383*4882a593Smuzhiyun int node = cpu >> 1;
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun /* Set which node and thread will handle the next interrupt */
386*4882a593Smuzhiyun iic_ir |= CBE_IIC_IR_PRIO(priority) |
387*4882a593Smuzhiyun CBE_IIC_IR_DEST_NODE(node);
388*4882a593Smuzhiyun if (thread == 0)
389*4882a593Smuzhiyun iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
390*4882a593Smuzhiyun else
391*4882a593Smuzhiyun iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
392*4882a593Smuzhiyun out_be64(&iic_regs->iic_ir, iic_ir);
393*4882a593Smuzhiyun }
394