1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Driver for ePAPR Embedded Hypervisor PIC
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright 2008-2011 Freescale Semiconductor, Inc.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Author: Ashish Kalra <ashish.kalra@freescale.com>
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * This file is licensed under the terms of the GNU General Public License
9*4882a593Smuzhiyun * version 2. This program is licensed "as is" without any warranty of any
10*4882a593Smuzhiyun * kind, whether express or implied.
11*4882a593Smuzhiyun */
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <linux/types.h>
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <linux/init.h>
16*4882a593Smuzhiyun #include <linux/irq.h>
17*4882a593Smuzhiyun #include <linux/smp.h>
18*4882a593Smuzhiyun #include <linux/interrupt.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <linux/spinlock.h>
21*4882a593Smuzhiyun #include <linux/of.h>
22*4882a593Smuzhiyun #include <linux/of_address.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <asm/io.h>
25*4882a593Smuzhiyun #include <asm/irq.h>
26*4882a593Smuzhiyun #include <asm/smp.h>
27*4882a593Smuzhiyun #include <asm/machdep.h>
28*4882a593Smuzhiyun #include <asm/ehv_pic.h>
29*4882a593Smuzhiyun #include <asm/fsl_hcalls.h>
30*4882a593Smuzhiyun
/* The single system-wide ePAPR PIC instance, created by ehv_pic_init(). */
static struct ehv_pic *global_ehv_pic;
/* Serializes ev_int_get_config()/ev_int_set_config() read-modify-write pairs. */
static DEFINE_SPINLOCK(ehv_pic_lock);

/* Raw second interrupt-specifier cell per hwirq, recorded by the .xlate op. */
static u32 hwirq_intspec[NR_EHV_PIC_INTS];
/* Mapped per-CPU MPIC registers for direct EOI; NULL when not available. */
static u32 __iomem *mpic_percpu_base_vaddr;

/* Flag bit in the second specifier cell: EOI directly at the MPIC. */
#define IRQ_TYPE_MPIC_DIRECT 4
#define MPIC_EOI 0x00B0 /* byte offset of the MPIC per-CPU EOI register */
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun /*
41*4882a593Smuzhiyun * Linux descriptor level callbacks
42*4882a593Smuzhiyun */
43*4882a593Smuzhiyun
ehv_pic_unmask_irq(struct irq_data * d)44*4882a593Smuzhiyun void ehv_pic_unmask_irq(struct irq_data *d)
45*4882a593Smuzhiyun {
46*4882a593Smuzhiyun unsigned int src = virq_to_hw(d->irq);
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun ev_int_set_mask(src, 0);
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun
ehv_pic_mask_irq(struct irq_data * d)51*4882a593Smuzhiyun void ehv_pic_mask_irq(struct irq_data *d)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun unsigned int src = virq_to_hw(d->irq);
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun ev_int_set_mask(src, 1);
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun
ehv_pic_end_irq(struct irq_data * d)58*4882a593Smuzhiyun void ehv_pic_end_irq(struct irq_data *d)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun unsigned int src = virq_to_hw(d->irq);
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun ev_int_eoi(src);
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun
/*
 * Signal end-of-interrupt by writing the MPIC per-CPU EOI register
 * directly, bypassing the hypervisor.  Only installed for sources whose
 * device-tree specifier carries IRQ_TYPE_MPIC_DIRECT and only after the
 * per-CPU MPIC registers were successfully mapped (see ehv_pic_host_map).
 */
void ehv_pic_direct_end_irq(struct irq_data *d)
{
	/* MPIC_EOI is a byte offset; the base pointer is u32 *, hence /4. */
	out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0);
}
69*4882a593Smuzhiyun
/*
 * Route the interrupt source behind @d to a single CPU picked from @dest.
 *
 * The current hypervisor configuration is read back and rewritten with
 * only the destination CPU replaced.  Always reports success.
 */
int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
			 bool force)
{
	unsigned int hwirq = virq_to_hw(d->irq);
	unsigned int config, prio, old_dest;
	int new_dest = irq_choose_cpu(dest);
	unsigned long flags;

	spin_lock_irqsave(&ehv_pic_lock, flags);
	ev_int_get_config(hwirq, &config, &prio, &old_dest);
	ev_int_set_config(hwirq, config, prio, new_dest);
	spin_unlock_irqrestore(&ehv_pic_lock, flags);

	return IRQ_SET_MASK_OK;
}
85*4882a593Smuzhiyun
ehv_pic_type_to_vecpri(unsigned int type)86*4882a593Smuzhiyun static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun /* Now convert sense value */
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun switch (type & IRQ_TYPE_SENSE_MASK) {
91*4882a593Smuzhiyun case IRQ_TYPE_EDGE_RISING:
92*4882a593Smuzhiyun return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
93*4882a593Smuzhiyun EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun case IRQ_TYPE_EDGE_FALLING:
96*4882a593Smuzhiyun case IRQ_TYPE_EDGE_BOTH:
97*4882a593Smuzhiyun return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
98*4882a593Smuzhiyun EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun case IRQ_TYPE_LEVEL_HIGH:
101*4882a593Smuzhiyun return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
102*4882a593Smuzhiyun EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun case IRQ_TYPE_LEVEL_LOW:
105*4882a593Smuzhiyun default:
106*4882a593Smuzhiyun return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
107*4882a593Smuzhiyun EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun
ehv_pic_set_irq_type(struct irq_data * d,unsigned int flow_type)111*4882a593Smuzhiyun int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun unsigned int src = virq_to_hw(d->irq);
114*4882a593Smuzhiyun unsigned int vecpri, vold, vnew, prio, cpu_dest;
115*4882a593Smuzhiyun unsigned long flags;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun if (flow_type == IRQ_TYPE_NONE)
118*4882a593Smuzhiyun flow_type = IRQ_TYPE_LEVEL_LOW;
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun irqd_set_trigger_type(d, flow_type);
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun vecpri = ehv_pic_type_to_vecpri(flow_type);
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun spin_lock_irqsave(&ehv_pic_lock, flags);
125*4882a593Smuzhiyun ev_int_get_config(src, &vold, &prio, &cpu_dest);
126*4882a593Smuzhiyun vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
127*4882a593Smuzhiyun EHV_PIC_INFO(VECPRI_SENSE_MASK));
128*4882a593Smuzhiyun vnew |= vecpri;
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun /*
131*4882a593Smuzhiyun * TODO : Add specific interface call for platform to set
132*4882a593Smuzhiyun * individual interrupt priorities.
133*4882a593Smuzhiyun * platform currently using static/default priority for all ints
134*4882a593Smuzhiyun */
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun prio = 8;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun ev_int_set_config(src, vecpri, prio, cpu_dest);
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun spin_unlock_irqrestore(&ehv_pic_lock, flags);
141*4882a593Smuzhiyun return IRQ_SET_MASK_OK_NOCOPY;
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun
/*
 * Default chip: mask/unmask and EOI go through hypervisor calls.
 * .irq_set_affinity is filled in at runtime by ehv_pic_init().
 */
static struct irq_chip ehv_pic_irq_chip = {
	.irq_mask = ehv_pic_mask_irq,
	.irq_unmask = ehv_pic_unmask_irq,
	.irq_eoi = ehv_pic_end_irq,
	.irq_set_type = ehv_pic_set_irq_type,
};
150*4882a593Smuzhiyun
/*
 * Variant chip for sources flagged IRQ_TYPE_MPIC_DIRECT: EOI is done by
 * writing the MPIC per-CPU EOI register directly instead of a hypercall.
 */
static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
	.irq_mask = ehv_pic_mask_irq,
	.irq_unmask = ehv_pic_unmask_irq,
	.irq_eoi = ehv_pic_direct_end_irq,
	.irq_set_type = ehv_pic_set_irq_type,
};
157*4882a593Smuzhiyun
/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ehv_pic_get_irq(void)
{
	int irq;

	/* The PIC must have been registered by ehv_pic_init() first. */
	BUG_ON(global_ehv_pic == NULL);

	if (global_ehv_pic->coreint_flag)
		irq = mfspr(SPRN_EPR);	/* if core int mode */
	else
		ev_int_iack(0, &irq);	/* legacy mode */

	if (irq == 0xFFFF)		/* 0xFFFF --> no irq is pending */
		return 0;

	/*
	 * this will also setup revmap[] in the slow path for the first
	 * time, next calls will always use fast path by indexing revmap
	 */
	return irq_linear_revmap(global_ehv_pic->irqhost, irq);
}
179*4882a593Smuzhiyun
/*
 * Domain match callback: a domain with no OF node matches any node;
 * otherwise the nodes must be identical.
 */
static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
			      enum irq_domain_bus_token bus_token)
{
	struct device_node *dn = irq_domain_get_of_node(h);

	return !dn || dn == node;
}
187*4882a593Smuzhiyun
/*
 * Domain map callback: bind @virq to a chip and the fasteoi flow.
 * Sources whose device-tree specifier requested direct MPIC EOI get the
 * direct-EOI chip, provided the per-CPU MPIC registers were mapped.
 */
static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct ehv_pic *pic = h->host_data;
	struct irq_chip *chip = &pic->hc_irq;	/* default chip */

	if (mpic_percpu_base_vaddr &&
	    (hwirq_intspec[hw] & IRQ_TYPE_MPIC_DIRECT))
		chip = &ehv_pic_direct_eoi_irq_chip;

	irq_set_chip_data(virq, chip);
	/*
	 * using handle_fasteoi_irq as our irq handler, this will
	 * only call the eoi callback and suitable for the MPIC
	 * controller which set ISR/IPR automatically and clear the
	 * highest priority active interrupt in ISR/IPR when we do
	 * a specific eoi
	 */
	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}
216*4882a593Smuzhiyun
/*
 * Domain xlate callback: translate a device-tree interrupt specifier
 * into a hardware irq number and a Linux trigger type.
 *
 * The optional second specifier cell carries a firmware sense encoding
 * (plus the IRQ_TYPE_MPIC_DIRECT flag bit); it is cached per hwirq in
 * hwirq_intspec[] for use by ehv_pic_host_map().
 */
static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
			      const u32 *intspec, unsigned int intsize,
			      irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	/*
	 * interrupt sense values coming from the guest device tree
	 * interrupt specifiers can have four possible sense and
	 * level encoding information and they need to
	 * be translated between firmware type & linux type.
	 */

	static const unsigned char map_of_senses_to_linux_irqtype[4] = {
		IRQ_TYPE_EDGE_FALLING,
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
	};

	/*
	 * Reject malformed specifiers: we must have at least one cell,
	 * and the hwirq must fit the PIC's range before it is used to
	 * index hwirq_intspec[] (prevents an out-of-bounds write from a
	 * bad device tree).
	 */
	if (intsize < 1 || intspec[0] >= NR_EHV_PIC_INTS)
		return -EINVAL;

	*out_hwirq = intspec[0];
	if (intsize > 1) {
		hwirq_intspec[intspec[0]] = intspec[1];
		/* Low two bits select the sense; the MPIC-direct flag is masked off. */
		*out_flags = map_of_senses_to_linux_irqtype[intspec[1] &
							~IRQ_TYPE_MPIC_DIRECT];
	} else {
		*out_flags = IRQ_TYPE_NONE;
	}

	return 0;
}
247*4882a593Smuzhiyun
/* irq_domain operations for the ePAPR hypervisor PIC. */
static const struct irq_domain_ops ehv_pic_host_ops = {
	.match = ehv_pic_host_match,
	.map = ehv_pic_host_map,
	.xlate = ehv_pic_host_xlate,
};
253*4882a593Smuzhiyun
/*
 * Find the "epapr,hv-pic" device-tree node, allocate the PIC instance,
 * create its linear irq domain, and install it as the default interrupt
 * host.  On any failure the function cleans up what it allocated and
 * returns, leaving global_ehv_pic NULL.
 */
void __init ehv_pic_init(void)
{
	struct device_node *np, *np2;
	struct ehv_pic *ehv_pic;
	int coreint_flag = 1;	/* assume external-proxy (coreint) mode */

	np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
	if (!np) {
		pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
		return;
	}

	/* Without the external-proxy property, fall back to legacy iack mode. */
	if (!of_find_property(np, "has-external-proxy", NULL))
		coreint_flag = 0;

	ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
	if (!ehv_pic) {
		of_node_put(np);
		return;
	}

	ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
						 &ehv_pic_host_ops, ehv_pic);
	if (!ehv_pic->irqhost) {
		of_node_put(np);
		kfree(ehv_pic);
		return;
	}

	/*
	 * NOTE(review): np is not released on the success path — presumably
	 * the reference is kept for the lifetime of the irq domain; confirm
	 * against irq_domain_add_linear refcounting expectations.
	 */

	/* Optional: map per-CPU MPIC registers to enable direct EOI. */
	np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
	if (np2) {
		mpic_percpu_base_vaddr = of_iomap(np2, 0);
		if (!mpic_percpu_base_vaddr)
			pr_err("ehv_pic_init: of_iomap failed\n");

		of_node_put(np2);
	}

	/* Copy the template chip and hook up affinity handling. */
	ehv_pic->hc_irq = ehv_pic_irq_chip;
	ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
	ehv_pic->coreint_flag = coreint_flag;

	global_ehv_pic = ehv_pic;
	irq_set_default_host(global_ehv_pic->irqhost);
}
299