// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/atomic.h>
#include <linux/ktime.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/idtentry.h>
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	struct list_head eoi_list;
	short refcnt;
	short spurious_cnt;
	short type;		/* type */
	u8 mask_reason;		/* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT	0x01
#define EVT_MASK_REASON_TEMPORARY	0x02
#define EVT_MASK_REASON_EOI_PENDING	0x04
	u8 is_active;		/* Is event just being handled? */
	unsigned irq;
	evtchn_port_t evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */
	unsigned short eoi_cpu;	/* EOI must happen on this cpu-1 */
	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
	u64 eoi_time;		/* Time in jiffies when to EOI. */
	raw_spinlock_t lock;

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
#define PIRQ_MSI_GROUP	(1 << 2)

static uint __read_mostly event_loop_timeout = 2;
module_param(event_loop_timeout, uint, 0644);

static uint __read_mostly event_eoi_delay = 10;
module_param(event_eoi_delay, uint, 0644);

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

/*
 * Lock protecting event handling loop against removing event channels.
 * Adding of event channels is no issue as the associated IRQ becomes active
 * only after everything is setup (before request_[threaded_]irq() the handler
 * can't be entered for an event, as the event channel will be unmasked only
 * then).
 */
static DEFINE_RWLOCK(evtchn_rwlock);

/*
 * Lock hierarchy:
 *
 * irq_mapping_update_lock
 *   evtchn_rwlock
 *     IRQ-desc lock
 *       percpu eoi_list_lock
 *         irq_info->lock
 */

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
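
/*
 * evtchn_to_irq is a two-level table: a top-level array of row pointers,
 * each row being one page of ints allocated on demand.  Illustrative
 * arithmetic (assuming a 4 KiB page and 4-byte int): EVTCHN_PER_ROW is
 * 4096 / 4 = 1024, so e.g. port 2500 lands in row 2, column 452.
 */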

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static DEFINE_PER_CPU(unsigned int, irq_epoch);

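/* Reset every entry of one evtchn_to_irq row to -1 ("no irq bound"). */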
static void clear_evtchn_to_irq_row(int *evtchn_row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		WRITE_ONCE(evtchn_row[col], -1);
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(evtchn_to_irq[row]);
	}
}

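/*
 * Record the evtchn -> irq mapping, allocating the backing row on first
 * use.  Concurrent allocators race via cmpxchg(); the loser frees its
 * page and uses the row installed by the winner.
 */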
static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{
	unsigned row;
	unsigned col;
	int *evtchn_row;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
		if (evtchn_row == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(evtchn_row);

		/*
		 * We've prepared an empty row for the mapping. If a different
		 * thread was faster inserting it, we can drop ours.
		 */
		if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
			free_page((unsigned long) evtchn_row);
	}

	WRITE_ONCE(evtchn_to_irq[row][col], irq);
	return 0;
}

int get_evtchn_to_irq(evtchn_port_t evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
}

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	if (irq < nr_legacy_irqs())
		return legacy_info_ptrs[irq];
	else
		return irq_get_chip_data(irq);
}

static void set_info_for_irq(unsigned int irq, struct irq_info *info)
{
	if (irq < nr_legacy_irqs())
		legacy_info_ptrs[irq] = info;
	else
		irq_set_chip_data(irq, info);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     evtchn_port_t evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;
	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
	raw_spin_lock_init(&info->lock);

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(evtchn);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     evtchn_port_t evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  evtchn_port_t evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   evtchn_port_t evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   evtchn_port_t evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	xen_evtchn_port_remove(info->evtchn, info->cpu);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
evtchn_port_t evtchn_from_irq(unsigned irq)
{
	const struct irq_info *info = NULL;

	if (likely(irq < nr_irqs))
		info = info_for_irq(irq);
	if (!info)
		return 0;

	return info->evtchn;
}

unsigned int irq_from_evtchn(evtchn_port_t evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

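/*
 * Masking is reason-based: the channel stays masked while any of the
 * EVT_MASK_REASON_* bits is set in mask_reason, and the hypervisor-visible
 * mask is only touched on the zero <-> non-zero transitions, under
 * info->lock.
 */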
static void do_mask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&info->lock, flags);

	if (!info->mask_reason)
		mask_evtchn(info->evtchn);

	info->mask_reason |= reason;

	raw_spin_unlock_irqrestore(&info->lock, flags);
}

static void do_unmask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&info->lock, flags);

	info->mask_reason &= ~reason;

	if (!info->mask_reason)
		unmask_evtchn(info->evtchn);

	raw_spin_unlock_irqrestore(&info->lock, flags);
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);

	info->cpu = cpu;
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

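/*
 * Deferred-EOI ("lateeoi") bookkeeping: each CPU keeps a list of events
 * whose EOI is owed at some future time, sorted by eoi_time, plus one
 * delayed work item that fires when the earliest EOI becomes due.
 */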
struct lateeoi_work {
	struct delayed_work delayed;
	spinlock_t eoi_list_lock;
	struct list_head eoi_list;
};

static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);

static void lateeoi_list_del(struct irq_info *info)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	unsigned long flags;

	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
	list_del_init(&info->eoi_list);
	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}

static void lateeoi_list_add(struct irq_info *info)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	struct irq_info *elem;
	u64 now = get_jiffies_64();
	unsigned long delay;
	unsigned long flags;

	if (now < info->eoi_time)
		delay = info->eoi_time - now;
	else
		delay = 1;

	spin_lock_irqsave(&eoi->eoi_list_lock, flags);

	if (list_empty(&eoi->eoi_list)) {
		list_add(&info->eoi_list, &eoi->eoi_list);
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, delay);
	} else {
		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
			if (elem->eoi_time <= info->eoi_time)
				break;
		}
		list_add(&info->eoi_list, &elem->eoi_list);
	}

	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}

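/*
 * Perform (or defer) the EOI for an event.  Spurious events grow an
 * exponential back-off: from the second spurious event in a row onward
 * the EOI is delayed by 2^(spurious_cnt - 2) jiffies, capped at HZ,
 * e.g. counts 2, 3, 4 give delays of 1, 2 and 4 jiffies respectively.
 */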
static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
{
	evtchn_port_t evtchn;
	unsigned int cpu;
	unsigned int delay = 0;

	evtchn = info->evtchn;
	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
		return;

	if (spurious) {
		if ((1 << info->spurious_cnt) < (HZ << 2))
			info->spurious_cnt++;
		if (info->spurious_cnt > 1) {
			delay = 1 << (info->spurious_cnt - 2);
			if (delay > HZ)
				delay = HZ;
			if (!info->eoi_time)
				info->eoi_cpu = smp_processor_id();
			info->eoi_time = get_jiffies_64() + delay;
		}
	} else {
		info->spurious_cnt = 0;
	}

	cpu = info->eoi_cpu;
	if (info->eoi_time &&
	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
		lateeoi_list_add(info);
		return;
	}

	info->eoi_time = 0;

	/* is_active hasn't been reset yet, do it now. */
	smp_store_release(&info->is_active, 0);
	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}

static void xen_irq_lateeoi_worker(struct work_struct *work)
{
	struct lateeoi_work *eoi;
	struct irq_info *info;
	u64 now = get_jiffies_64();
	unsigned long flags;

	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);

	read_lock_irqsave(&evtchn_rwlock, flags);

	while (true) {
		spin_lock(&eoi->eoi_list_lock);

		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
						eoi_list);

		if (info == NULL || now < info->eoi_time) {
			spin_unlock(&eoi->eoi_list_lock);
			break;
		}

		list_del_init(&info->eoi_list);

		spin_unlock(&eoi->eoi_list_lock);

		info->eoi_time = 0;

		xen_irq_lateeoi_locked(info, false);
	}

	if (info)
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, info->eoi_time - now);

	read_unlock_irqrestore(&evtchn_rwlock, flags);
}

static void xen_cpu_init_eoi(unsigned int cpu)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);

	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
	spin_lock_init(&eoi->eoi_list_lock);
	INIT_LIST_HEAD(&eoi->eoi_list);
}

void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
	struct irq_info *info;
	unsigned long flags;

	read_lock_irqsave(&evtchn_rwlock, flags);

	info = info_for_irq(irq);

	if (info)
		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);

	read_unlock_irqrestore(&evtchn_rwlock, flags);
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
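
/*
 * Typical lateeoi usage (an illustrative sketch, not lifted from a real
 * driver; process_event() is a hypothetical helper):
 *
 *	irq = bind_evtchn_to_irq_lateeoi(evtchn);
 *	ret = request_irq(irq, handler, 0, "mydev", dev);
 *
 *	static irqreturn_t handler(int irq, void *dev_id)
 *	{
 *		bool handled = process_event(dev_id);
 *
 *		xen_irq_lateeoi(irq, handled ? 0 : XEN_EOI_FLAG_SPURIOUS);
 *		return IRQ_HANDLED;
 *	}
 */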

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;

#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	set_info_for_irq(irq, info);

	INIT_LIST_HEAD(&info->eoi_list);
	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < nr_legacy_irqs())
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

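/*
 * Tear down the bookkeeping for an irq: drop it from the lateeoi and
 * global lists under the write lock, free its irq_info, and release the
 * descriptor unless it is a legacy one owned by the arch code.
 */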
static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	unsigned long flags;

	if (WARN_ON(!info))
		return;

	write_lock_irqsave(&evtchn_rwlock, flags);

	if (!list_empty(&info->eoi_list))
		lateeoi_list_del(info);

	list_del(&info->list);

	set_info_for_irq(irq, NULL);

	WARN_ON(info->refcnt > 0);

	write_unlock_irqrestore(&evtchn_rwlock, flags);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < nr_legacy_irqs())
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(evtchn_port_t port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

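/*
 * Mark the event as no longer being handled and clear its pending bit.
 * The release store pairs with the corresponding check on handler
 * entry, allowing a new instance of the event to be processed.
 */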
static void event_handler_exit(struct irq_info *info)
{
	smp_store_release(&info->is_active, 0);
	clear_evtchn(info->evtchn);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		do_mask(info, EVT_MASK_REASON_TEMPORARY);

		event_handler_exit(info);

		irq_move_masked_irq(data);

		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
	} else
		event_handler_exit(info);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc)
		goto err;

	info->evtchn = evtchn;
	bind_evtchn_to_cpu(evtchn, 0);

	rc = xen_evtchn_port_setup(evtchn);
	if (rc)
		goto err;

out:
	do_unmask(info, EVT_MASK_REASON_EXPLICIT);

	eoi_pirq(irq_get_irq_data(irq));

	return 0;

err:
	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
	xen_evtchn_close(evtchn);
	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	do_mask(info, EVT_MASK_REASON_EXPLICIT);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	enable_dynirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

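/*
 * Drop one reference on an irq; on the last reference close the backing
 * event channel, clear the per-type reverse mapping and free the irq.
 * Called with irq_mapping_update_lock held.
 */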
static void __unbind_from_irq(unsigned int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	struct irq_info *info = info_for_irq(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually starts
 * up. Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
				      shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
					      handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
					      handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	while (nvec--)
		__unbind_from_irq(irq + nvec);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * Only the first vector in an MSI group unmaps the PIRQ;
	 * for any other vector in the group the unmap is skipped.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

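/*
 * Map an event channel to an irq, allocating a fresh irq and installing
 * the given irq_chip on first use; binding an already-bound port simply
 * returns the existing irq.
 */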
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
	return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
{
	return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	evtchn_port_t evtchn;
	int ret, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
					       evtchn_port_t remote_port,
					       struct irq_chip *chip)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
					       chip);
}

int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
					   evtchn_port_t remote_port)
{
	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
						   &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);

find_virq(unsigned int virq,unsigned int cpu,evtchn_port_t * evtchn)1263*4882a593Smuzhiyun static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun struct evtchn_status status;
1266*4882a593Smuzhiyun evtchn_port_t port;
1267*4882a593Smuzhiyun int rc = -ENOENT;
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun memset(&status, 0, sizeof(status));
1270*4882a593Smuzhiyun for (port = 0; port < xen_evtchn_max_channels(); port++) {
1271*4882a593Smuzhiyun status.dom = DOMID_SELF;
1272*4882a593Smuzhiyun status.port = port;
1273*4882a593Smuzhiyun rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
1274*4882a593Smuzhiyun if (rc < 0)
1275*4882a593Smuzhiyun continue;
1276*4882a593Smuzhiyun if (status.status != EVTCHNSTAT_virq)
1277*4882a593Smuzhiyun continue;
1278*4882a593Smuzhiyun if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
1279*4882a593Smuzhiyun *evtchn = port;
1280*4882a593Smuzhiyun break;
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun }
1283*4882a593Smuzhiyun return rc;
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun /**
1287*4882a593Smuzhiyun * xen_evtchn_nr_channels - number of usable event channel ports
1288*4882a593Smuzhiyun *
1289*4882a593Smuzhiyun * This may be less than the maximum supported by the current
1290*4882a593Smuzhiyun * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
1291*4882a593Smuzhiyun * supported.
1292*4882a593Smuzhiyun */
xen_evtchn_nr_channels(void)1293*4882a593Smuzhiyun unsigned xen_evtchn_nr_channels(void)
1294*4882a593Smuzhiyun {
1295*4882a593Smuzhiyun return evtchn_ops->nr_channels();
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
	struct evtchn_bind_virq bind_virq;
	evtchn_port_t evtchn = 0;
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		if (percpu)
			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						  &bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu, &evtchn);
			BUG_ON(ret < 0);
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname, void *dev_id,
					  struct irq_chip *chip)
{
	int irq, retval;

	irq = bind_evtchn_to_irq_chip(evtchn, chip);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
					      devname, dev_id,
					      &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

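/*
 * Illustrative sketch only (not used by this file): a driver that has
 * been handed an event channel port, e.g. via xenbus, binds it and gets
 * back a normal Linux irq. The "example" names and the empty handler
 * body are hypothetical.
 */
static irqreturn_t example_evtchn_interrupt(int irq, void *dev_id)
{
	/* Service the hypothetical device here. */
	return IRQ_HANDLED;
}

static int __maybe_unused example_bind_evtchn(evtchn_port_t evtchn)
{
	int irq;

	irq = bind_evtchn_to_irqhandler(evtchn, example_evtchn_interrupt,
					0, "example-evtchn", NULL);
	if (irq < 0)		/* binding or request_irq() failed */
		return irq;

	/* ... use the irq; tear down with unbind_from_irqhandler(). */
	return irq;
}
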
int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
				      irq_handler_t handler,
				      unsigned long irqflags,
				      const char *devname, void *dev_id)
{
	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
					      devname, dev_id,
					      &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);

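/*
 * Illustrative sketch only: with the lateeoi chip the handler itself
 * must signal the EOI once the event has really been processed, or the
 * channel stays masked. The handler name is hypothetical; the flag is
 * assumed to come from include/xen/events.h.
 */
static irqreturn_t __maybe_unused example_lateeoi_interrupt(int irq,
							    void *dev_id)
{
	bool spurious = false;

	/* Process the event; set 'spurious' if nothing was pending. */

	xen_irq_lateeoi(irq, spurious ? XEN_EOI_FLAG_SPURIOUS : 0);
	return IRQ_HANDLED;
}
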
static int bind_interdomain_evtchn_to_irqhandler_chip(
	unsigned int remote_domain, evtchn_port_t remote_port,
	irq_handler_t handler, unsigned long irqflags,
	const char *devname, void *dev_id, struct irq_chip *chip)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
						  chip);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
						  evtchn_port_t remote_port,
						  irq_handler_t handler,
						  unsigned long irqflags,
						  const char *devname,
						  void *dev_id)
{
	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
				remote_port, handler, irqflags, devname,
				dev_id, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname,
			    void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

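/*
 * Illustrative sketch only: VIRQs are bound on a specific cpu; passing
 * IRQF_PERCPU selects the percpu chip in bind_virq_to_irq() above.
 * VIRQ_DEBUG is just an example, the handler is hypothetical.
 */
static irqreturn_t example_debug_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __maybe_unused example_bind_debug_virq(unsigned int cpu)
{
	return bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
				       example_debug_interrupt,
				       IRQF_PERCPU, "example-debug", NULL);
}
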
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);

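/*
 * Illustrative sketch only: priorities are honoured by the FIFO event
 * channel ABI; on the 2-level ABI the hypercall simply fails. A caller
 * might raise a latency-sensitive channel like this (hypothetical irq,
 * XEN_IRQ_PRIORITY_MAX assumed from include/xen/events.h).
 */
static int __maybe_unused example_raise_priority(unsigned int irq)
{
	return xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
}
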
int evtchn_make_refcounted(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = info_for_irq(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(evtchn_port_t evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = info_for_irq(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
		goto done;

	info->refcnt++;
	err = 0;
done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);

	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

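/*
 * Illustrative sketch only of the refcounting protocol: once a port is
 * made refcounted it stays bound until the last evtchn_put(), since
 * unbinding just drops a reference for such ports. Names are
 * hypothetical, error handling trimmed.
 */
static void __maybe_unused example_refcounted_use(evtchn_port_t evtchn)
{
	if (evtchn_make_refcounted(evtchn))
		return;

	if (evtchn_get(evtchn) == 0) {	/* take an extra reference */
		/* ... the port cannot be torn down here ... */
		evtchn_put(evtchn);	/* drop the extra reference */
	}
}
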
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
					    NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending NMI to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

struct evtchn_loop_ctrl {
	ktime_t timeout;
	unsigned count;
	bool defer_eoi;
};

void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	struct irq_info *info;

	irq = get_evtchn_to_irq(port);
	if (irq == -1)
		return;

	/*
	 * Check for timeout every 256 events.
	 * We set the timeout value only after the first 256 events, in
	 * order not to hurt the common case of few loop iterations. The
	 * 256 is basically an arbitrary value.
	 *
	 * If we hit the timeout, we need to defer all further EOIs in
	 * order to leave the event handling loop sooner rather than
	 * later.
	 */
	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
		ktime_t kt = ktime_get();

		if (!ctrl->timeout) {
			kt = ktime_add_ms(kt,
					  jiffies_to_msecs(event_loop_timeout));
			ctrl->timeout = kt;
		} else if (kt > ctrl->timeout) {
			ctrl->defer_eoi = true;
		}
	}

	info = info_for_irq(irq);
	if (xchg_acquire(&info->is_active, 1))
		return;

	if (ctrl->defer_eoi) {
		info->eoi_cpu = smp_processor_id();
		info->irq_epoch = __this_cpu_read(irq_epoch);
		info->eoi_time = get_jiffies_64() + event_eoi_delay;
	}

	generic_handle_irq(irq);
}

static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = smp_processor_id();
	struct evtchn_loop_ctrl ctrl = { 0 };

	read_lock(&evtchn_rwlock);

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		xen_evtchn_handle_events(cpu, &ctrl);

		BUG_ON(!irqs_disabled());

		virt_rmb(); /* Hypervisor can set upcall pending. */

	} while (vcpu_info->evtchn_upcall_pending);

	read_unlock(&evtchn_rwlock);

	/*
	 * Increment irq_epoch only now to defer EOIs only for
	 * xen_irq_lateeoi() invocations occurring from inside the loop
	 * above.
	 */
	__this_cpu_inc(irq_epoch);
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/*
	 * Make sure the irq is masked, since the new event channel
	 * will also be masked.
	 */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out. */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/*
	 * Expect the irq to have been bound before, so there should be
	 * a proper type.
	 */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	bind_evtchn_to_cpu(evtchn, info->cpu);
	/* This will be deferred until the interrupt is processed. */
	irq_set_affinity(irq, cpumask_of(info->cpu));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	if (!xen_support_evtchn_rebind())
		return -1;

	/* Send future instances of this interrupt to the target vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	do_mask(info, EVT_MASK_REASON_TEMPORARY);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
	int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);

	if (!ret)
		irq_data_update_effective_affinity(data, cpumask_of(tcpu));

	return ret;
}

/* To be called with desc->lock held. */
int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	return set_affinity_irq(d, cpumask_of(tcpu), false);
}
EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);

static void enable_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
}

static void disable_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
}

static void ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		do_mask(info, EVT_MASK_REASON_TEMPORARY);

		event_handler_exit(info);

		irq_move_masked_irq(data);

		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
	} else
		event_handler_exit(info);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static void lateeoi_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return;

	do_mask(info, EVT_MASK_REASON_EOI_PENDING);

	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		do_mask(info, EVT_MASK_REASON_TEMPORARY);

		clear_evtchn(evtchn);

		irq_move_masked_irq(data);

		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
	} else
		clear_evtchn(evtchn);
}

static void lateeoi_mask_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
		ack_dynirq(data);
	}
}

static int retrigger_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	do_mask(info, EVT_MASK_REASON_TEMPORARY);
	set_evtchn(evtchn);
	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 1;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/*
		 * save/restore of PT devices doesn't work, so at this point
		 * the only devices present are GSI-based emulated devices.
		 */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	evtchn_port_t evtchn;
	int virq, irq;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	evtchn_port_t evtchn;
	int ipi, irq;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it. */
void xen_clear_irq_pending(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/*
 * Poll waiting for an irq to become pending with timeout. In the usual
 * case, the irq will be disabled so it won't deliver an interrupt.
 */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/*
 * Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt.
 */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

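/*
 * Illustrative sketch only: the classic poll pattern (used e.g. by the
 * PV spinlock code) - clear the pending state, re-check the wait
 * condition, then block in the hypervisor until the irq's channel is
 * signalled. The 'done' predicate is hypothetical.
 */
static void __maybe_unused example_wait_on_irq(int irq, bool (*done)(void))
{
	while (!done()) {
		xen_clear_irq_pending(irq);
		if (done())
			break;
		xen_poll_irq(irq);	/* returns once the evtchn is pending */
	}
}
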
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_lateeoi_chip __read_mostly = {
	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
	.name			= "xen-dyn-lateeoi",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= lateeoi_ack_dynirq,
	.irq_mask_ack		= lateeoi_mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

#ifdef CONFIG_XEN_PVHVM
/*
 * Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions.
 */
void xen_setup_callback_vector(void)
{
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		if (xen_set_callback_via(callback_via)) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
		}
	}
}

static __init void xen_alloc_callback_vector(void)
{
	if (!xen_have_vector_callback)
		return;

	pr_info("Xen HVM callback vector for event delivery is enabled\n");
	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
}
#else
void xen_setup_callback_vector(void) {}
static inline void xen_alloc_callback_vector(void) {}
#endif

bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);
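/*
 * Usage note: with MODULE_PARAM_PREFIX "xen." the parameter above is
 * set from the kernel command line, e.g. booting with
 * "xen.fifo_events=0" forces the 2-level event channel ABI even when
 * the hypervisor offers FIFO-based event channels.
 */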

static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
	int ret = 0;

	xen_cpu_init_eoi(cpu);

	if (evtchn_ops->percpu_init)
		ret = evtchn_ops->percpu_init(cpu);

	return ret;
}

static int xen_evtchn_cpu_dead(unsigned int cpu)
{
	int ret = 0;

	if (evtchn_ops->percpu_deinit)
		ret = evtchn_ops->percpu_deinit(cpu);

	return ret;
}

void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;
	evtchn_port_t evtchn;

	if (xen_fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0) {
		xen_evtchn_2l_init();
		xen_fifo_events = false;
	}

	xen_cpu_init_eoi(smp_processor_id());

	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
				  "xen/evtchn:prepare",
				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector)) {
		xen_setup_callback_vector();
		xen_alloc_callback_vector();
	}

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/*
		 * pci_xen_hvm_init must be called after native_init_IRQ so
		 * that __acpi_register_gsi can point at the right function.
		 */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}