/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
 * Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
		bool line_status);

static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
			struct kvm_ioapic *ioapic,
			int trigger_mode,
			int pin);

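/*
 * Register access is indirect: the guest first writes a register index to
 * IOREGSEL and then reads or writes the selected register through the
 * IOWIN window.  Indices 0x10 and above address the 64-bit redirection
 * table entries, two 32-bit halves per pin, which is why the accessors
 * below derive the pin from (ioregsel - 0x10) >> 1 and select the half
 * from bit 0 of ioregsel.
 */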
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content = ~0ULL;

			if (redir_index < IOAPIC_NUM_PINS) {
				u32 index = array_index_nospec(
					redir_index, IOAPIC_NUM_PINS);

				redir_content = ioapic->redirtbl[index].bits;
			}

			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

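/*
 * RTC EOI tracking.  The RTC interrupt is edge-triggered, but userspace
 * wants to know whether the previous tick was acknowledged so that
 * coalesced ticks can be re-injected (see the comment in ioapic_set_irq).
 * pending_eoi counts destination vCPUs that have not yet EOI'd the RTC
 * vector; dest_map records which vCPUs those are and which vector was
 * delivered to each of them.
 */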
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
				 e->fields.dest_id,
				 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, dest_map->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, dest_map->map);
		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, dest_map->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
		__rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
			int vector)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;

	/* RTC special handling */
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    (vector == dest_map->vectors[vcpu->vcpu_id]) &&
	    (test_and_clear_bit(vcpu->vcpu_id,
				ioapic->rtc_status.dest_map.map))) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}

static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];

	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
		if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
					 entry->fields.dest_id,
					 entry->fields.dest_mode) ||
		    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
			continue;

		/*
		 * If this vector no longer has a pending EOI in any
		 * LAPIC, record the EOI for it now.
		 */
		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
		break;
	}
}

static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
			  int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * AMD SVM AVIC accelerates EOI writes only for edge-triggered
	 * interrupts, in which case the in-kernel IOAPIC will never see
	 * the EOI.  Compensate by lazily updating the pending EOI state
	 * whenever an IOAPIC irq is set.
	 */
	if (edge && kvm_apicv_activated(ioapic->kvm))
		ioapic_lazy_update_eoi(ioapic, irq);

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has already been ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge) {
		ioapic->irr_delivered &= ~mask;
		if (old_irr == ioapic->irr) {
			ret = 0;
			goto out;
		}
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}

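/*
 * Recompute the set of vectors for which this vCPU must intercept EOI
 * writes so the in-kernel IOAPIC state can be updated: level-triggered
 * entries, entries with an ack notifier, and the RTC pin.  This runs
 * when a scan is requested via kvm_make_scan_ioapic_request(), typically
 * because the routing may have changed.
 */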
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, dest_map->map))
		__set_bit(dest_map->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);

			if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
						e->fields.dest_id, dm) ||
			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
				__set_bit(e->fields.vector,
					  ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (!ioapic_in_kernel(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

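/*
 * Handle a write to the currently selected indirect register.  Writes to
 * a redirection table entry must preserve the read-only remote_irr and
 * delivery_status fields, and any change that can affect routing forces
 * the affected vCPUs to rescan ioapic_handled_vectors.
 */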
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;
	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		if (index >= IOAPIC_NUM_PINS)
			return;
		index = array_index_nospec(index, IOAPIC_NUM_PINS);
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		/* Preserve read-only fields */
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		old_dest_id = e->fields.dest_id;
		old_dest_mode = e->fields.dest_mode;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;

		/*
		 * Some OSes (Linux, Xen) assume that the Remote IRR bit will
		 * be cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered.  This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;

		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		if (e->fields.delivery_mode == APIC_DM_FIXED) {
			struct kvm_lapic_irq irq;

			irq.vector = e->fields.vector;
			irq.delivery_mode = e->fields.delivery_mode << 8;
			irq.dest_mode =
			    kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
			irq.level = false;
			irq.trig_mode = e->fields.trig_mode;
			irq.shorthand = APIC_DEST_NOSHORT;
			irq.dest_id = e->fields.dest_id;
			irq.msi_redir_hint = false;
			bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
						 vcpu_bitmap);
			if (old_dest_mode != e->fields.dest_mode ||
			    old_dest_id != e->fields.dest_id) {
				/*
				 * Also add the vCPUs targeted by the
				 * previous configuration, so that their
				 * ioapic_handled_vectors are refreshed as
				 * well.  Note that the *old* destination
				 * mode must be used here.
				 */
				irq.dest_id = old_dest_id;
				irq.dest_mode =
				    kvm_lapic_irq_dest_mode(
					!!old_dest_mode);
				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
							 vcpu_bitmap);
			}
			kvm_make_scan_ioapic_request_mask(ioapic->kvm,
							  vcpu_bitmap);
		} else {
			kvm_make_scan_ioapic_request(ioapic->kvm);
		}
		break;
	}
}

static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask ||
	    (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
	    entry->fields.remote_irr))
		return -1;

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = APIC_DEST_NOSHORT;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that this path is only taken while it is zero,
		 * i.e. when rtc_irq_check_coalesced returns false.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

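/*
 * Entry point for irq injection from userspace and irqfds.  Several irq
 * sources can drive the same pin; __kvm_irq_line_state() records this
 * source's level and returns the logical OR across all sources, so the
 * pin only falls when every source has dropped its line.
 */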
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;

	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}

static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

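/*
 * Number of back-to-back EOIs on a still-pending level-triggered pin
 * after which the line is treated as an interrupt storm and re-injection
 * is deferred to the eoi_inject delayed work above.
 */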
#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000

static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
			struct kvm_ioapic *ioapic,
			int trigger_mode,
			int pin)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];

	/*
	 * Drop the lock while calling the ack notifiers because ack
	 * notifier callbacks for assigned devices call back into the
	 * IOAPIC recursively.  Since remote_irr is cleared only after
	 * the notifiers have run, if the same vector is delivered while
	 * the lock is dropped it is latched in irr and will be delivered
	 * once the ack notifier returns.
	 */
	spin_unlock(&ioapic->lock);
	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
	spin_lock(&ioapic->lock);

	if (trigger_mode != IOAPIC_LEVEL_TRIG ||
	    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
		return;

	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
	ent->fields.remote_irr = 0;
	if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
		++ioapic->irq_eoi[pin];
		if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
			/*
			 * Real hardware does not deliver the interrupt
			 * immediately during eoi broadcast, and this
			 * lets a buggy guest make slow progress
			 * even if it does not correctly handle a
			 * level-triggered interrupt.  Emulate this
			 * behavior if we detect an interrupt storm.
			 */
			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
			ioapic->irq_eoi[pin] = 0;
			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
		} else {
			ioapic_service(ioapic, pin, false);
		}
	} else {
		ioapic->irq_eoi[pin] = 0;
	}
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	int i;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	rtc_irq_eoi(ioapic, vcpu, vector);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;
		kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
	}
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			     gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ASSERT(!(addr & 0xf));	/* check alignment */

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8 *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

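/*
 * Save/restore for migration.  irr_delivered tracks edge-triggered
 * interrupts that have already been delivered to the LAPIC; masking them
 * out of the saved irr prevents userspace from re-injecting them after a
 * restore.
 */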
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}