// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;

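/*
 * Request an underflow maintenance interrupt (ICH_HCR_EL2.UIE) so that
 * we get notified when the list registers are (almost) empty and more
 * pending interrupts can be injected.
 */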
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

        cpuif->vgic_hcr |= ICH_HCR_UIE;
}

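/*
 * An LR signals its EOI maintenance interrupt when it is in the invalid
 * state, has the EOI bit set, and is not mapped to a HW interrupt.
 */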
static bool lr_signals_eoi_mi(u64 lr_val)
{
        return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
               !(lr_val & ICH_LR_HW);
}

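/*
 * Fold the state the guest left in the list registers back into the
 * corresponding struct vgic_irq (active/pending bits, SGI sources),
 * and reset used_lrs for the next vcpu entry.
 */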
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        int lr;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        cpuif->vgic_hcr &= ~ICH_HCR_UIE;

        for (lr = 0; lr < cpuif->used_lrs; lr++) {
                u64 val = cpuif->vgic_lr[lr];
                u32 intid, cpuid;
                struct vgic_irq *irq;
                bool is_v2_sgi = false;

                cpuid = val & GICH_LR_PHYSID_CPUID;
                cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

                if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                        intid = val & ICH_LR_VIRTUAL_ID_MASK;
                } else {
                        intid = val & GICH_LR_VIRTUALID;
                        is_v2_sgi = vgic_irq_is_sgi(intid);
                }

                /* Notify fds when the guest EOI'ed a level-triggered IRQ */
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
                        kvm_notify_acked_irq(vcpu->kvm, 0,
                                             intid - VGIC_NR_PRIVATE_IRQS);

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;

                raw_spin_lock(&irq->irq_lock);

                /* Always preserve the active bit */
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);

                if (irq->active && is_v2_sgi)
                        irq->active_source = cpuid;

                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & ICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;

                        if (is_v2_sgi)
                                irq->source |= (1 << cpuid);
                }

                /*
                 * Clear soft pending state when level irqs have been acked.
                 */
                if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
                        irq->pending_latch = false;

                /*
                 * Level-triggered mapped IRQs are special because we only
                 * observe rising edges as input to the VGIC.
                 *
                 * If the guest never acked the interrupt we have to sample
                 * the physical line and set the line level, because the
                 * device state could have changed or we simply need to
                 * process the still pending interrupt later.
                 *
                 * If this causes us to lower the level, we have to also clear
                 * the physical active state, since we will otherwise never be
                 * told when the interrupt becomes asserted again.
                 */
                if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
                        irq->line_level = vgic_get_phys_line_level(irq);

                        if (!irq->line_level)
                                vgic_irq_set_phys_active(irq, false);
                }

                raw_spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }

        cpuif->used_lrs = 0;
}

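/*
 * Transcribe the state of a struct vgic_irq into a list register so
 * that the interrupt becomes visible to (and acknowledgeable by) the
 * guest on its next entry.
 */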
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u64 val = irq->intid;
        bool allow_pending = true, is_v2_sgi;

        is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
                     model == KVM_DEV_TYPE_ARM_VGIC_V2);

        if (irq->active) {
                val |= ICH_LR_ACTIVE_BIT;
                if (is_v2_sgi)
                        val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
                if (vgic_irq_is_multi_sgi(irq)) {
                        allow_pending = false;
                        val |= ICH_LR_EOI;
                }
        }

        if (irq->hw) {
                val |= ICH_LR_HW;
                val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
                /*
                 * Never set pending+active on a HW interrupt, as the
                 * pending state is kept at the physical distributor
                 * level.
                 */
                if (irq->active)
                        allow_pending = false;
        } else {
                if (irq->config == VGIC_CONFIG_LEVEL) {
                        val |= ICH_LR_EOI;

                        /*
                         * Software resampling doesn't work very well
                         * if we allow P+A, so let's not do that.
                         */
                        if (irq->active)
                                allow_pending = false;
                }
        }

        if (allow_pending && irq_is_pending(irq)) {
                val |= ICH_LR_PENDING_BIT;

                if (irq->config == VGIC_CONFIG_EDGE)
                        irq->pending_latch = false;

                if (vgic_irq_is_sgi(irq->intid) &&
                    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                        u32 src = ffs(irq->source);

                        if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
                                           irq->intid))
                                return;

                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source) {
                                irq->pending_latch = true;
                                val |= ICH_LR_EOI;
                        }
                }
        }

        /*
         * Level-triggered mapped IRQs are special because we only observe
         * rising edges as input to the VGIC. We therefore lower the line
         * level here, so that we can take new virtual IRQs. See
         * vgic_v3_fold_lr_state for more info.
         */
        if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
                irq->line_level = false;

        if (irq->group)
                val |= ICH_LR_GROUP;

        val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

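/* Pack the generic vgic_vmcr fields into the ICH_VMCR_EL2 layout. */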
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;

        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
                        ICH_VMCR_ACK_CTL_MASK;
                vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
                         ICH_VMCR_FIQ_EN_MASK;
        } else {
                /*
                 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
                 * bit is RES1 and the VAckCtl bit is RES0.
                 */
                vmcr = ICH_VMCR_FIQ_EN_MASK;
        }

        vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
        vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
        vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
        vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
        vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
        vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
        vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

        cpu_if->vgic_vmcr = vmcr;
}

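/* Unpack the ICH_VMCR_EL2 image into the generic vgic_vmcr fields. */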
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;

        vmcr = cpu_if->vgic_vmcr;

        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
                                ICH_VMCR_ACK_CTL_SHIFT;
                vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
                               ICH_VMCR_FIQ_EN_SHIFT;
        } else {
                /*
                 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
                 * bit is RES1 and the VAckCtl bit is RES0.
                 */
                vmcrp->fiqen = 1;
                vmcrp->ackctl = 0;
        }

        vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
        vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
        vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
        vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
        vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE \
        (GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) | \
         GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner) | \
         GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

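/*
 * Per-vcpu initialization of the virtual CPU interface: reset VMCR,
 * select the SRE behaviour matching the emulated GIC model, and enable
 * the interface together with any configured system register traps.
 */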
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */
        vgic_v3->vgic_vmcr = 0;

        /*
         * If we are emulating a GICv3, we do it in a non-GICv2-compatible
         * way, so we force SRE to 1 to demonstrate this to the guest.
         * Also, we don't support any form of IRQ/FIQ bypass.
         * This goes with the spec allowing the value to be RAO/WI.
         */
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
                                     ICC_SRE_EL1_DFB |
                                     ICC_SRE_EL1_SRE);
                vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
        } else {
                vgic_v3->vgic_sre = 0;
        }

        vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
                                           ICH_VTR_ID_BITS_MASK) >>
                                           ICH_VTR_ID_BITS_SHIFT;
        vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
                                             ICH_VTR_PRI_BITS_MASK) >>
                                             ICH_VTR_PRI_BITS_SHIFT) + 1;

        /* Get the show on the road... */
        vgic_v3->vgic_hcr = ICH_HCR_EN;
        if (group0_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
        if (group1_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
        if (common_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TC;
}

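/*
 * Synchronize the pending state of an LPI from the guest's pending
 * table: read the corresponding bit, propagate it to the vgic_irq and
 * clear the bit in guest memory once it has been consumed.
 */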
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
        struct kvm_vcpu *vcpu;
        int byte_offset, bit_nr;
        gpa_t pendbase, ptr;
        bool status;
        u8 val;
        int ret;
        unsigned long flags;

retry:
        vcpu = irq->target_vcpu;
        if (!vcpu)
                return 0;

        pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

        byte_offset = irq->intid / BITS_PER_BYTE;
        bit_nr = irq->intid % BITS_PER_BYTE;
        ptr = pendbase + byte_offset;

        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
        if (ret)
                return ret;

        status = val & (1 << bit_nr);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->target_vcpu != vcpu) {
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                goto retry;
        }
        irq->pending_latch = status;
        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

        if (status) {
                /* clear consumed data */
                val &= ~(1 << bit_nr);
                ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * @kvm: kvm handle
 *
 * The kvm lock and all the vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq;
        gpa_t last_ptr = ~(gpa_t)0;
        int ret;
        u8 val;

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                int byte_offset, bit_nr;
                struct kvm_vcpu *vcpu;
                gpa_t pendbase, ptr;
                bool stored;

                vcpu = irq->target_vcpu;
                if (!vcpu)
                        continue;

                pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

                byte_offset = irq->intid / BITS_PER_BYTE;
                bit_nr = irq->intid % BITS_PER_BYTE;
                ptr = pendbase + byte_offset;

                if (ptr != last_ptr) {
                        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
                        if (ret)
                                return ret;
                        last_ptr = ptr;
                }

                stored = val & (1U << bit_nr);
                if (stored == irq->pending_latch)
                        continue;

                if (irq->pending_latch)
                        val |= 1 << bit_nr;
                else
                        val &= ~(1 << bit_nr);

                ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, &d->rd_regions, list) {
                if ((base + size > rdreg->base) &&
                    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
                        return true;
        }
        return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;

        if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
            d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
                return false;

        list_for_each_entry(rdreg, &d->rd_regions, list) {
                if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
                    rdreg->base)
                        return false;
        }

        if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
                return true;

        return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
                                      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, rd_regions, list) {
                if (!vgic_v3_redist_region_full(rdreg))
                        return rdreg;
        }
        return NULL;
}

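/**
 * vgic_v3_rdist_region_from_index - Look up a redistributor region by index
 *
 * @kvm: kvm handle
 * @index: index of the region to look up
 *
 * Return: the redist region with the matching index, or NULL if none exists.
 */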
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
                                                           u32 index)
{
        struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, rd_regions, list) {
                if (rdreg->index == index)
                        return rdreg;
        }
        return NULL;
}

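/*
 * Final sanity checks before wiring up the distributor MMIO device:
 * every base address must have been set by userspace, the regions must
 * not overlap, and the VGIC must already be initialized.
 */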
int vgic_v3_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int ret = 0;
        int c;

        kvm_for_each_vcpu(c, vcpu, kvm) {
                struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

                if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
                        kvm_debug("vcpu %d redistributor base not set\n", c);
                        return -ENXIO;
                }
        }

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
                return -ENXIO;
        }

        if (!vgic_v3_check_base(kvm)) {
                kvm_err("VGIC redist and dist frames overlap\n");
                return -EINVAL;
        }

        /*
         * For a VGICv3 we require the userland to explicitly initialize
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm))
                return -EBUSY;

        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
        if (ret) {
                kvm_err("Unable to register VGICv3 dist MMIO regions\n");
                return ret;
        }

        if (kvm_vgic_global_state.has_gicv4_1)
                vgic_v4_configure_vsgis(kvm);

        return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
        return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
        return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
        return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
        return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
        u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
        bool has_v2;
        int ret;

        has_v2 = ich_vtr_el2 >> 63;
        ich_vtr_el2 = (u32)ich_vtr_el2;

        /*
         * The ListRegs field is 5 bits, but there is an architectural
         * maximum of 16 list registers. Just ignore bit 4...
         */
        kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
        kvm_vgic_global_state.can_emulate_gicv2 = false;
        kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

        /* GICv4 support? */
        if (info->has_v4) {
                kvm_vgic_global_state.has_gicv4 = gicv4_enable;
                kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
                kvm_info("GICv4%s support %sabled\n",
                         kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
                         gicv4_enable ? "en" : "dis");
        }

        kvm_vgic_global_state.vcpu_base = 0;

        if (!info->vcpu.start) {
                kvm_info("GICv3: no GICV resource entry\n");
        } else if (!has_v2) {
                pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
        } else if (!PAGE_ALIGNED(info->vcpu.start)) {
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
        } else {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
                ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
                if (ret) {
                        kvm_err("Cannot register GICv2 KVM device.\n");
                        return ret;
                }
                kvm_info("vgic-v2@%llx\n", info->vcpu.start);
        }
        ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
        if (ret) {
                kvm_err("Cannot register GICv3 KVM device.\n");
                kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
                return ret;
        }

        if (kvm_vgic_global_state.vcpu_base == 0)
                kvm_info("disabling GICv2 emulation\n");

        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
                group0_trap = true;
                group1_trap = true;
        }

        if (group0_trap || group1_trap || common_trap) {
                kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
                         group0_trap ? "G0" : "",
                         group1_trap ? "G1" : "",
                         common_trap ? "C" : "");
                static_branch_enable(&vgic_v3_cpuif_trap);
        }

        kvm_vgic_global_state.vctrl_base = NULL;
        kvm_vgic_global_state.type = VGIC_V3;
        kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

        return 0;
}

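/* Restore the vcpu's virtual CPU interface state onto the hardware. */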
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        /*
         * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
         * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
         * VMCR_EL2 save/restore in the world switch.
         */
        if (likely(cpu_if->vgic_sre))
                kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

        kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

        if (has_vhe())
                __vgic_v3_activate_traps(cpu_if);

        WARN_ON(vgic_v4_load(vcpu));
}

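/* Snapshot ICH_VMCR_EL2 back into the shadow copy kept in cpu_if. */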
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        if (likely(cpu_if->vgic_sre))
                cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

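/* Save the virtual CPU interface state when the vcpu is scheduled out. */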
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        WARN_ON(vgic_v4_put(vcpu, false));

        vgic_v3_vmcr_sync(vcpu);

        kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

        if (has_vhe())
                __vgic_v3_deactivate_traps(cpu_if);
}