// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/adjust_pc.h>

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))
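
/*
 * Illustrative note (not part of the original file): these macros decode
 * ICH_VTR_EL2 fields defined by the GICv3 architecture. ListRegs[4:0] is
 * the number of implemented list registers minus one, and PREbits[28:26]
 * is the number of preemption bits minus one. Worked example: PREbits == 6
 * means 7 preemption bits, so vtr_to_nr_apr_regs() evaluates to
 * 1 << (7 - 5) == 4 active-priority registers per group.
 */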

static u64 __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

static void __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;

	/*
	 * Make sure stores to the GIC via the memory-mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}
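
/*
 * Worked example for the save loop above (illustrative only): with
 * used_lrs == 2 and ICH_ELRSR_EL2 == 0b01, LR0 is empty (its bit in the
 * empty-LR status register is set), so only the pending/active state bits
 * are cleared in the shadow copy; LR1 still holds live state and is read
 * back in full. Both hardware LRs are then zeroed so no stale state can
 * leak into another VM.
 */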

void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read
	 * the correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}
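
/*
 * Illustrative call ordering on guest entry/exit (a sketch; the actual
 * sequencing lives in the world-switch code, not in this file):
 *
 *	__vgic_v3_activate_traps(cpu_if);	// SRE/VMCR set up first
 *	__vgic_v3_restore_state(cpu_if);	// then LRs and ICH_HCR_EL2
 *	... run the guest ...
 *	__vgic_v3_save_state(cpu_if);
 *	__vgic_v3_deactivate_traps(cpu_if);
 */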

void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		fallthrough;
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		fallthrough;
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		fallthrough;
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		fallthrough;
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		fallthrough;
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		fallthrough;
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		fallthrough;
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		fallthrough;
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

/*
 * Return the GIC CPU configuration:
 * - [31:0]  ICH_VTR_EL2
 * - [62:32] RES0
 * - [63]    MMIO (GICv2) capable
 */
u64 __vgic_v3_get_gic_config(void)
{
	u64 val, sre = read_gicreg(ICC_SRE_EL1);
	unsigned long flags = 0;

	/*
	 * To check whether we have a MMIO-based (GICv2 compatible)
	 * CPU interface, we need to disable the system register
	 * view. To do that safely, we have to prevent any interrupt
	 * from firing (which would be deadly).
	 *
	 * Note that this only makes sense on VHE, as interrupts are
	 * already masked for nVHE as part of the exception entry to
	 * EL2.
	 */
	if (has_vhe())
		flags = local_daif_save();

	/*
	 * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
	 * that to be able to set ICC_SRE_EL1.SRE to 0, all the
	 * interrupt overrides must be set. You've got to love this.
	 */
	sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
	isb();
	write_gicreg(0, ICC_SRE_EL1);
	isb();

	val = read_gicreg(ICC_SRE_EL1);

	write_gicreg(sre, ICC_SRE_EL1);
	isb();
	sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
	isb();

	if (has_vhe())
		local_daif_restore(flags);

	val  = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
	val |= read_gicreg(ICH_VTR_EL2);

	return val;
}
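
/*
 * A minimal decoding sketch for callers of __vgic_v3_get_gic_config()
 * (illustrative, not taken from the original file):
 *
 *	u64 cfg = __vgic_v3_get_gic_config();
 *	bool has_gicv2 = cfg & BIT(63);		// MMIO (GICv2) capable
 *	u32 ich_vtr = lower_32_bits(cfg);	// raw ICH_VTR_EL2 value
 */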

u64 __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static int __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}
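
/*
 * Worked example (illustrative): with 5 preemption bits reported in
 * ICH_VTR_EL2, __vgic_v3_bpr_min() returns 8 - 5 == 3, i.e. only
 * priority bits [7:3] can ever take part in preemption decisions.
 */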

static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}
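
/*
 * Descriptive note (not part of the original file): the banked group-0
 * accessors trapped here (ICC_IAR0_EL1, ICC_EOIR0_EL1, ICC_HPPIR0_EL1,
 * ICC_BPR0_EL1, ICC_AP0Rn_EL1) are all encoded with CRm == 8, while their
 * group-1 counterparts use other CRm values, so "crm != 8" yields 0 for a
 * group-0 access and 1 for a group-1 access.
 */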

#define GICv3_IDLE_PRIORITY	0xff

static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
					 u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu, int intid,
				    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
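
/*
 * Worked example (illustrative): with 5 preemption bits there is a single
 * APR per group and __vgic_v3_bpr_min() == 3. If AP0R0 | AP1R0 == 0x10,
 * __ffs() finds bit 4, and the function above returns
 * (0 + 4) << 3 == 0x20 as the highest active priority.
 */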

static unsigned int __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}
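
/*
 * Worked example (illustrative): for a group-1 interrupt with BPR1 == 4
 * (and CBPR clear), the preemption mask is GENMASK(7, 0) << 4, i.e. 0xf0
 * once truncated to 8 bits, so a priority of 0x2d maps to the preemption
 * level 0x20.
 */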

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

static int __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

static void __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}
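
/*
 * Descriptive note (not part of the original file): ICH_HCR_EL2.EOIcount
 * (bits [31:27]) counts EOIs that did not match any list register; the
 * helper above emulates that behaviour when the guest EOIs or deactivates
 * an interrupt we no longer hold in an LR.
 */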

static void __vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
				 u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
				 u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_read_hppir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __vgic_v3_read_rpr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

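/*
 * Descriptive note (not part of the original file): this is the EL2 trap
 * handler for guest accesses to the ICC_* system registers. It returns 1
 * when the access was emulated (the trapped instruction is then skipped),
 * and 0 when the register or access type is not handled here, leaving the
 * trap to be resolved by the rest of KVM.
 */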
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_esr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}